/*
 * Copyright (c) 2011-2016 Synaptics Incorporated
 * Copyright (c) 2011 Unixphere
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rmi.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/of.h>
#include "rmi_driver.h"

#define RMI_SPI_DEFAULT_XFER_BUF_SIZE   64

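/*
 * RMI4 register space is organized in 256-register pages; register 0xff of
 * every page is the page select register (see rmi_set_page() below).
 */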
#define RMI_PAGE_SELECT_REGISTER        0x00FF
#define RMI_SPI_PAGE(addr)              (((addr) >> 8) & 0x80)
#define RMI_SPI_XFER_SIZE_LIMIT         255

#define BUFFER_SIZE_INCREMENT 32

enum rmi_spi_op {
        RMI_SPI_WRITE = 0,
        RMI_SPI_READ,
        RMI_SPI_V2_READ_UNIFIED,
        RMI_SPI_V2_READ_SPLIT,
        RMI_SPI_V2_WRITE,
};

struct rmi_spi_cmd {
        enum rmi_spi_op op;
        u16 addr;
};

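/*
 * Per-device transport state: rx_buf and tx_buf are a single DMA-safe
 * allocation (tx_buf points xfer_buf_size bytes into it), and rx_xfers/
 * tx_xfers are pre-allocated spi_transfer pools used when per-byte read or
 * write delays are configured.  page caches the value last written to the
 * page select register and is protected by page_mutex.
 */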
struct rmi_spi_xport {
        struct rmi_transport_dev xport;
        struct spi_device *spi;

        struct mutex page_mutex;
        int page;

        u8 *rx_buf;
        u8 *tx_buf;
        int xfer_buf_size;

        struct spi_transfer *rx_xfers;
        struct spi_transfer *tx_xfers;
        int rx_xfer_count;
        int tx_xfer_count;
};

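/*
 * Grow (or initially allocate) the rx/tx bounce buffers and the spi_transfer
 * pools so they can hold a transfer of @len bytes.  Buffer sizes start at
 * RMI_SPI_DEFAULT_XFER_BUF_SIZE, double until they fit, and are capped at
 * RMI_SPI_XFER_SIZE_LIMIT.  Old devm allocations are freed once the new ones
 * are in place.
 */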
static int rmi_spi_manage_pools(struct rmi_spi_xport *rmi_spi, int len)
{
        struct spi_device *spi = rmi_spi->spi;
        int buf_size = rmi_spi->xfer_buf_size
                ? rmi_spi->xfer_buf_size : RMI_SPI_DEFAULT_XFER_BUF_SIZE;
        struct spi_transfer *xfer_buf;
        void *buf;
        void *tmp;

        while (buf_size < len)
                buf_size *= 2;

        if (buf_size > RMI_SPI_XFER_SIZE_LIMIT)
                buf_size = RMI_SPI_XFER_SIZE_LIMIT;

        tmp = rmi_spi->rx_buf;
        buf = devm_kzalloc(&spi->dev, buf_size * 2,
                                GFP_KERNEL | GFP_DMA);
        if (!buf)
                return -ENOMEM;

        rmi_spi->rx_buf = buf;
        rmi_spi->tx_buf = &rmi_spi->rx_buf[buf_size];
        rmi_spi->xfer_buf_size = buf_size;

        if (tmp)
                devm_kfree(&spi->dev, tmp);

        if (rmi_spi->xport.pdata.spi_data.read_delay_us)
                rmi_spi->rx_xfer_count = buf_size;
        else
                rmi_spi->rx_xfer_count = 1;

        if (rmi_spi->xport.pdata.spi_data.write_delay_us)
                rmi_spi->tx_xfer_count = buf_size;
        else
                rmi_spi->tx_xfer_count = 1;

        /*
         * Allocate a pool of spi_transfer buffers for devices which need
         * per byte delays.
         */
        tmp = rmi_spi->rx_xfers;
        xfer_buf = devm_kzalloc(&spi->dev,
                (rmi_spi->rx_xfer_count + rmi_spi->tx_xfer_count)
                * sizeof(struct spi_transfer), GFP_KERNEL);
        if (!xfer_buf)
                return -ENOMEM;

        rmi_spi->rx_xfers = xfer_buf;
        rmi_spi->tx_xfers = &xfer_buf[rmi_spi->rx_xfer_count];

        if (tmp)
                devm_kfree(&spi->dev, tmp);

        return 0;
}

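/*
 * Perform one RMI SPI transaction: build the command header for @cmd in
 * tx_buf, append the optional @tx_buf payload, queue the transmit and
 * (optionally) receive spi_transfers into a single spi_message, run it
 * synchronously and copy any received data out to @rx_buf.  Falls back to
 * one spi_transfer per byte when the platform requests inter-byte delays.
 */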
static int rmi_spi_xfer(struct rmi_spi_xport *rmi_spi,
                        const struct rmi_spi_cmd *cmd, const u8 *tx_buf,
                        int tx_len, u8 *rx_buf, int rx_len)
{
        struct spi_device *spi = rmi_spi->spi;
        struct rmi_device_platform_data_spi *spi_data =
                                        &rmi_spi->xport.pdata.spi_data;
        struct spi_message msg;
        struct spi_transfer *xfer;
        int ret = 0;
        int len;
        int cmd_len = 0;
        int total_tx_len;
        int i;
        u16 addr = cmd->addr;

        spi_message_init(&msg);

        switch (cmd->op) {
        case RMI_SPI_WRITE:
        case RMI_SPI_READ:
                cmd_len += 2;
                break;
        case RMI_SPI_V2_READ_UNIFIED:
        case RMI_SPI_V2_READ_SPLIT:
        case RMI_SPI_V2_WRITE:
                cmd_len += 4;
                break;
        }

        total_tx_len = cmd_len + tx_len;
        len = max(total_tx_len, rx_len);

        if (len > RMI_SPI_XFER_SIZE_LIMIT)
                return -EINVAL;

        if (rmi_spi->xfer_buf_size < len) {
                ret = rmi_spi_manage_pools(rmi_spi, len);
                if (ret < 0)
                        return ret;
        }

        if (addr == 0)
                /*
                 * SPI needs an address. Use 0x7FF if we want to keep
                 * reading from the last position of the register pointer.
                 */
                addr = 0x7FF;

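        /*
         * Fill in the command header: for the original protocol this is the
         * 16-bit register address, with bit 7 of the high byte set for a
         * read; for SPI v2 writes it is a 4-byte header carrying a command
         * byte, the address and the length.
         */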
        switch (cmd->op) {
        case RMI_SPI_WRITE:
                rmi_spi->tx_buf[0] = (addr >> 8);
                rmi_spi->tx_buf[1] = addr & 0xFF;
                break;
        case RMI_SPI_READ:
                rmi_spi->tx_buf[0] = (addr >> 8) | 0x80;
                rmi_spi->tx_buf[1] = addr & 0xFF;
                break;
        case RMI_SPI_V2_READ_UNIFIED:
                break;
        case RMI_SPI_V2_READ_SPLIT:
                break;
        case RMI_SPI_V2_WRITE:
                rmi_spi->tx_buf[0] = 0x40;
                rmi_spi->tx_buf[1] = (addr >> 8) & 0xFF;
                rmi_spi->tx_buf[2] = addr & 0xFF;
                rmi_spi->tx_buf[3] = tx_len;
                break;
        }

        if (tx_buf)
                memcpy(&rmi_spi->tx_buf[cmd_len], tx_buf, tx_len);

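        /*
         * If a per-byte write delay was requested, queue one single-byte
         * transfer per byte so that delay_usecs is applied between bytes;
         * otherwise send everything in one transfer.  The receive path
         * below does the same for the per-byte read delay.
         */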
        if (rmi_spi->tx_xfer_count > 1) {
                for (i = 0; i < total_tx_len; i++) {
                        xfer = &rmi_spi->tx_xfers[i];
                        memset(xfer, 0, sizeof(struct spi_transfer));
                        xfer->tx_buf = &rmi_spi->tx_buf[i];
                        xfer->len = 1;
                        xfer->delay_usecs = spi_data->write_delay_us;
                        spi_message_add_tail(xfer, &msg);
                }
        } else {
                xfer = rmi_spi->tx_xfers;
                memset(xfer, 0, sizeof(struct spi_transfer));
                xfer->tx_buf = rmi_spi->tx_buf;
                xfer->len = total_tx_len;
                spi_message_add_tail(xfer, &msg);
        }

        rmi_dbg(RMI_DEBUG_XPORT, &spi->dev, "%s: cmd: %s tx_buf len: %d tx_buf: %*ph\n",
                __func__, cmd->op == RMI_SPI_WRITE ? "WRITE" : "READ",
                total_tx_len, total_tx_len, rmi_spi->tx_buf);

        if (rx_buf) {
                if (rmi_spi->rx_xfer_count > 1) {
                        for (i = 0; i < rx_len; i++) {
                                xfer = &rmi_spi->rx_xfers[i];
                                memset(xfer, 0, sizeof(struct spi_transfer));
                                xfer->rx_buf = &rmi_spi->rx_buf[i];
                                xfer->len = 1;
                                xfer->delay_usecs = spi_data->read_delay_us;
                                spi_message_add_tail(xfer, &msg);
                        }
                } else {
                        xfer = rmi_spi->rx_xfers;
                        memset(xfer, 0, sizeof(struct spi_transfer));
                        xfer->rx_buf = rmi_spi->rx_buf;
                        xfer->len = rx_len;
                        spi_message_add_tail(xfer, &msg);
                }
        }

        ret = spi_sync(spi, &msg);
        if (ret < 0) {
                dev_err(&spi->dev, "spi xfer failed: %d\n", ret);
                return ret;
        }

        if (rx_buf) {
                memcpy(rx_buf, rmi_spi->rx_buf, rx_len);
                rmi_dbg(RMI_DEBUG_XPORT, &spi->dev, "%s: (%d) %*ph\n",
                        __func__, rx_len, rx_len, rx_buf);
        }

        return 0;
}

/*
 * rmi_set_page - Set RMI page
 * @rmi_spi: The pointer to the rmi_spi_xport struct
 * @page: The new page address.
 *
 * RMI devices have 16-bit addressing, but some of the transport
 * implementations (like SMBus) only have 8-bit addressing. So RMI implements
 * a page select register at 0xff of every page, which lets us reliably
 * address registers in 256-register pages.
 *
 * The page_mutex lock must be held when this function is entered.
 *
 * Returns zero on success, non-zero on failure.
 */
static int rmi_set_page(struct rmi_spi_xport *rmi_spi, u8 page)
{
        struct rmi_spi_cmd cmd;
        int ret;

        cmd.op = RMI_SPI_WRITE;
        cmd.addr = RMI_PAGE_SELECT_REGISTER;

        ret = rmi_spi_xfer(rmi_spi, &cmd, &page, 1, NULL, 0);

        /* Only cache the new page once the write has succeeded. */
        if (!ret)
                rmi_spi->page = page;

        return ret;
}

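/*
 * Block write: if RMI_SPI_PAGE(addr) differs from the cached page, select
 * the new page first, then issue an RMI_SPI_WRITE with the payload.
 * page_mutex serializes the page switch and the access itself.
 */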
static int rmi_spi_write_block(struct rmi_transport_dev *xport, u16 addr,
                               const void *buf, size_t len)
{
        struct rmi_spi_xport *rmi_spi =
                container_of(xport, struct rmi_spi_xport, xport);
        struct rmi_spi_cmd cmd;
        int ret;

        mutex_lock(&rmi_spi->page_mutex);

        if (RMI_SPI_PAGE(addr) != rmi_spi->page) {
                ret = rmi_set_page(rmi_spi, RMI_SPI_PAGE(addr));
                if (ret)
                        goto exit;
        }

        cmd.op = RMI_SPI_WRITE;
        cmd.addr = addr;

        ret = rmi_spi_xfer(rmi_spi, &cmd, buf, len, NULL, 0);

exit:
        mutex_unlock(&rmi_spi->page_mutex);
        return ret;
}

static int rmi_spi_read_block(struct rmi_transport_dev *xport, u16 addr,
                              void *buf, size_t len)
{
        struct rmi_spi_xport *rmi_spi =
                container_of(xport, struct rmi_spi_xport, xport);
        struct rmi_spi_cmd cmd;
        int ret;

        mutex_lock(&rmi_spi->page_mutex);

        if (RMI_SPI_PAGE(addr) != rmi_spi->page) {
                ret = rmi_set_page(rmi_spi, RMI_SPI_PAGE(addr));
                if (ret)
                        goto exit;
        }

        cmd.op = RMI_SPI_READ;
        cmd.addr = addr;

        ret = rmi_spi_xfer(rmi_spi, &cmd, NULL, 0, buf, len);

exit:
        mutex_unlock(&rmi_spi->page_mutex);
        return ret;
}

static const struct rmi_transport_ops rmi_spi_ops = {
        .write_block    = rmi_spi_write_block,
        .read_block     = rmi_spi_read_block,
};

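/*
 * Optional per-byte delays can be specified in the device tree via the
 * spi-rx-delay-us and spi-tx-delay-us properties; both are parsed here as
 * optional 32-bit values.
 */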
#ifdef CONFIG_OF
static int rmi_spi_of_probe(struct spi_device *spi,
                        struct rmi_device_platform_data *pdata)
{
        struct device *dev = &spi->dev;
        int retval;

        retval = rmi_of_property_read_u32(dev,
                        &pdata->spi_data.read_delay_us,
                        "spi-rx-delay-us", 1);
        if (retval)
                return retval;

        retval = rmi_of_property_read_u32(dev,
                        &pdata->spi_data.write_delay_us,
                        "spi-tx-delay-us", 1);
        if (retval)
                return retval;

        return 0;
}

static const struct of_device_id rmi_spi_of_match[] = {
        { .compatible = "syna,rmi4-spi" },
        {},
};
MODULE_DEVICE_TABLE(of, rmi_spi_of_match);
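/*
 * Illustrative (not taken from this file) device tree node for an RMI4
 * sensor on SPI; the property values below are example placeholders:
 *
 *      rmi4-spi@0 {
 *              compatible = "syna,rmi4-spi";
 *              reg = <0>;
 *              spi-max-frequency = <4000000>;
 *              interrupt-parent = <&gpio>;
 *              interrupts = <4 IRQ_TYPE_LEVEL_LOW>;
 *              spi-rx-delay-us = <30>;
 *              spi-tx-delay-us = <30>;
 *      };
 */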
#else
static inline int rmi_spi_of_probe(struct spi_device *spi,
                                struct rmi_device_platform_data *pdata)
{
        return -ENODEV;
}
#endif

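/* devm action: tear down the transport device when the SPI device goes away. */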
static void rmi_spi_unregister_transport(void *data)
{
        struct rmi_spi_xport *rmi_spi = data;

        rmi_unregister_transport_device(&rmi_spi->xport);
}

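/*
 * Probe: require a full-duplex controller, gather platform data (from the
 * device tree or legacy platform data), configure the SPI device, allocate
 * the transfer buffers, verify we can talk to the sensor by selecting page 0,
 * and finally register the RMI transport.
 */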
static int rmi_spi_probe(struct spi_device *spi)
{
        struct rmi_spi_xport *rmi_spi;
        struct rmi_device_platform_data *pdata;
        struct rmi_device_platform_data *spi_pdata = spi->dev.platform_data;
        int error;

        if (spi->master->flags & SPI_MASTER_HALF_DUPLEX)
                return -EINVAL;

        rmi_spi = devm_kzalloc(&spi->dev, sizeof(struct rmi_spi_xport),
                        GFP_KERNEL);
        if (!rmi_spi)
                return -ENOMEM;

        pdata = &rmi_spi->xport.pdata;

        if (spi->dev.of_node) {
                error = rmi_spi_of_probe(spi, pdata);
                if (error)
                        return error;
        } else if (spi_pdata) {
                *pdata = *spi_pdata;
        }

        if (pdata->spi_data.bits_per_word)
                spi->bits_per_word = pdata->spi_data.bits_per_word;

        if (pdata->spi_data.mode)
                spi->mode = pdata->spi_data.mode;

        error = spi_setup(spi);
        if (error < 0) {
                dev_err(&spi->dev, "spi_setup failed!\n");
                return error;
        }

        pdata->irq = spi->irq;

        rmi_spi->spi = spi;
        mutex_init(&rmi_spi->page_mutex);

        rmi_spi->xport.dev = &spi->dev;
        rmi_spi->xport.proto_name = "spi";
        rmi_spi->xport.ops = &rmi_spi_ops;

        spi_set_drvdata(spi, rmi_spi);

        error = rmi_spi_manage_pools(rmi_spi, RMI_SPI_DEFAULT_XFER_BUF_SIZE);
        if (error)
                return error;

        /*
         * Setting the page to zero will (a) make sure the PSR is in a
         * known state, and (b) make sure we can talk to the device.
         */
        error = rmi_set_page(rmi_spi, 0);
        if (error) {
                dev_err(&spi->dev, "Failed to set page select to 0.\n");
                return error;
        }

        dev_info(&spi->dev, "registering SPI-connected sensor\n");

        error = rmi_register_transport_device(&rmi_spi->xport);
        if (error) {
                dev_err(&spi->dev, "failed to register sensor: %d\n", error);
                return error;
        }

        error = devm_add_action_or_reset(&spi->dev,
                                         rmi_spi_unregister_transport,
                                         rmi_spi);
        if (error)
                return error;

        return 0;
}

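/*
 * PM handlers hand off to the RMI core.  The runtime handlers below always
 * return 0 even if the core reports an error; only the system sleep handlers
 * propagate a failure.
 */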
#ifdef CONFIG_PM_SLEEP
static int rmi_spi_suspend(struct device *dev)
{
        struct spi_device *spi = to_spi_device(dev);
        struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
        int ret;

        ret = rmi_driver_suspend(rmi_spi->xport.rmi_dev, true);
        if (ret)
                dev_warn(dev, "Failed to suspend device: %d\n", ret);

        return ret;
}

static int rmi_spi_resume(struct device *dev)
{
        struct spi_device *spi = to_spi_device(dev);
        struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
        int ret;

        ret = rmi_driver_resume(rmi_spi->xport.rmi_dev, true);
        if (ret)
                dev_warn(dev, "Failed to resume device: %d\n", ret);

        return ret;
}
#endif

#ifdef CONFIG_PM
static int rmi_spi_runtime_suspend(struct device *dev)
{
        struct spi_device *spi = to_spi_device(dev);
        struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
        int ret;

        ret = rmi_driver_suspend(rmi_spi->xport.rmi_dev, false);
        if (ret)
                dev_warn(dev, "Failed to suspend device: %d\n", ret);

        return 0;
}

static int rmi_spi_runtime_resume(struct device *dev)
{
        struct spi_device *spi = to_spi_device(dev);
        struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
        int ret;

        ret = rmi_driver_resume(rmi_spi->xport.rmi_dev, false);
        if (ret)
                dev_warn(dev, "Failed to resume device: %d\n", ret);

        return 0;
}
#endif

static const struct dev_pm_ops rmi_spi_pm = {
        SET_SYSTEM_SLEEP_PM_OPS(rmi_spi_suspend, rmi_spi_resume)
        SET_RUNTIME_PM_OPS(rmi_spi_runtime_suspend, rmi_spi_runtime_resume,
                           NULL)
};

static const struct spi_device_id rmi_id[] = {
        { "rmi4_spi", 0 },
        { }
};
MODULE_DEVICE_TABLE(spi, rmi_id);

static struct spi_driver rmi_spi_driver = {
        .driver = {
                .name   = "rmi4_spi",
                .pm     = &rmi_spi_pm,
                .of_match_table = of_match_ptr(rmi_spi_of_match),
        },
        .id_table       = rmi_id,
        .probe          = rmi_spi_probe,
};

module_spi_driver(rmi_spi_driver);

MODULE_AUTHOR("Christopher Heiny <cheiny@synaptics.com>");
MODULE_AUTHOR("Andrew Duggan <aduggan@synaptics.com>");
MODULE_DESCRIPTION("RMI SPI driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(RMI_DRIVER_VERSION);