GNU Linux-libre 4.4.288-gnu1
drivers/spi/spi.c
/*
 * SPI init/core code
 *
 * Copyright (C) 2005 David Brownell
 * Copyright (C) 2008 Secret Lab Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/clk/clk-conf.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/ioport.h>
#include <linux/acpi.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>

static void spidev_release(struct device *dev)
{
	struct spi_device	*spi = to_spi_device(dev);

	/* spi masters may cleanup for released devices */
	if (spi->master->cleanup)
		spi->master->cleanup(spi);

	spi_master_put(spi->master);
	kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
	const struct spi_device	*spi = to_spi_device(dev);
	int len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);

#define SPI_STATISTICS_ATTRS(field, file)				\
static ssize_t spi_master_##field##_show(struct device *dev,		\
					 struct device_attribute *attr,	\
					 char *buf)			\
{									\
	struct spi_master *master = container_of(dev,			\
						 struct spi_master, dev); \
	return spi_statistics_##field##_show(&master->statistics, buf); \
}									\
static struct device_attribute dev_attr_spi_master_##field = {		\
	.attr = { .name = file, .mode = S_IRUGO },			\
	.show = spi_master_##field##_show,				\
};									\
static ssize_t spi_device_##field##_show(struct device *dev,		\
					 struct device_attribute *attr,	\
					 char *buf)			\
{									\
	struct spi_device *spi = container_of(dev,			\
					      struct spi_device, dev);	\
	return spi_statistics_##field##_show(&spi->statistics, buf);	\
}									\
static struct device_attribute dev_attr_spi_device_##field = {		\
	.attr = { .name = file, .mode = S_IRUGO },			\
	.show = spi_device_##field##_show,				\
}

#define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string)	\
static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
					    char *buf)			\
{									\
	unsigned long flags;						\
	ssize_t len;							\
	spin_lock_irqsave(&stat->lock, flags);				\
	len = sprintf(buf, format_string, stat->field);			\
	spin_unlock_irqrestore(&stat->lock, flags);			\
	return len;							\
}									\
SPI_STATISTICS_ATTRS(name, file)

#define SPI_STATISTICS_SHOW(field, format_string)			\
	SPI_STATISTICS_SHOW_NAME(field, __stringify(field),		\
				 field, format_string)

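/*
 * For example, SPI_STATISTICS_SHOW(messages, "%lu") below expands to a
 * spi_statistics_messages_show() helper plus two read-only attributes,
 * dev_attr_spi_master_messages and dev_attr_spi_device_messages, both
 * exposed in sysfs as "messages" and both printing a locked snapshot of
 * the corresponding stat->messages counter.
 */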
SPI_STATISTICS_SHOW(messages, "%lu");
SPI_STATISTICS_SHOW(transfers, "%lu");
SPI_STATISTICS_SHOW(errors, "%lu");
SPI_STATISTICS_SHOW(timedout, "%lu");

SPI_STATISTICS_SHOW(spi_sync, "%lu");
SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu");
SPI_STATISTICS_SHOW(spi_async, "%lu");

SPI_STATISTICS_SHOW(bytes, "%llu");
SPI_STATISTICS_SHOW(bytes_rx, "%llu");
SPI_STATISTICS_SHOW(bytes_tx, "%llu");

#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number)		\
	SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index,		\
				 "transfer_bytes_histo_" number,	\
				 transfer_bytes_histo[index],  "%lu")
SPI_STATISTICS_TRANSFER_BYTES_HISTO(0,  "0-1");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(1,  "2-3");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(2,  "4-7");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(3,  "8-15");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(4,  "16-31");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(5,  "32-63");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(6,  "64-127");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(7,  "128-255");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(8,  "256-511");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(9,  "512-1023");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");

static struct attribute *spi_dev_attrs[] = {
	&dev_attr_modalias.attr,
	NULL,
};

static const struct attribute_group spi_dev_group = {
	.attrs  = spi_dev_attrs,
};

static struct attribute *spi_device_statistics_attrs[] = {
	&dev_attr_spi_device_messages.attr,
	&dev_attr_spi_device_transfers.attr,
	&dev_attr_spi_device_errors.attr,
	&dev_attr_spi_device_timedout.attr,
	&dev_attr_spi_device_spi_sync.attr,
	&dev_attr_spi_device_spi_sync_immediate.attr,
	&dev_attr_spi_device_spi_async.attr,
	&dev_attr_spi_device_bytes.attr,
	&dev_attr_spi_device_bytes_rx.attr,
	&dev_attr_spi_device_bytes_tx.attr,
	&dev_attr_spi_device_transfer_bytes_histo0.attr,
	&dev_attr_spi_device_transfer_bytes_histo1.attr,
	&dev_attr_spi_device_transfer_bytes_histo2.attr,
	&dev_attr_spi_device_transfer_bytes_histo3.attr,
	&dev_attr_spi_device_transfer_bytes_histo4.attr,
	&dev_attr_spi_device_transfer_bytes_histo5.attr,
	&dev_attr_spi_device_transfer_bytes_histo6.attr,
	&dev_attr_spi_device_transfer_bytes_histo7.attr,
	&dev_attr_spi_device_transfer_bytes_histo8.attr,
	&dev_attr_spi_device_transfer_bytes_histo9.attr,
	&dev_attr_spi_device_transfer_bytes_histo10.attr,
	&dev_attr_spi_device_transfer_bytes_histo11.attr,
	&dev_attr_spi_device_transfer_bytes_histo12.attr,
	&dev_attr_spi_device_transfer_bytes_histo13.attr,
	&dev_attr_spi_device_transfer_bytes_histo14.attr,
	&dev_attr_spi_device_transfer_bytes_histo15.attr,
	&dev_attr_spi_device_transfer_bytes_histo16.attr,
	NULL,
};

static const struct attribute_group spi_device_statistics_group = {
	.name  = "statistics",
	.attrs  = spi_device_statistics_attrs,
};

static const struct attribute_group *spi_dev_groups[] = {
	&spi_dev_group,
	&spi_device_statistics_group,
	NULL,
};

static struct attribute *spi_master_statistics_attrs[] = {
	&dev_attr_spi_master_messages.attr,
	&dev_attr_spi_master_transfers.attr,
	&dev_attr_spi_master_errors.attr,
	&dev_attr_spi_master_timedout.attr,
	&dev_attr_spi_master_spi_sync.attr,
	&dev_attr_spi_master_spi_sync_immediate.attr,
	&dev_attr_spi_master_spi_async.attr,
	&dev_attr_spi_master_bytes.attr,
	&dev_attr_spi_master_bytes_rx.attr,
	&dev_attr_spi_master_bytes_tx.attr,
	&dev_attr_spi_master_transfer_bytes_histo0.attr,
	&dev_attr_spi_master_transfer_bytes_histo1.attr,
	&dev_attr_spi_master_transfer_bytes_histo2.attr,
	&dev_attr_spi_master_transfer_bytes_histo3.attr,
	&dev_attr_spi_master_transfer_bytes_histo4.attr,
	&dev_attr_spi_master_transfer_bytes_histo5.attr,
	&dev_attr_spi_master_transfer_bytes_histo6.attr,
	&dev_attr_spi_master_transfer_bytes_histo7.attr,
	&dev_attr_spi_master_transfer_bytes_histo8.attr,
	&dev_attr_spi_master_transfer_bytes_histo9.attr,
	&dev_attr_spi_master_transfer_bytes_histo10.attr,
	&dev_attr_spi_master_transfer_bytes_histo11.attr,
	&dev_attr_spi_master_transfer_bytes_histo12.attr,
	&dev_attr_spi_master_transfer_bytes_histo13.attr,
	&dev_attr_spi_master_transfer_bytes_histo14.attr,
	&dev_attr_spi_master_transfer_bytes_histo15.attr,
	&dev_attr_spi_master_transfer_bytes_histo16.attr,
	NULL,
};

static const struct attribute_group spi_master_statistics_group = {
	.name  = "statistics",
	.attrs  = spi_master_statistics_attrs,
};

static const struct attribute_group *spi_master_groups[] = {
	&spi_master_statistics_group,
	NULL,
};

void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
				       struct spi_transfer *xfer,
				       struct spi_master *master)
{
	unsigned long flags;
	int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;

	if (l2len < 0)
		l2len = 0;

	spin_lock_irqsave(&stats->lock, flags);

	stats->transfers++;
	stats->transfer_bytes_histo[l2len]++;

	stats->bytes += xfer->len;
	if ((xfer->tx_buf) &&
	    (xfer->tx_buf != master->dummy_tx))
		stats->bytes_tx += xfer->len;
	if ((xfer->rx_buf) &&
	    (xfer->rx_buf != master->dummy_rx))
		stats->bytes_rx += xfer->len;

	spin_unlock_irqrestore(&stats->lock, flags);
}
EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats);
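/*
 * Worked example of the histogram bucketing above: for xfer->len == 100,
 * fls(100) == 7 (100 is 0b1100100), so l2len == 6 and the transfer is
 * counted in transfer_bytes_histo[6], i.e. the "64-127" bucket exposed
 * in sysfs as transfer_bytes_histo_64-127.
 */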

/* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */

static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
						const struct spi_device *sdev)
{
	while (id->name[0]) {
		if (!strcmp(sdev->modalias, id->name))
			return id;
		id++;
	}
	return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

	return spi_match_id(sdrv->id_table, sdev);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);

static int spi_match_device(struct device *dev, struct device_driver *drv)
{
	const struct spi_device	*spi = to_spi_device(dev);
	const struct spi_driver	*sdrv = to_spi_driver(drv);

	/* Attempt an OF style match */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	if (sdrv->id_table)
		return !!spi_match_id(sdrv->id_table, spi);

	return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	const struct spi_device		*spi = to_spi_device(dev);
	int rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
	return 0;
}

struct bus_type spi_bus_type = {
	.name		= "spi",
	.dev_groups	= spi_dev_groups,
	.match		= spi_match_device,
	.uevent		= spi_uevent,
};
EXPORT_SYMBOL_GPL(spi_bus_type);
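/*
 * Devices on this bus appear under /sys/bus/spi/devices/.  Their names
 * come from spi_dev_set_name() below: typically "<master>.<chip_select>",
 * e.g. "spi0.1" for chip select 1 on bus number 0, or "spi-<ACPI name>"
 * for ACPI enumerated devices.
 */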


static int spi_drv_probe(struct device *dev)
{
	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);
	struct spi_device		*spi = to_spi_device(dev);
	int ret;

	ret = of_clk_set_defaults(dev->of_node, false);
	if (ret)
		return ret;

	if (dev->of_node) {
		spi->irq = of_irq_get(dev->of_node, 0);
		if (spi->irq == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		if (spi->irq < 0)
			spi->irq = 0;
	}

	ret = dev_pm_domain_attach(dev, true);
	if (ret != -EPROBE_DEFER) {
		ret = sdrv->probe(spi);
		if (ret)
			dev_pm_domain_detach(dev, true);
	}

	return ret;
}

static int spi_drv_remove(struct device *dev)
{
	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);
	int ret;

	ret = sdrv->remove(to_spi_device(dev));
	dev_pm_domain_detach(dev, true);

	return ret;
}

static void spi_drv_shutdown(struct device *dev)
{
	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);

	sdrv->shutdown(to_spi_device(dev));
}

/**
 * __spi_register_driver - register a SPI driver
 * @owner: owner module of the driver to register
 * @sdrv: the driver to register
 * Context: can sleep
 *
 * Return: zero on success, else a negative error code.
 */
int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
	sdrv->driver.owner = owner;
	sdrv->driver.bus = &spi_bus_type;
	if (sdrv->probe)
		sdrv->driver.probe = spi_drv_probe;
	if (sdrv->remove)
		sdrv->driver.remove = spi_drv_remove;
	if (sdrv->shutdown)
		sdrv->driver.shutdown = spi_drv_shutdown;
	return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__spi_register_driver);
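/*
 * Typical usage (illustrative sketch; "foo" and its callbacks are
 * hypothetical): a protocol driver is normally registered through the
 * spi_register_driver()/module_spi_driver() wrappers around this
 * function rather than by calling it directly:
 *
 *	static int foo_probe(struct spi_device *spi)
 *	{
 *		return 0;
 *	}
 *
 *	static int foo_remove(struct spi_device *spi)
 *	{
 *		return 0;
 *	}
 *
 *	static struct spi_driver foo_driver = {
 *		.driver = {
 *			.name	= "foo",
 *		},
 *		.probe	= foo_probe,
 *		.remove	= foo_remove,
 *	};
 *	module_spi_driver(foo_driver);
 */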

/*-------------------------------------------------------------------------*/

/* SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific.  Similarly with SPI master drivers.
 * Device registration normally goes into files like
 * arch/.../mach.../board-YYY.c, together with other read-only (flashable)
 * information about mainboard devices.
 */

struct boardinfo {
	struct list_head	list;
	struct spi_board_info	board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_master_list);

/*
 * Used to protect add/del operations on the board_info list and the
 * spi_master list, and their matching process.
 */
static DEFINE_MUTEX(board_lock);

/*
 * Prevents addition of devices with same chip select and
 * addition of devices below an unregistering controller.
 */
static DEFINE_MUTEX(spi_add_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @master: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately.  This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible to call spi_add_device() on the returned
 * spi_device structure to add it to the SPI master.  If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Return: a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_master *master)
{
	struct spi_device	*spi;

	if (!spi_master_get(master))
		return NULL;

	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
	if (!spi) {
		spi_master_put(master);
		return NULL;
	}

	spi->master = master;
	spi->dev.parent = &master->dev;
	spi->dev.bus = &spi_bus_type;
	spi->dev.release = spidev_release;
	spi->cs_gpio = -ENOENT;

	spin_lock_init(&spi->statistics.lock);

	device_initialize(&spi->dev);
	return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);

static void spi_dev_set_name(struct spi_device *spi)
{
	struct acpi_device *adev = ACPI_COMPANION(&spi->dev);

	if (adev) {
		dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
		return;
	}

	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev),
		     spi->chip_select);
}

static int spi_dev_check(struct device *dev, void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct spi_device *new_spi = data;

	if (spi->master == new_spi->master &&
	    spi->chip_select == new_spi->chip_select)
		return -EBUSY;
	return 0;
}

/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device.  Devices allocated with
 * spi_alloc_device can be added onto the spi bus with this function.
 *
 * Return: 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
	struct spi_master *master = spi->master;
	struct device *dev = master->dev.parent;
	int status;

	/* Chipselects are numbered 0..max; validate. */
	if (spi->chip_select >= master->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n",
			spi->chip_select,
			master->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	/* We need to make sure there's no other device with this
	 * chipselect **BEFORE** we call setup(), else we'll trash
	 * its configuration.  Lock against concurrent add() calls.
	 */
	mutex_lock(&spi_add_lock);

	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
	if (status) {
		dev_err(dev, "chipselect %d already in use\n",
				spi->chip_select);
		goto done;
	}

	/* Controller may unregister concurrently */
	if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
	    !device_is_registered(&master->dev)) {
		status = -ENODEV;
		goto done;
	}

	if (master->cs_gpios)
		spi->cs_gpio = master->cs_gpios[spi->chip_select];

	/* Drivers may modify this initial i/o setup, but will
	 * normally rely on the device being setup.  Devices
	 * using SPI_CS_HIGH can't coexist well otherwise...
	 */
	status = spi_setup(spi);
	if (status < 0) {
		dev_err(dev, "can't setup %s, status %d\n",
				dev_name(&spi->dev), status);
		goto done;
	}

	/* Device may be bound to an active driver when this returns */
	status = device_add(&spi->dev);
	if (status < 0)
		dev_err(dev, "can't add %s, status %d\n",
				dev_name(&spi->dev), status);
	else
		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));

done:
	mutex_unlock(&spi_add_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);
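/*
 * Example of the alloc/add pairing described above (illustrative sketch;
 * the modalias and parameters are made up):
 *
 *	struct spi_device *spi = spi_alloc_device(master);
 *
 *	if (!spi)
 *		return -ENOMEM;
 *	spi->chip_select = 0;
 *	spi->max_speed_hz = 1000000;
 *	strlcpy(spi->modalias, "fooChip", sizeof(spi->modalias));
 *	if (spi_add_device(spi)) {
 *		spi_dev_put(spi);
 *		return -ENODEV;
 *	}
 *
 * On failure, spi_dev_put() drops the reference taken by
 * spi_alloc_device() and releases the device.
 */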

/**
 * spi_new_device - instantiate one new SPI device
 * @master: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices.  Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Return: the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_master *master,
				  struct spi_board_info *chip)
{
	struct spi_device	*proxy;
	int			status;

	/* NOTE:  caller did any chip->bus_num checks necessary.
	 *
	 * Also, unless we change the return value convention to use
	 * error-or-pointer (not NULL-or-pointer), troubleshootability
	 * suggests syslogged diagnostics are best here (ugh).
	 */

	proxy = spi_alloc_device(master);
	if (!proxy)
		return NULL;

	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

	proxy->chip_select = chip->chip_select;
	proxy->max_speed_hz = chip->max_speed_hz;
	proxy->mode = chip->mode;
	proxy->irq = chip->irq;
	strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
	proxy->dev.platform_data = (void *) chip->platform_data;
	proxy->controller_data = chip->controller_data;
	proxy->controller_state = NULL;

	status = spi_add_device(proxy);
	if (status < 0) {
		spi_dev_put(proxy);
		return NULL;
	}

	return proxy;
}
EXPORT_SYMBOL_GPL(spi_new_device);

static void spi_match_master_to_boardinfo(struct spi_master *master,
				struct spi_board_info *bi)
{
	struct spi_device *dev;

	if (master->bus_num != bi->bus_num)
		return;

	dev = spi_new_device(master, bi);
	if (!dev)
		dev_err(master->dev.parent, "can't create new device for %s\n",
			bi->modalias);
}

/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table.  Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined.  We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
	struct boardinfo *bi;
	int i;

	if (!n)
		return -EINVAL;

	bi = kzalloc(n * sizeof(*bi), GFP_KERNEL);
	if (!bi)
		return -ENOMEM;

	for (i = 0; i < n; i++, bi++, info++) {
		struct spi_master *master;

		memcpy(&bi->board_info, info, sizeof(*info));
		mutex_lock(&board_lock);
		list_add_tail(&bi->list, &board_list);
		list_for_each_entry(master, &spi_master_list, list)
			spi_match_master_to_boardinfo(master, &bi->board_info);
		mutex_unlock(&board_lock);
	}

	return 0;
}
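/*
 * Example of a board table feeding this function (illustrative; the
 * device names and numbers are made up):
 *
 *	static struct spi_board_info myboard_spi_devs[] __initdata = {
 *		{
 *			.modalias	= "fooChip",
 *			.max_speed_hz	= 12000000,
 *			.bus_num	= 0,
 *			.chip_select	= 1,
 *			.mode		= SPI_MODE_0,
 *		},
 *	};
 *
 * Board init code would then call:
 *
 *	spi_register_board_info(myboard_spi_devs,
 *				ARRAY_SIZE(myboard_spi_devs));
 */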

/*-------------------------------------------------------------------------*/

static void spi_set_cs(struct spi_device *spi, bool enable)
{
	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	if (gpio_is_valid(spi->cs_gpio))
		gpio_set_value(spi->cs_gpio, !enable);
	else if (spi->master->set_cs)
		spi->master->set_cs(spi, !enable);
}

#ifdef CONFIG_HAS_DMA
static int spi_map_buf(struct spi_master *master, struct device *dev,
		       struct sg_table *sgt, void *buf, size_t len,
		       enum dma_data_direction dir)
{
	const bool vmalloced_buf = is_vmalloc_addr(buf);
	int desc_len;
	int sgs;
	struct page *vm_page;
	void *sg_buf;
	size_t min;
	int i, ret;

	if (vmalloced_buf) {
		desc_len = PAGE_SIZE;
		sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
	} else {
		desc_len = master->max_dma_len;
		sgs = DIV_ROUND_UP(len, desc_len);
	}

	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
	if (ret != 0)
		return ret;

	for (i = 0; i < sgs; i++) {

		if (vmalloced_buf) {
			/*
			 * Next scatterlist entry size is the minimum between
			 * the desc_len and the remaining buffer length that
			 * fits in a page.
			 */
			min = min_t(size_t, desc_len,
				    min_t(size_t, len,
					  PAGE_SIZE - offset_in_page(buf)));
			vm_page = vmalloc_to_page(buf);
			if (!vm_page) {
				sg_free_table(sgt);
				return -ENOMEM;
			}
			sg_set_page(&sgt->sgl[i], vm_page,
				    min, offset_in_page(buf));
		} else {
			min = min_t(size_t, len, desc_len);
			sg_buf = buf;
			sg_set_buf(&sgt->sgl[i], sg_buf, min);
		}

		buf += min;
		len -= min;
	}

	ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
	if (!ret)
		ret = -ENOMEM;
	if (ret < 0) {
		sg_free_table(sgt);
		return ret;
	}

	sgt->nents = ret;

	return 0;
}

static void spi_unmap_buf(struct spi_master *master, struct device *dev,
			  struct sg_table *sgt, enum dma_data_direction dir)
{
	if (sgt->orig_nents) {
		dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
		sg_free_table(sgt);
	}
}

static int __spi_map_msg(struct spi_master *master, struct spi_message *msg)
{
	struct device *tx_dev, *rx_dev;
	struct spi_transfer *xfer;
	int ret;

	if (!master->can_dma)
		return 0;

	if (master->dma_tx)
		tx_dev = master->dma_tx->device->dev;
	else
		tx_dev = &master->dev;

	if (master->dma_rx)
		rx_dev = master->dma_rx->device->dev;
	else
		rx_dev = &master->dev;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!master->can_dma(master, msg->spi, xfer))
			continue;

		if (xfer->tx_buf != NULL) {
			ret = spi_map_buf(master, tx_dev, &xfer->tx_sg,
					  (void *)xfer->tx_buf, xfer->len,
					  DMA_TO_DEVICE);
			if (ret != 0)
				return ret;
		}

		if (xfer->rx_buf != NULL) {
			ret = spi_map_buf(master, rx_dev, &xfer->rx_sg,
					  xfer->rx_buf, xfer->len,
					  DMA_FROM_DEVICE);
			if (ret != 0) {
				spi_unmap_buf(master, tx_dev, &xfer->tx_sg,
					      DMA_TO_DEVICE);
				return ret;
			}
		}
	}

	master->cur_msg_mapped = true;

	return 0;
}

static int __spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	struct device *tx_dev, *rx_dev;

	if (!master->cur_msg_mapped || !master->can_dma)
		return 0;

	if (master->dma_tx)
		tx_dev = master->dma_tx->device->dev;
	else
		tx_dev = &master->dev;

	if (master->dma_rx)
		rx_dev = master->dma_rx->device->dev;
	else
		rx_dev = &master->dev;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!master->can_dma(master, msg->spi, xfer))
			continue;

		spi_unmap_buf(master, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
		spi_unmap_buf(master, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
	}

	return 0;
}
#else /* !CONFIG_HAS_DMA */
static inline int __spi_map_msg(struct spi_master *master,
				struct spi_message *msg)
{
	return 0;
}

static inline int __spi_unmap_msg(struct spi_master *master,
				  struct spi_message *msg)
{
	return 0;
}
#endif /* !CONFIG_HAS_DMA */

static inline int spi_unmap_msg(struct spi_master *master,
				struct spi_message *msg)
{
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/*
		 * Restore tx_buf and rx_buf to NULL if they were
		 * replaced with the dummy buffers in spi_map_msg().
		 */
		if (xfer->tx_buf == master->dummy_tx)
			xfer->tx_buf = NULL;
		if (xfer->rx_buf == master->dummy_rx)
			xfer->rx_buf = NULL;
	}

	return __spi_unmap_msg(master, msg);
}

static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	void *tmp;
	unsigned int max_tx, max_rx;

	if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
		max_tx = 0;
		max_rx = 0;

		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			if ((master->flags & SPI_MASTER_MUST_TX) &&
			    !xfer->tx_buf)
				max_tx = max(xfer->len, max_tx);
			if ((master->flags & SPI_MASTER_MUST_RX) &&
			    !xfer->rx_buf)
				max_rx = max(xfer->len, max_rx);
		}

		if (max_tx) {
			tmp = krealloc(master->dummy_tx, max_tx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			master->dummy_tx = tmp;
			memset(tmp, 0, max_tx);
		}

		if (max_rx) {
			tmp = krealloc(master->dummy_rx, max_rx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			master->dummy_rx = tmp;
		}

		if (max_tx || max_rx) {
			list_for_each_entry(xfer, &msg->transfers,
					    transfer_list) {
				if (!xfer->len)
					continue;
				if (!xfer->tx_buf)
					xfer->tx_buf = master->dummy_tx;
				if (!xfer->rx_buf)
					xfer->rx_buf = master->dummy_rx;
			}
		}
	}

	return __spi_map_msg(master, msg);
}

/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation.  It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_master *master,
				    struct spi_message *msg)
{
	struct spi_transfer *xfer;
	bool keep_cs = false;
	int ret = 0;
	unsigned long ms = 1;
	struct spi_statistics *statm = &master->statistics;
	struct spi_statistics *stats = &msg->spi->statistics;

	spi_set_cs(msg->spi, true);

	SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
	SPI_STATISTICS_INCREMENT_FIELD(stats, messages);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		trace_spi_transfer_start(msg, xfer);

		spi_statistics_add_transfer_stats(statm, xfer, master);
		spi_statistics_add_transfer_stats(stats, xfer, master);

		if (xfer->tx_buf || xfer->rx_buf) {
			reinit_completion(&master->xfer_completion);

			ret = master->transfer_one(master, msg->spi, xfer);
			if (ret < 0) {
				SPI_STATISTICS_INCREMENT_FIELD(statm,
							       errors);
				SPI_STATISTICS_INCREMENT_FIELD(stats,
							       errors);
				dev_err(&msg->spi->dev,
					"SPI transfer failed: %d\n", ret);
				goto out;
			}

			if (ret > 0) {
				ret = 0;
				ms = xfer->len * 8 * 1000 / xfer->speed_hz;
				ms += ms + 100; /* some tolerance */

				ms = wait_for_completion_timeout(&master->xfer_completion,
								 msecs_to_jiffies(ms));
			}

			if (ms == 0) {
				SPI_STATISTICS_INCREMENT_FIELD(statm,
							       timedout);
				SPI_STATISTICS_INCREMENT_FIELD(stats,
							       timedout);
				dev_err(&msg->spi->dev,
					"SPI transfer timed out\n");
				msg->status = -ETIMEDOUT;
			}
		} else {
			if (xfer->len)
				dev_err(&msg->spi->dev,
					"Bufferless transfer has length %u\n",
					xfer->len);
		}

		trace_spi_transfer_stop(msg, xfer);

		if (msg->status != -EINPROGRESS)
			goto out;

		if (xfer->delay_usecs)
			udelay(xfer->delay_usecs);

		if (xfer->cs_change) {
			if (list_is_last(&xfer->transfer_list,
					 &msg->transfers)) {
				keep_cs = true;
			} else {
				spi_set_cs(msg->spi, false);
				udelay(10);
				spi_set_cs(msg->spi, true);
			}
		}

		msg->actual_length += xfer->len;
	}

out:
	if (ret != 0 || !keep_cs)
		spi_set_cs(msg->spi, false);

	if (msg->status == -EINPROGRESS)
		msg->status = ret;

	if (msg->status && master->handle_err)
		master->handle_err(master, msg);

	spi_finalize_current_message(master);

	return ret;
}

/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @master: the master reporting completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled.
 */
void spi_finalize_current_transfer(struct spi_master *master)
{
	complete(&master->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
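/*
 * Sketch of the interrupt-driven pattern described above (illustrative;
 * "foo" and its helpers are hypothetical): transfer_one() starts the
 * hardware and returns a positive value, and the driver's IRQ handler
 * later calls spi_finalize_current_transfer() to wake the core:
 *
 *	static int foo_transfer_one(struct spi_master *master,
 *				    struct spi_device *spi,
 *				    struct spi_transfer *xfer)
 *	{
 *		foo_start_hw_transfer(master, xfer);
 *		return 1;	(transfer still in flight)
 *	}
 *
 *	static irqreturn_t foo_irq(int irq, void *dev_id)
 *	{
 *		struct spi_master *master = dev_id;
 *
 *		spi_finalize_current_transfer(master);
 *		return IRQ_HANDLED;
 *	}
 */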

/**
 * __spi_pump_messages - function which processes spi message queue
 * @master: master to process queue for
 * @in_kthread: true if we are in the context of the message pump thread
 *
 * This function checks if there is any spi message in the queue that
 * needs processing and if so calls out to the driver to initialize hardware
 * and transfer each message.
 *
 * Note that it is called both from the kthread itself and also from
 * inside spi_sync(); the queue extraction handling at the top of the
 * function should deal with this safely.
 */
1063 {
1064         unsigned long flags;
1065         bool was_busy = false;
1066         int ret;
1067
1068         /* Lock queue */
1069         spin_lock_irqsave(&master->queue_lock, flags);
1070
1071         /* Make sure we are not already running a message */
1072         if (master->cur_msg) {
1073                 spin_unlock_irqrestore(&master->queue_lock, flags);
1074                 return;
1075         }
1076
1077         /* If another context is idling the device then defer */
1078         if (master->idling) {
1079                 queue_kthread_work(&master->kworker, &master->pump_messages);
1080                 spin_unlock_irqrestore(&master->queue_lock, flags);
1081                 return;
1082         }
1083
1084         /* Check if the queue is idle */
1085         if (list_empty(&master->queue) || !master->running) {
1086                 if (!master->busy) {
1087                         spin_unlock_irqrestore(&master->queue_lock, flags);
1088                         return;
1089                 }
1090
1091                 /* Only do teardown in the thread */
1092                 if (!in_kthread) {
1093                         queue_kthread_work(&master->kworker,
1094                                            &master->pump_messages);
1095                         spin_unlock_irqrestore(&master->queue_lock, flags);
1096                         return;
1097                 }
1098
1099                 master->busy = false;
1100                 master->idling = true;
1101                 spin_unlock_irqrestore(&master->queue_lock, flags);
1102
1103                 kfree(master->dummy_rx);
1104                 master->dummy_rx = NULL;
1105                 kfree(master->dummy_tx);
1106                 master->dummy_tx = NULL;
1107                 if (master->unprepare_transfer_hardware &&
1108                     master->unprepare_transfer_hardware(master))
1109                         dev_err(&master->dev,
1110                                 "failed to unprepare transfer hardware\n");
1111                 if (master->auto_runtime_pm) {
1112                         pm_runtime_mark_last_busy(master->dev.parent);
1113                         pm_runtime_put_autosuspend(master->dev.parent);
1114                 }
1115                 trace_spi_master_idle(master);
1116
1117                 spin_lock_irqsave(&master->queue_lock, flags);
1118                 master->idling = false;
1119                 spin_unlock_irqrestore(&master->queue_lock, flags);
1120                 return;
1121         }
1122
1123         /* Extract head of queue */
1124         master->cur_msg =
1125                 list_first_entry(&master->queue, struct spi_message, queue);
1126
1127         list_del_init(&master->cur_msg->queue);
1128         if (master->busy)
1129                 was_busy = true;
1130         else
1131                 master->busy = true;
1132         spin_unlock_irqrestore(&master->queue_lock, flags);
1133
1134         if (!was_busy && master->auto_runtime_pm) {
1135                 ret = pm_runtime_get_sync(master->dev.parent);
1136                 if (ret < 0) {
1137                         dev_err(&master->dev, "Failed to power device: %d\n",
1138                                 ret);
1139                         return;
1140                 }
1141         }
1142
1143         if (!was_busy)
1144                 trace_spi_master_busy(master);
1145
1146         if (!was_busy && master->prepare_transfer_hardware) {
1147                 ret = master->prepare_transfer_hardware(master);
1148                 if (ret) {
1149                         dev_err(&master->dev,
1150                                 "failed to prepare transfer hardware\n");
1151
1152                         if (master->auto_runtime_pm)
1153                                 pm_runtime_put(master->dev.parent);
1154                         return;
1155                 }
1156         }
1157
1158         trace_spi_message_start(master->cur_msg);
1159
1160         if (master->prepare_message) {
1161                 ret = master->prepare_message(master, master->cur_msg);
1162                 if (ret) {
1163                         dev_err(&master->dev,
1164                                 "failed to prepare message: %d\n", ret);
1165                         master->cur_msg->status = ret;
1166                         spi_finalize_current_message(master);
1167                         return;
1168                 }
1169                 master->cur_msg_prepared = true;
1170         }
1171
1172         ret = spi_map_msg(master, master->cur_msg);
1173         if (ret) {
1174                 master->cur_msg->status = ret;
1175                 spi_finalize_current_message(master);
1176                 return;
1177         }
1178
1179         ret = master->transfer_one_message(master, master->cur_msg);
1180         if (ret) {
1181                 dev_err(&master->dev,
1182                         "failed to transfer one message from queue\n");
1183                 return;
1184         }
1185 }
1186
1187 /**
1188  * spi_pump_messages - kthread work function which processes spi message queue
1189  * @work: pointer to kthread work struct contained in the master struct
1190  */
1191 static void spi_pump_messages(struct kthread_work *work)
1192 {
1193         struct spi_master *master =
1194                 container_of(work, struct spi_master, pump_messages);
1195
1196         __spi_pump_messages(master, true);
1197 }
1198
1199 static int spi_init_queue(struct spi_master *master)
1200 {
1201         struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
1202
1203         master->running = false;
1204         master->busy = false;
1205
1206         init_kthread_worker(&master->kworker);
1207         master->kworker_task = kthread_run(kthread_worker_fn,
1208                                            &master->kworker, "%s",
1209                                            dev_name(&master->dev));
1210         if (IS_ERR(master->kworker_task)) {
1211                 dev_err(&master->dev, "failed to create message pump task\n");
1212                 return PTR_ERR(master->kworker_task);
1213         }
1214         init_kthread_work(&master->pump_messages, spi_pump_messages);
1215
1216         /*
1217          * Master config will indicate if this controller should run the
1218          * message pump with high (realtime) priority to reduce the transfer
1219          * latency on the bus by minimising the delay between a transfer
1220          * request and the scheduling of the message pump thread. Without this
1221          * setting the message pump thread will remain at default priority.
1222          */
1223         if (master->rt) {
1224                 dev_info(&master->dev,
1225                         "will run message pump with realtime priority\n");
1226                 sched_setscheduler(master->kworker_task, SCHED_FIFO, &param);
1227         }
1228
1229         return 0;
1230 }
1231
1232 /**
1233  * spi_get_next_queued_message() - called by driver to check for queued
1234  * messages
1235  * @master: the master to check for queued messages
1236  *
1237  * If there are more messages in the queue, the next message is returned from
1238  * this call.
1239  *
1240  * Return: the next message in the queue, else NULL if the queue is empty.
1241  */
1242 struct spi_message *spi_get_next_queued_message(struct spi_master *master)
1243 {
1244         struct spi_message *next;
1245         unsigned long flags;
1246
1247         /* get a pointer to the next message, if any */
1248         spin_lock_irqsave(&master->queue_lock, flags);
1249         next = list_first_entry_or_null(&master->queue, struct spi_message,
1250                                         queue);
1251         spin_unlock_irqrestore(&master->queue_lock, flags);
1252
1253         return next;
1254 }
1255 EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
1256
1257 /**
1258  * spi_finalize_current_message() - the current message is complete
1259  * @master: the master to return the message to
1260  *
1261  * Called by the driver to notify the core that the message in the front of the
1262  * queue is complete and can be removed from the queue.
1263  */
1264 void spi_finalize_current_message(struct spi_master *master)
1265 {
1266         struct spi_message *mesg;
1267         unsigned long flags;
1268         int ret;
1269
1270         spin_lock_irqsave(&master->queue_lock, flags);
1271         mesg = master->cur_msg;
1272         spin_unlock_irqrestore(&master->queue_lock, flags);
1273
1274         spi_unmap_msg(master, mesg);
1275
1276         if (master->cur_msg_prepared && master->unprepare_message) {
1277                 ret = master->unprepare_message(master, mesg);
1278                 if (ret) {
1279                         dev_err(&master->dev,
1280                                 "failed to unprepare message: %d\n", ret);
1281                 }
1282         }
1283
1284         spin_lock_irqsave(&master->queue_lock, flags);
1285         master->cur_msg = NULL;
1286         master->cur_msg_prepared = false;
1287         queue_kthread_work(&master->kworker, &master->pump_messages);
1288         spin_unlock_irqrestore(&master->queue_lock, flags);
1289
1290         trace_spi_message_done(mesg);
1291
1292         mesg->state = NULL;
1293         if (mesg->complete)
1294                 mesg->complete(mesg->context);
1295 }
1296 EXPORT_SYMBOL_GPL(spi_finalize_current_message);
1297
1298 static int spi_start_queue(struct spi_master *master)
1299 {
1300         unsigned long flags;
1301
1302         spin_lock_irqsave(&master->queue_lock, flags);
1303
1304         if (master->running || master->busy) {
1305                 spin_unlock_irqrestore(&master->queue_lock, flags);
1306                 return -EBUSY;
1307         }
1308
1309         master->running = true;
1310         master->cur_msg = NULL;
1311         spin_unlock_irqrestore(&master->queue_lock, flags);
1312
1313         queue_kthread_work(&master->kworker, &master->pump_messages);
1314
1315         return 0;
1316 }
1317
1318 static int spi_stop_queue(struct spi_master *master)
1319 {
1320         unsigned long flags;
1321         unsigned limit = 500;
1322         int ret = 0;
1323
1324         spin_lock_irqsave(&master->queue_lock, flags);
1325
1326         /*
1327          * This is a bit lame, but is optimized for the common execution path.
1328          * A wait_queue on the master->busy could be used, but then the common
1329          * execution path (pump_messages) would be required to call wake_up or
1330          * friends on every SPI message. Do this instead.
1331          */
1332         while ((!list_empty(&master->queue) || master->busy) && limit--) {
1333                 spin_unlock_irqrestore(&master->queue_lock, flags);
1334                 usleep_range(10000, 11000);
1335                 spin_lock_irqsave(&master->queue_lock, flags);
1336         }
1337
1338         if (!list_empty(&master->queue) || master->busy)
1339                 ret = -EBUSY;
1340         else
1341                 master->running = false;
1342
1343         spin_unlock_irqrestore(&master->queue_lock, flags);
1344
1345         if (ret) {
1346                 dev_warn(&master->dev,
1347                          "could not stop message queue\n");
1348                 return ret;
1349         }
1350         return ret;
1351 }
1352
1353 static int spi_destroy_queue(struct spi_master *master)
1354 {
1355         int ret;
1356
1357         ret = spi_stop_queue(master);
1358
1359         /*
1360          * flush_kthread_worker will block until all work is done.
1361          * If the reason that stop_queue timed out is that the work will never
1362          * finish, then it does no good to call flush/stop thread, so
1363          * return anyway.
1364          */
1365         if (ret) {
1366                 dev_err(&master->dev, "problem destroying queue\n");
1367                 return ret;
1368         }
1369
1370         flush_kthread_worker(&master->kworker);
1371         kthread_stop(master->kworker_task);
1372
1373         return 0;
1374 }
1375
1376 static int __spi_queued_transfer(struct spi_device *spi,
1377                                  struct spi_message *msg,
1378                                  bool need_pump)
1379 {
1380         struct spi_master *master = spi->master;
1381         unsigned long flags;
1382
1383         spin_lock_irqsave(&master->queue_lock, flags);
1384
1385         if (!master->running) {
1386                 spin_unlock_irqrestore(&master->queue_lock, flags);
1387                 return -ESHUTDOWN;
1388         }
1389         msg->actual_length = 0;
1390         msg->status = -EINPROGRESS;
1391
1392         list_add_tail(&msg->queue, &master->queue);
1393         if (!master->busy && need_pump)
1394                 queue_kthread_work(&master->kworker, &master->pump_messages);
1395
1396         spin_unlock_irqrestore(&master->queue_lock, flags);
1397         return 0;
1398 }
1399
1400 /**
1401  * spi_queued_transfer - transfer function for queued transfers
1402  * @spi: spi device which is requesting transfer
 * @msg: spi message to be queued to the driver's message queue
 *
 * Return: zero on success, else a negative error code.
 */
static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
{
	return __spi_queued_transfer(spi, msg, true);
}

static int spi_master_initialize_queue(struct spi_master *master)
{
	int ret;

	master->transfer = spi_queued_transfer;
	if (!master->transfer_one_message)
		master->transfer_one_message = spi_transfer_one_message;

	/* Initialize and start queue */
	ret = spi_init_queue(master);
	if (ret) {
		dev_err(&master->dev, "problem initializing queue\n");
		goto err_init_queue;
	}
	master->queued = true;
	ret = spi_start_queue(master);
	if (ret) {
		dev_err(&master->dev, "problem starting queue\n");
		goto err_start_queue;
	}

	return 0;

err_start_queue:
	spi_destroy_queue(master);
err_init_queue:
	return ret;
}

/*-------------------------------------------------------------------------*/

#if defined(CONFIG_OF)
static struct spi_device *
of_register_spi_device(struct spi_master *master, struct device_node *nc)
{
	struct spi_device *spi;
	int rc;
	u32 value;

	/* Alloc an spi_device */
	spi = spi_alloc_device(master);
	if (!spi) {
		dev_err(&master->dev, "spi_device alloc error for %s\n",
			nc->full_name);
		rc = -ENOMEM;
		goto err_out;
	}

	/* Select device driver */
	rc = of_modalias_node(nc, spi->modalias,
				sizeof(spi->modalias));
	if (rc < 0) {
		dev_err(&master->dev, "cannot find modalias for %s\n",
			nc->full_name);
		goto err_out;
	}

	/* Device address */
	rc = of_property_read_u32(nc, "reg", &value);
	if (rc) {
		dev_err(&master->dev, "%s has no valid 'reg' property (%d)\n",
			nc->full_name, rc);
		goto err_out;
	}
	spi->chip_select = value;

	/* Mode (clock phase/polarity/etc.) */
	if (of_find_property(nc, "spi-cpha", NULL))
		spi->mode |= SPI_CPHA;
	if (of_find_property(nc, "spi-cpol", NULL))
		spi->mode |= SPI_CPOL;
	if (of_find_property(nc, "spi-cs-high", NULL))
		spi->mode |= SPI_CS_HIGH;
	if (of_find_property(nc, "spi-3wire", NULL))
		spi->mode |= SPI_3WIRE;
	if (of_find_property(nc, "spi-lsb-first", NULL))
		spi->mode |= SPI_LSB_FIRST;

	/* Device DUAL/QUAD mode */
	if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
		switch (value) {
		case 1:
			break;
		case 2:
			spi->mode |= SPI_TX_DUAL;
			break;
		case 4:
			spi->mode |= SPI_TX_QUAD;
			break;
		default:
			dev_warn(&master->dev,
				"spi-tx-bus-width %d not supported\n",
				value);
			break;
		}
	}

	if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
		switch (value) {
		case 1:
			break;
		case 2:
			spi->mode |= SPI_RX_DUAL;
			break;
		case 4:
			spi->mode |= SPI_RX_QUAD;
			break;
		default:
			dev_warn(&master->dev,
				"spi-rx-bus-width %d not supported\n",
				value);
			break;
		}
	}

	/* Device speed */
	rc = of_property_read_u32(nc, "spi-max-frequency", &value);
	if (rc) {
		dev_err(&master->dev, "%s has no valid 'spi-max-frequency' property (%d)\n",
			nc->full_name, rc);
		goto err_out;
	}
	spi->max_speed_hz = value;

	/* Store a pointer to the node in the device structure */
	of_node_get(nc);
	spi->dev.of_node = nc;

	/* Register the new device */
	rc = spi_add_device(spi);
	if (rc) {
		dev_err(&master->dev, "spi_device register error %s\n",
			nc->full_name);
		goto err_out;
	}

	return spi;

err_out:
	spi_dev_put(spi);
	return ERR_PTR(rc);
}
1554
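/*
 * An illustrative device-tree fragment (a hypothetical board file, not
 * taken from this source) exercising the properties parsed above.
 * "reg" selects chip select 0; spi-cpol plus spi-cpha yields SPI_MODE_3:
 *
 *	&spi0 {
 *		flash@0 {
 *			compatible = "jedec,spi-nor";
 *			reg = <0>;
 *			spi-max-frequency = <20000000>;
 *			spi-cpol;
 *			spi-cpha;
 *		};
 *	};
 */
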
1555 /**
1556  * of_register_spi_devices() - Register child devices onto the SPI bus
1557  * @master:     Pointer to spi_master device
1558  *
1559  * Registers an spi_device for each child node of the master node which has
1560  * a 'reg' property.
1561  */
1562 static void of_register_spi_devices(struct spi_master *master)
1563 {
1564         struct spi_device *spi;
1565         struct device_node *nc;
1566
1567         if (!master->dev.of_node)
1568                 return;
1569
1570         for_each_available_child_of_node(master->dev.of_node, nc) {
1571                 spi = of_register_spi_device(master, nc);
1572                 if (IS_ERR(spi))
1573                         dev_warn(&master->dev, "Failed to create SPI device for %s\n",
1574                                 nc->full_name);
1575         }
1576 }
1577 #else
1578 static void of_register_spi_devices(struct spi_master *master) { }
1579 #endif
1580
1581 #ifdef CONFIG_ACPI
1582 static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
1583 {
1584         struct spi_device *spi = data;
1585
1586         if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
1587                 struct acpi_resource_spi_serialbus *sb;
1588
1589                 sb = &ares->data.spi_serial_bus;
1590                 if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
1591                         spi->chip_select = sb->device_selection;
1592                         spi->max_speed_hz = sb->connection_speed;
1593
1594                         if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
1595                                 spi->mode |= SPI_CPHA;
1596                         if (sb->clock_polarity == ACPI_SPI_START_HIGH)
1597                                 spi->mode |= SPI_CPOL;
1598                         if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
1599                                 spi->mode |= SPI_CS_HIGH;
1600                 }
1601         } else if (spi->irq < 0) {
1602                 struct resource r;
1603
1604                 if (acpi_dev_resource_interrupt(ares, 0, &r))
1605                         spi->irq = r.start;
1606         }
1607
1608         /* Always tell the ACPI core to skip this resource */
1609         return 1;
1610 }
1611
1612 static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
1613                                        void *data, void **return_value)
1614 {
1615         struct spi_master *master = data;
1616         struct list_head resource_list;
1617         struct acpi_device *adev;
1618         struct spi_device *spi;
1619         int ret;
1620
1621         if (acpi_bus_get_device(handle, &adev))
1622                 return AE_OK;
1623         if (acpi_bus_get_status(adev) || !adev->status.present)
1624                 return AE_OK;
1625
1626         spi = spi_alloc_device(master);
1627         if (!spi) {
1628                 dev_err(&master->dev, "failed to allocate SPI device for %s\n",
1629                         dev_name(&adev->dev));
1630                 return AE_NO_MEMORY;
1631         }
1632
1633         ACPI_COMPANION_SET(&spi->dev, adev);
1634         spi->irq = -1;
1635
1636         INIT_LIST_HEAD(&resource_list);
1637         ret = acpi_dev_get_resources(adev, &resource_list,
1638                                      acpi_spi_add_resource, spi);
1639         acpi_dev_free_resource_list(&resource_list);
1640
1641         if (ret < 0 || !spi->max_speed_hz) {
1642                 spi_dev_put(spi);
1643                 return AE_OK;
1644         }
1645
1646         adev->power.flags.ignore_parent = true;
1647         strlcpy(spi->modalias, acpi_device_hid(adev), sizeof(spi->modalias));
1648         if (spi_add_device(spi)) {
1649                 adev->power.flags.ignore_parent = false;
1650                 dev_err(&master->dev, "failed to add SPI device %s from ACPI\n",
1651                         dev_name(&adev->dev));
1652                 spi_dev_put(spi);
1653         }
1654
1655         return AE_OK;
1656 }
1657
1658 static void acpi_register_spi_devices(struct spi_master *master)
1659 {
1660         acpi_status status;
1661         acpi_handle handle;
1662
1663         handle = ACPI_HANDLE(master->dev.parent);
1664         if (!handle)
1665                 return;
1666
1667         status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
1668                                      acpi_spi_add_device, NULL,
1669                                      master, NULL);
1670         if (ACPI_FAILURE(status))
1671                 dev_warn(&master->dev, "failed to enumerate SPI slaves\n");
1672 }
1673 #else
1674 static inline void acpi_register_spi_devices(struct spi_master *master) {}
1675 #endif /* CONFIG_ACPI */
1676
1677 static void spi_master_release(struct device *dev)
1678 {
1679         struct spi_master *master;
1680
1681         master = container_of(dev, struct spi_master, dev);
1682         kfree(master);
1683 }
1684
1685 static struct class spi_master_class = {
1686         .name           = "spi_master",
1687         .owner          = THIS_MODULE,
1688         .dev_release    = spi_master_release,
1689         .dev_groups     = spi_master_groups,
1690 };
1691
1692
1693 /**
1694  * spi_alloc_master - allocate SPI master controller
1695  * @dev: the controller, possibly using the platform_bus
1696  * @size: how much zeroed driver-private data to allocate; the pointer to this
1697  *      memory is in the driver_data field of the returned device,
1698  *      accessible with spi_master_get_devdata().
1699  * Context: can sleep
1700  *
1701  * This call is used only by SPI master controller drivers, which are the
1702  * only ones directly touching chip registers.  It's how they allocate
1703  * an spi_master structure, prior to calling spi_register_master().
1704  *
1705  * This must be called from context that can sleep.
1706  *
1707  * The caller is responsible for assigning the bus number and initializing
1708  * the master's methods before calling spi_register_master(); and (after errors
1709  * adding the device) calling spi_master_put() to prevent a memory leak.
1710  *
1711  * Return: the SPI master structure on success, else NULL.
1712  */
1713 struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
1714 {
1715         struct spi_master       *master;
1716
1717         if (!dev)
1718                 return NULL;
1719
1720         master = kzalloc(size + sizeof(*master), GFP_KERNEL);
1721         if (!master)
1722                 return NULL;
1723
1724         device_initialize(&master->dev);
1725         master->bus_num = -1;
1726         master->num_chipselect = 1;
1727         master->dev.class = &spi_master_class;
1728         master->dev.parent = dev;
1729         spi_master_set_devdata(master, &master[1]);
1730
1731         return master;
1732 }
1733 EXPORT_SYMBOL_GPL(spi_alloc_master);
1734
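/*
 * A minimal allocation sketch for a hypothetical controller driver;
 * the foo_priv type and pdev are illustrative assumptions:
 *
 *	struct foo_priv *priv;
 *	struct spi_master *master;
 *
 *	master = spi_alloc_master(&pdev->dev, sizeof(*priv));
 *	if (!master)
 *		return -ENOMEM;
 *	master->num_chipselect = 4;
 *	priv = spi_master_get_devdata(master);
 *
 *	ret = spi_register_master(master);
 *	if (ret)
 *		spi_master_put(master);
 *
 * The spi_master_put() on the error path prevents the leak noted above.
 */
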
1735 static void devm_spi_release_master(struct device *dev, void *master)
1736 {
1737         spi_master_put(*(struct spi_master **)master);
1738 }
1739
1740 /**
1741  * devm_spi_alloc_master - resource-managed spi_alloc_master()
1742  * @dev: physical device of SPI master
1743  * @size: how much zeroed driver-private data to allocate
1744  * Context: can sleep
1745  *
1746  * Allocate an SPI master and automatically release a reference on it
1747  * when @dev is unbound from its driver.  Drivers are thus relieved from
1748  * having to call spi_master_put().
1749  *
1750  * The arguments to this function are identical to spi_alloc_master().
1751  *
1752  * Return: the SPI master structure on success, else NULL.
1753  */
1754 struct spi_master *devm_spi_alloc_master(struct device *dev, unsigned int size)
1755 {
1756         struct spi_master **ptr, *master;
1757
1758         ptr = devres_alloc(devm_spi_release_master, sizeof(*ptr),
1759                            GFP_KERNEL);
1760         if (!ptr)
1761                 return NULL;
1762
1763         master = spi_alloc_master(dev, size);
1764         if (master) {
1765                 master->devm_allocated = true;
1766                 *ptr = master;
1767                 devres_add(dev, ptr);
1768         } else {
1769                 devres_free(ptr);
1770         }
1771
1772         return master;
1773 }
1774 EXPORT_SYMBOL_GPL(devm_spi_alloc_master);
1775
1776 #ifdef CONFIG_OF
1777 static int of_spi_register_master(struct spi_master *master)
1778 {
1779         int nb, i, *cs;
1780         struct device_node *np = master->dev.of_node;
1781
1782         if (!np)
1783                 return 0;
1784
1785         nb = of_gpio_named_count(np, "cs-gpios");
1786         master->num_chipselect = max_t(int, nb, master->num_chipselect);
1787
1788         /* Return error only for an incorrectly formed cs-gpios property */
1789         if (nb == 0 || nb == -ENOENT)
1790                 return 0;
1791         else if (nb < 0)
1792                 return nb;
1793
1794         cs = devm_kzalloc(&master->dev,
1795                           sizeof(int) * master->num_chipselect,
1796                           GFP_KERNEL);
1797         master->cs_gpios = cs;
1798
1799         if (!master->cs_gpios)
1800                 return -ENOMEM;
1801
1802         for (i = 0; i < master->num_chipselect; i++)
1803                 cs[i] = -ENOENT;
1804
1805         for (i = 0; i < nb; i++)
1806                 cs[i] = of_get_named_gpio(np, "cs-gpios", i);
1807
1808         return 0;
1809 }
1810 #else
1811 static int of_spi_register_master(struct spi_master *master)
1812 {
1813         return 0;
1814 }
1815 #endif
1816
1817 /**
1818  * spi_register_master - register SPI master controller
1819  * @master: initialized master, originally from spi_alloc_master()
1820  * Context: can sleep
1821  *
1822  * SPI master controllers connect to their drivers using some non-SPI bus,
1823  * such as the platform bus.  The final stage of probe() in that code
1824  * includes calling spi_register_master() to hook up to this SPI bus glue.
1825  *
1826  * SPI controllers use board specific (often SOC specific) bus numbers,
1827  * and board-specific addressing for SPI devices combines those numbers
1828  * with chip select numbers.  Since SPI does not directly support dynamic
1829  * device identification, boards need configuration tables telling which
1830  * chip is at which address.
1831  *
1832  * This must be called from context that can sleep.  It returns zero on
1833  * success, else a negative error code (dropping the master's refcount).
1834  * After a successful return, the caller is responsible for calling
1835  * spi_unregister_master().
1836  *
1837  * Return: zero on success, else a negative error code.
1838  */
1839 int spi_register_master(struct spi_master *master)
1840 {
1841         static atomic_t         dyn_bus_id = ATOMIC_INIT((1<<15) - 1);
1842         struct device           *dev = master->dev.parent;
1843         struct boardinfo        *bi;
1844         int                     status = -ENODEV;
1845         int                     dynamic = 0;
1846
1847         if (!dev)
1848                 return -ENODEV;
1849
1850         status = of_spi_register_master(master);
1851         if (status)
1852                 return status;
1853
1854         /* even if it's just one always-selected device, there must
1855          * be at least one chipselect
1856          */
1857         if (master->num_chipselect == 0)
1858                 return -EINVAL;
1859
1860         if ((master->bus_num < 0) && master->dev.of_node)
1861                 master->bus_num = of_alias_get_id(master->dev.of_node, "spi");
1862
1863         /* convention:  dynamically assigned bus IDs count down from the max */
1864         if (master->bus_num < 0) {
1865                 /* FIXME switch to an IDR based scheme, something like
1866                  * I2C now uses, so we can't run out of "dynamic" IDs
1867                  */
1868                 master->bus_num = atomic_dec_return(&dyn_bus_id);
1869                 dynamic = 1;
1870         }
1871
1872         INIT_LIST_HEAD(&master->queue);
1873         spin_lock_init(&master->queue_lock);
1874         spin_lock_init(&master->bus_lock_spinlock);
1875         mutex_init(&master->bus_lock_mutex);
1876         master->bus_lock_flag = 0;
1877         init_completion(&master->xfer_completion);
1878         if (!master->max_dma_len)
1879                 master->max_dma_len = INT_MAX;
1880
1881         /* register the device, then userspace will see it.
1882          * registration fails if the bus ID is in use.
1883          */
1884         dev_set_name(&master->dev, "spi%u", master->bus_num);
1885         status = device_add(&master->dev);
1886         if (status < 0)
1887                 goto done;
1888         dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev),
1889                         dynamic ? " (dynamic)" : "");
1890
1891         /* If we're using a queued driver, start the queue */
1892         if (master->transfer)
1893                 dev_info(dev, "master is unqueued, this is deprecated\n");
1894         else {
1895                 status = spi_master_initialize_queue(master);
1896                 if (status) {
1897                         device_del(&master->dev);
1898                         goto done;
1899                 }
1900         }
1901         /* add statistics */
1902         spin_lock_init(&master->statistics.lock);
1903
1904         mutex_lock(&board_lock);
1905         list_add_tail(&master->list, &spi_master_list);
1906         list_for_each_entry(bi, &board_list, list)
1907                 spi_match_master_to_boardinfo(master, &bi->board_info);
1908         mutex_unlock(&board_lock);
1909
1910         /* Register devices from the device tree and ACPI */
1911         of_register_spi_devices(master);
1912         acpi_register_spi_devices(master);
1913 done:
1914         return status;
1915 }
1916 EXPORT_SYMBOL_GPL(spi_register_master);
1917
1918 static void devm_spi_unregister(struct device *dev, void *res)
1919 {
1920         spi_unregister_master(*(struct spi_master **)res);
1921 }
1922
1923 /**
1924  * devm_spi_register_master - register managed SPI master controller
1925  * @dev:    device managing SPI master
1926  * @master: initialized master, originally from spi_alloc_master()
1927  * Context: can sleep
1928  *
1929  * Register an SPI master as with spi_register_master(), with the master
1930  * automatically unregistered when @dev is unbound from its driver.
1931  *
1932  * Return: zero on success, else a negative error code.
1933  */
1934 int devm_spi_register_master(struct device *dev, struct spi_master *master)
1935 {
1936         struct spi_master **ptr;
1937         int ret;
1938
1939         ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
1940         if (!ptr)
1941                 return -ENOMEM;
1942
1943         ret = spi_register_master(master);
1944         if (!ret) {
1945                 *ptr = master;
1946                 devres_add(dev, ptr);
1947         } else {
1948                 devres_free(ptr);
1949         }
1950
1951         return ret;
1952 }
1953 EXPORT_SYMBOL_GPL(devm_spi_register_master);
1954
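/*
 * Usage sketch: with the managed variant, a probe() that has prepared
 * its master can end with a single call:
 *
 *	return devm_spi_register_master(&pdev->dev, master);
 *
 * The master is then unregistered automatically when @dev is unbound,
 * so the driver's remove() needs no spi_unregister_master() call.
 */
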
1955 static int __unregister(struct device *dev, void *null)
1956 {
1957         spi_unregister_device(to_spi_device(dev));
1958         return 0;
1959 }
1960
1961 /**
1962  * spi_unregister_master - unregister SPI master controller
1963  * @master: the master being unregistered
1964  * Context: can sleep
1965  *
1966  * This call is used only by SPI master controller drivers, which are the
1967  * only ones directly touching chip registers.
1968  *
1969  * This must be called from context that can sleep.
1970  */
1971 void spi_unregister_master(struct spi_master *master)
1972 {
1973         /* Prevent addition of new devices, unregister existing ones */
1974         if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
1975                 mutex_lock(&spi_add_lock);
1976
1977         device_for_each_child(&master->dev, NULL, __unregister);
1978
1979         if (master->queued) {
1980                 if (spi_destroy_queue(master))
1981                         dev_err(&master->dev, "queue remove failed\n");
1982         }
1983
1984         mutex_lock(&board_lock);
1985         list_del(&master->list);
1986         mutex_unlock(&board_lock);
1987
1988         device_del(&master->dev);
1989
1990         /* Release the last reference on the master if its driver
1991          * has not yet been converted to devm_spi_alloc_master().
1992          */
1993         if (!master->devm_allocated)
1994                 put_device(&master->dev);
1995
1996         if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
1997                 mutex_unlock(&spi_add_lock);
1998 }
1999 EXPORT_SYMBOL_GPL(spi_unregister_master);
2000
2001 int spi_master_suspend(struct spi_master *master)
2002 {
2003         int ret;
2004
2005         /* Basically no-ops for non-queued masters */
2006         if (!master->queued)
2007                 return 0;
2008
2009         ret = spi_stop_queue(master);
2010         if (ret)
2011                 dev_err(&master->dev, "queue stop failed\n");
2012
2013         return ret;
2014 }
2015 EXPORT_SYMBOL_GPL(spi_master_suspend);
2016
2017 int spi_master_resume(struct spi_master *master)
2018 {
2019         int ret;
2020
2021         if (!master->queued)
2022                 return 0;
2023
2024         ret = spi_start_queue(master);
2025         if (ret)
2026                 dev_err(&master->dev, "queue restart failed\n");
2027
2028         return ret;
2029 }
2030 EXPORT_SYMBOL_GPL(spi_master_resume);
2031
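/*
 * A sketch of how a controller driver might wire these helpers into
 * its dev_pm_ops; foo_* names are hypothetical, and the driver is
 * assumed to have stored the master with platform_set_drvdata():
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct spi_master *master = dev_get_drvdata(dev);
 *
 *		return spi_master_suspend(master);
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct spi_master *master = dev_get_drvdata(dev);
 *
 *		return spi_master_resume(master);
 *	}
 *
 *	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
 */
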
2032 static int __spi_master_match(struct device *dev, const void *data)
2033 {
2034         struct spi_master *m;
2035         const u16 *bus_num = data;
2036
2037         m = container_of(dev, struct spi_master, dev);
2038         return m->bus_num == *bus_num;
2039 }
2040
2041 /**
2042  * spi_busnum_to_master - look up master associated with bus_num
2043  * @bus_num: the master's bus number
2044  * Context: can sleep
2045  *
2046  * This call may be used with devices that are registered after
2047  * arch init time.  It returns a refcounted pointer to the relevant
2048  * spi_master (which the caller must release), or NULL if there is
2049  * no such master registered.
2050  *
2051  * Return: the SPI master structure on success, else NULL.
2052  */
2053 struct spi_master *spi_busnum_to_master(u16 bus_num)
2054 {
2055         struct device           *dev;
2056         struct spi_master       *master = NULL;
2057
2058         dev = class_find_device(&spi_master_class, NULL, &bus_num,
2059                                 __spi_master_match);
2060         if (dev)
2061                 master = container_of(dev, struct spi_master, dev);
2062         /* reference obtained in class_find_device() */
2063         return master;
2064 }
2065 EXPORT_SYMBOL_GPL(spi_busnum_to_master);
2066
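/*
 * Usage sketch: the reference taken by class_find_device() must be
 * dropped once the caller is done with the master:
 *
 *	struct spi_master *master = spi_busnum_to_master(0);
 *
 *	if (master) {
 *		...
 *		spi_master_put(master);
 *	}
 */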
2067
2068 /*-------------------------------------------------------------------------*/
2069
2070 /* Core methods for SPI master protocol drivers.  Some of the
2071  * other core methods are currently defined as inline functions.
2072  */
2073
2074 static int __spi_validate_bits_per_word(struct spi_master *master, u8 bits_per_word)
2075 {
2076         if (master->bits_per_word_mask) {
2077                 /* Only 32 bits fit in the mask */
2078                 if (bits_per_word > 32)
2079                         return -EINVAL;
2080                 if (!(master->bits_per_word_mask &
2081                                 SPI_BPW_MASK(bits_per_word)))
2082                         return -EINVAL;
2083         }
2084
2085         return 0;
2086 }
2087
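/*
 * Controller drivers declare the word sizes they accept by filling in
 * bits_per_word_mask before registration, e.g. (an illustrative
 * sketch; SPI_BPW_RANGE_MASK() covers contiguous ranges):
 *
 *	master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
 *
 * A zero mask is treated above as "no restriction".
 */
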
2088 /**
2089  * spi_setup - setup SPI mode and clock rate
2090  * @spi: the device whose settings are being modified
2091  * Context: can sleep, and no requests are queued to the device
2092  *
2093  * SPI protocol drivers may need to update the transfer mode if the
2094  * device doesn't work with its default.  They may likewise need
2095  * to update clock rates or word sizes from initial values.  This function
2096  * changes those settings, and must be called from a context that can sleep.
2097  * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
2098  * effect the next time the device is selected and data is transferred to
2099  * or from it.  When this function returns, the spi device is deselected.
2100  *
2101  * Note that this call will fail if the protocol driver specifies an option
2102  * that the underlying controller or its driver does not support.  For
2103  * example, not all hardware supports wire transfers using nine bit words,
2104  * LSB-first wire encoding, or active-high chipselects.
2105  *
2106  * Return: zero on success, else a negative error code.
2107  */
2108 int spi_setup(struct spi_device *spi)
2109 {
2110         unsigned        bad_bits, ugly_bits;
2111         int             status;
2112
2113         /* check mode to prevent DUAL and QUAD from being set at the same time
2114          */
2115         if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
2116                 ((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
2117                 dev_err(&spi->dev,
2118                 "setup: can not select dual and quad at the same time\n");
2119                 return -EINVAL;
2120         }
2121         /* in SPI_3WIRE mode, DUAL and QUAD are forbidden
2122          */
2123         if ((spi->mode & SPI_3WIRE) && (spi->mode &
2124                 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)))
2125                 return -EINVAL;
2126         /* help drivers fail *cleanly* when they need options
2127          * that aren't supported with their current master
2128          */
2129         bad_bits = spi->mode & ~spi->master->mode_bits;
2130         ugly_bits = bad_bits &
2131                     (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD);
2132         if (ugly_bits) {
2133                 dev_warn(&spi->dev,
2134                          "setup: ignoring unsupported mode bits %x\n",
2135                          ugly_bits);
2136                 spi->mode &= ~ugly_bits;
2137                 bad_bits &= ~ugly_bits;
2138         }
2139         if (bad_bits) {
2140                 dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
2141                         bad_bits);
2142                 return -EINVAL;
2143         }
2144
2145         if (!spi->bits_per_word)
2146                 spi->bits_per_word = 8;
2147
2148         status = __spi_validate_bits_per_word(spi->master, spi->bits_per_word);
2149         if (status)
2150                 return status;
2151
2152         if (!spi->max_speed_hz)
2153                 spi->max_speed_hz = spi->master->max_speed_hz;
2154
2155         if (spi->master->setup)
2156                 status = spi->master->setup(spi);
2157
2158         spi_set_cs(spi, false);
2159
2160         dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
2161                         (int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
2162                         (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
2163                         (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
2164                         (spi->mode & SPI_3WIRE) ? "3wire, " : "",
2165                         (spi->mode & SPI_LOOP) ? "loopback, " : "",
2166                         spi->bits_per_word, spi->max_speed_hz,
2167                         status);
2168
2169         return status;
2170 }
2171 EXPORT_SYMBOL_GPL(spi_setup);
2172
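/*
 * Typical protocol-driver usage, e.g. from probe() (a minimal sketch;
 * the values shown are arbitrary examples):
 *
 *	spi->mode = SPI_MODE_3;
 *	spi->bits_per_word = 8;
 *	spi->max_speed_hz = 1000000;
 *	return spi_setup(spi);
 */
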
2173 static int __spi_validate(struct spi_device *spi, struct spi_message *message)
2174 {
2175         struct spi_master *master = spi->master;
2176         struct spi_transfer *xfer;
2177         int w_size;
2178
2179         if (list_empty(&message->transfers))
2180                 return -EINVAL;
2181
2182         /* Half-duplex links include original MicroWire, and ones with
2183          * only one data pin like SPI_3WIRE (switches direction) or where
2184          * either MOSI or MISO is missing.  They can also be caused by
2185          * software limitations.
2186          */
2187         if ((master->flags & SPI_MASTER_HALF_DUPLEX)
2188                         || (spi->mode & SPI_3WIRE)) {
2189                 unsigned flags = master->flags;
2190
2191                 list_for_each_entry(xfer, &message->transfers, transfer_list) {
2192                         if (xfer->rx_buf && xfer->tx_buf)
2193                                 return -EINVAL;
2194                         if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf)
2195                                 return -EINVAL;
2196                         if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf)
2197                                 return -EINVAL;
2198                 }
2199         }
2200
2201         /*
2202          * Set transfer bits_per_word and max speed as spi device default if
2203          * it is not set for this transfer.
2204          * Set transfer tx_nbits and rx_nbits as single transfer default
2205          * (SPI_NBITS_SINGLE) if it is not set for this transfer.
2206          */
2207         message->frame_length = 0;
2208         list_for_each_entry(xfer, &message->transfers, transfer_list) {
2209                 message->frame_length += xfer->len;
2210                 if (!xfer->bits_per_word)
2211                         xfer->bits_per_word = spi->bits_per_word;
2212
2213                 if (!xfer->speed_hz)
2214                         xfer->speed_hz = spi->max_speed_hz;
2215                 if (!xfer->speed_hz)
2216                         xfer->speed_hz = master->max_speed_hz;
2217
2218                 if (master->max_speed_hz &&
2219                     xfer->speed_hz > master->max_speed_hz)
2220                         xfer->speed_hz = master->max_speed_hz;
2221
2222                 if (__spi_validate_bits_per_word(master, xfer->bits_per_word))
2223                         return -EINVAL;
2224
2225                 /*
2226                  * SPI transfer length should be a multiple of the SPI word
2227                  * size, where the word size is a power-of-two number of bytes
2228                  */
2229                 if (xfer->bits_per_word <= 8)
2230                         w_size = 1;
2231                 else if (xfer->bits_per_word <= 16)
2232                         w_size = 2;
2233                 else
2234                         w_size = 4;
2235
2236                 /* No partial transfers accepted */
2237                 if (xfer->len % w_size)
2238                         return -EINVAL;
2239
2240                 if (xfer->speed_hz && master->min_speed_hz &&
2241                     xfer->speed_hz < master->min_speed_hz)
2242                         return -EINVAL;
2243
2244                 if (xfer->tx_buf && !xfer->tx_nbits)
2245                         xfer->tx_nbits = SPI_NBITS_SINGLE;
2246                 if (xfer->rx_buf && !xfer->rx_nbits)
2247                         xfer->rx_nbits = SPI_NBITS_SINGLE;
2248                 /* check transfer tx/rx_nbits:
2249                  * 1. check the value matches one of single, dual and quad
2250                  * 2. check tx/rx_nbits match the mode in spi_device
2251                  */
2252                 if (xfer->tx_buf) {
2253                         if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
2254                                 xfer->tx_nbits != SPI_NBITS_DUAL &&
2255                                 xfer->tx_nbits != SPI_NBITS_QUAD)
2256                                 return -EINVAL;
2257                         if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
2258                                 !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
2259                                 return -EINVAL;
2260                         if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
2261                                 !(spi->mode & SPI_TX_QUAD))
2262                                 return -EINVAL;
2263                 }
2264                 /* check transfer rx_nbits */
2265                 if (xfer->rx_buf) {
2266                         if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
2267                                 xfer->rx_nbits != SPI_NBITS_DUAL &&
2268                                 xfer->rx_nbits != SPI_NBITS_QUAD)
2269                                 return -EINVAL;
2270                         if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
2271                                 !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
2272                                 return -EINVAL;
2273                         if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
2274                                 !(spi->mode & SPI_RX_QUAD))
2275                                 return -EINVAL;
2276                 }
2277         }
2278
2279         message->status = -EINPROGRESS;
2280
2281         return 0;
2282 }
2283
2284 static int __spi_async(struct spi_device *spi, struct spi_message *message)
2285 {
2286         struct spi_master *master = spi->master;
2287
2288         message->spi = spi;
2289
2290         SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_async);
2291         SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async);
2292
2293         trace_spi_message_submit(message);
2294
2295         return master->transfer(spi, message);
2296 }
2297
2298 /**
2299  * spi_async - asynchronous SPI transfer
2300  * @spi: device with which data will be exchanged
2301  * @message: describes the data transfers, including completion callback
2302  * Context: any (irqs may be blocked, etc)
2303  *
2304  * This call may be used from in_irq() and other contexts which can't sleep,
2305  * as well as from task contexts which can sleep.
2306  *
2307  * The completion callback is invoked in a context which can't sleep.
2308  * Before that invocation, the value of message->status is undefined.
2309  * When the callback is issued, message->status holds either zero (to
2310  * indicate complete success) or a negative error code.  After that
2311  * callback returns, the driver which issued the transfer request may
2312  * deallocate the associated memory; it's no longer in use by any SPI
2313  * core or controller driver code.
2314  *
2315  * Note that although all messages to a spi_device are handled in
2316  * FIFO order, messages may go to different devices in other orders.
2317  * Some device might be higher priority, or have various "hard" access
2318  * time requirements, for example.
2319  *
2320  * On detection of any fault during the transfer, processing of
2321  * the entire message is aborted, and the device is deselected.
2322  * Until returning from the associated message completion callback,
2323  * no other spi_message queued to that device will be processed.
2324  * (This rule applies equally to all the synchronous transfer calls,
2325  * which are wrappers around this core asynchronous primitive.)
2326  *
2327  * Return: zero on success, else a negative error code.
2328  */
2329 int spi_async(struct spi_device *spi, struct spi_message *message)
2330 {
2331         struct spi_master *master = spi->master;
2332         int ret;
2333         unsigned long flags;
2334
2335         ret = __spi_validate(spi, message);
2336         if (ret != 0)
2337                 return ret;
2338
2339         spin_lock_irqsave(&master->bus_lock_spinlock, flags);
2340
2341         if (master->bus_lock_flag)
2342                 ret = -EBUSY;
2343         else
2344                 ret = __spi_async(spi, message);
2345
2346         spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
2347
2348         return ret;
2349 }
2350 EXPORT_SYMBOL_GPL(spi_async);
2351
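/*
 * Usage sketch (hypothetical driver state in struct foo_req): the
 * message and its buffers must stay allocated until the completion
 * callback has run.
 *
 *	static void foo_complete(void *context)
 *	{
 *		struct foo_req *req = context;
 *
 *		if (req->msg.status)
 *			...	(handle the failed transfer)
 *	}
 *
 *	spi_message_init(&req->msg);
 *	spi_message_add_tail(&req->xfer, &req->msg);
 *	req->msg.complete = foo_complete;
 *	req->msg.context = req;
 *	ret = spi_async(spi, &req->msg);
 */
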
2352 /**
2353  * spi_async_locked - version of spi_async with exclusive bus usage
2354  * @spi: device with which data will be exchanged
2355  * @message: describes the data transfers, including completion callback
2356  * Context: any (irqs may be blocked, etc)
2357  *
2358  * This call may be used from in_irq() and other contexts which can't sleep,
2359  * as well as from task contexts which can sleep.
2360  *
2361  * The completion callback is invoked in a context which can't sleep.
2362  * Before that invocation, the value of message->status is undefined.
2363  * When the callback is issued, message->status holds either zero (to
2364  * indicate complete success) or a negative error code.  After that
2365  * callback returns, the driver which issued the transfer request may
2366  * deallocate the associated memory; it's no longer in use by any SPI
2367  * core or controller driver code.
2368  *
2369  * Note that although all messages to a spi_device are handled in
2370  * FIFO order, messages may go to different devices in other orders.
2371  * Some device might be higher priority, or have various "hard" access
2372  * time requirements, for example.
2373  *
2374  * On detection of any fault during the transfer, processing of
2375  * the entire message is aborted, and the device is deselected.
2376  * Until returning from the associated message completion callback,
2377  * no other spi_message queued to that device will be processed.
2378  * (This rule applies equally to all the synchronous transfer calls,
2379  * which are wrappers around this core asynchronous primitive.)
2380  *
2381  * Return: zero on success, else a negative error code.
2382  */
2383 int spi_async_locked(struct spi_device *spi, struct spi_message *message)
2384 {
2385         struct spi_master *master = spi->master;
2386         int ret;
2387         unsigned long flags;
2388
2389         ret = __spi_validate(spi, message);
2390         if (ret != 0)
2391                 return ret;
2392
2393         spin_lock_irqsave(&master->bus_lock_spinlock, flags);
2394
2395         ret = __spi_async(spi, message);
2396
2397         spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
2398
2399         return ret;
2400
2401 }
2402 EXPORT_SYMBOL_GPL(spi_async_locked);
2403
2404
2405 /*-------------------------------------------------------------------------*/
2406
2407 /* Utility methods for SPI master protocol drivers, layered on
2408  * top of the core.  Some other utility methods are defined as
2409  * inline functions.
2410  */
2411
2412 static void spi_complete(void *arg)
2413 {
2414         complete(arg);
2415 }
2416
2417 static int __spi_sync(struct spi_device *spi, struct spi_message *message,
2418                       int bus_locked)
2419 {
2420         DECLARE_COMPLETION_ONSTACK(done);
2421         int status;
2422         struct spi_master *master = spi->master;
2423         unsigned long flags;
2424
2425         status = __spi_validate(spi, message);
2426         if (status != 0)
2427                 return status;
2428
2429         message->complete = spi_complete;
2430         message->context = &done;
2431         message->spi = spi;
2432
2433         SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_sync);
2434         SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync);
2435
2436         if (!bus_locked)
2437                 mutex_lock(&master->bus_lock_mutex);
2438
2439         /* If we're not using the legacy transfer method then we will
2440          * try to transfer in the calling context, so special-case it.
2441          * This code would be less tricky if we could remove the
2442          * support for driver implemented message queues.
2443          */
2444         if (master->transfer == spi_queued_transfer) {
2445                 spin_lock_irqsave(&master->bus_lock_spinlock, flags);
2446
2447                 trace_spi_message_submit(message);
2448
2449                 status = __spi_queued_transfer(spi, message, false);
2450
2451                 spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
2452         } else {
2453                 status = spi_async_locked(spi, message);
2454         }
2455
2456         if (!bus_locked)
2457                 mutex_unlock(&master->bus_lock_mutex);
2458
2459         if (status == 0) {
2460                 /* Push out the messages in the calling context if we
2461                  * can.
2462                  */
2463                 if (master->transfer == spi_queued_transfer) {
2464                         SPI_STATISTICS_INCREMENT_FIELD(&master->statistics,
2465                                                        spi_sync_immediate);
2466                         SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
2467                                                        spi_sync_immediate);
2468                         __spi_pump_messages(master, false);
2469                 }
2470
2471                 wait_for_completion(&done);
2472                 status = message->status;
2473         }
2474         message->context = NULL;
2475         return status;
2476 }
2477
2478 /**
2479  * spi_sync - blocking/synchronous SPI data transfers
2480  * @spi: device with which data will be exchanged
2481  * @message: describes the data transfers
2482  * Context: can sleep
2483  *
2484  * This call may only be used from a context that may sleep.  The sleep
2485  * is non-interruptible, and has no timeout.  Low-overhead controller
2486  * drivers may DMA directly into and out of the message buffers.
2487  *
2488  * Note that the SPI device's chip select is active during the message,
2489  * and then is normally disabled between messages.  Drivers for some
2490  * frequently-used devices may want to minimize costs of selecting a chip,
2491  * by leaving it selected in anticipation that the next message will go
2492  * to the same chip.  (That may increase power usage.)
2493  *
2494  * Also, the caller is guaranteeing that the memory associated with the
2495  * message will not be freed before this call returns.
2496  *
2497  * Return: zero on success, else a negative error code.
2498  */
2499 int spi_sync(struct spi_device *spi, struct spi_message *message)
2500 {
2501         return __spi_sync(spi, message, 0);
2502 }
2503 EXPORT_SYMBOL_GPL(spi_sync);
2504
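/*
 * Usage sketch (illustrative names; buffers should be DMA-safe, e.g.
 * heap-allocated, since controllers may DMA directly from them):
 *
 *	struct spi_transfer xfers[2] = {
 *		{ .tx_buf = cmd,  .len = 2, },
 *		{ .rx_buf = data, .len = 4, },
 *	};
 *	struct spi_message msg;
 *
 *	spi_message_init(&msg);
 *	spi_message_add_tail(&xfers[0], &msg);
 *	spi_message_add_tail(&xfers[1], &msg);
 *	status = spi_sync(spi, &msg);
 */
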
2505 /**
2506  * spi_sync_locked - version of spi_sync with exclusive bus usage
2507  * @spi: device with which data will be exchanged
2508  * @message: describes the data transfers
2509  * Context: can sleep
2510  *
2511  * This call may only be used from a context that may sleep.  The sleep
2512  * is non-interruptible, and has no timeout.  Low-overhead controller
2513  * drivers may DMA directly into and out of the message buffers.
2514  *
2515  * This call should be used by drivers that require exclusive access to the
2516  * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
2517  * be released by a spi_bus_unlock call when the exclusive access is over.
2518  *
2519  * Return: zero on success, else a negative error code.
2520  */
2521 int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
2522 {
2523         return __spi_sync(spi, message, 1);
2524 }
2525 EXPORT_SYMBOL_GPL(spi_sync_locked);
2526
2527 /**
2528  * spi_bus_lock - obtain a lock for exclusive SPI bus usage
2529  * @master: SPI bus master that should be locked for exclusive bus access
2530  * Context: can sleep
2531  *
2532  * This call may only be used from a context that may sleep.  The sleep
2533  * is non-interruptible, and has no timeout.
2534  *
2535  * This call should be used by drivers that require exclusive access to the
2536  * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
2537  * exclusive access is over. Data transfer must be done by spi_sync_locked
2538  * and spi_async_locked calls when the SPI bus lock is held.
2539  *
2540  * Return: always zero.
2541  */
2542 int spi_bus_lock(struct spi_master *master)
2543 {
2544         unsigned long flags;
2545
2546         mutex_lock(&master->bus_lock_mutex);
2547
2548         spin_lock_irqsave(&master->bus_lock_spinlock, flags);
2549         master->bus_lock_flag = 1;
2550         spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
2551
2552         /* mutex remains locked until spi_bus_unlock is called */
2553
2554         return 0;
2555 }
2556 EXPORT_SYMBOL_GPL(spi_bus_lock);
2557
2558 /**
2559  * spi_bus_unlock - release the lock for exclusive SPI bus usage
2560  * @master: SPI bus master that was locked for exclusive bus access
2561  * Context: can sleep
2562  *
2563  * This call may only be used from a context that may sleep.  The sleep
2564  * is non-interruptible, and has no timeout.
2565  *
2566  * This call releases an SPI bus lock previously obtained by an spi_bus_lock
2567  * call.
2568  *
2569  * Return: always zero.
2570  */
2571 int spi_bus_unlock(struct spi_master *master)
2572 {
2573         master->bus_lock_flag = 0;
2574
2575         mutex_unlock(&master->bus_lock_mutex);
2576
2577         return 0;
2578 }
2579 EXPORT_SYMBOL_GPL(spi_bus_unlock);
2580
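/*
 * The expected locking sequence, sketched with two back-to-back
 * messages that no other device may interleave with:
 *
 *	spi_bus_lock(master);
 *	ret = spi_sync_locked(spi, &msg1);
 *	if (!ret)
 *		ret = spi_sync_locked(spi, &msg2);
 *	spi_bus_unlock(master);
 */
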
2581 /* portable code must never pass more than 32 bytes */
2582 #define SPI_BUFSIZ      max(32, SMP_CACHE_BYTES)
2583
2584 static u8       *buf;
2585
2586 /**
2587  * spi_write_then_read - SPI synchronous write followed by read
2588  * @spi: device with which data will be exchanged
2589  * @txbuf: data to be written (need not be dma-safe)
2590  * @n_tx: size of txbuf, in bytes
2591  * @rxbuf: buffer into which data will be read (need not be dma-safe)
2592  * @n_rx: size of rxbuf, in bytes
2593  * Context: can sleep
2594  *
2595  * This performs a half duplex MicroWire style transaction with the
2596  * device, sending txbuf and then reading rxbuf.  The return value
2597  * is zero for success, else a negative errno status code.
2598  * This call may only be used from a context that may sleep.
2599  *
2600  * Parameters to this routine are always copied using a small buffer;
2601  * portable code should never use this for more than 32 bytes.
2602  * Performance-sensitive or bulk transfer code should instead use
2603  * spi_{async,sync}() calls with dma-safe buffers.
2604  *
2605  * Return: zero on success, else a negative error code.
2606  */
2607 int spi_write_then_read(struct spi_device *spi,
2608                 const void *txbuf, unsigned n_tx,
2609                 void *rxbuf, unsigned n_rx)
2610 {
2611         static DEFINE_MUTEX(lock);
2612
2613         int                     status;
2614         struct spi_message      message;
2615         struct spi_transfer     x[2];
2616         u8                      *local_buf;
2617
2618         /* Use preallocated DMA-safe buffer if we can.  We can't avoid
2619          * copying here (as a pure convenience thing), but we can
2620          * keep heap costs out of the hot path unless someone else is
2621          * using the pre-allocated buffer or the transfer is too large.
2622          */
2623         if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
2624                 local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
2625                                     GFP_KERNEL | GFP_DMA);
2626                 if (!local_buf)
2627                         return -ENOMEM;
2628         } else {
2629                 local_buf = buf;
2630         }
2631
2632         spi_message_init(&message);
2633         memset(x, 0, sizeof(x));
2634         if (n_tx) {
2635                 x[0].len = n_tx;
2636                 spi_message_add_tail(&x[0], &message);
2637         }
2638         if (n_rx) {
2639                 x[1].len = n_rx;
2640                 spi_message_add_tail(&x[1], &message);
2641         }
2642
2643         memcpy(local_buf, txbuf, n_tx);
2644         x[0].tx_buf = local_buf;
2645         x[1].rx_buf = local_buf + n_tx;
2646
2647         /* do the i/o */
2648         status = spi_sync(spi, &message);
2649         if (status == 0)
2650                 memcpy(rxbuf, x[1].rx_buf, n_rx);
2651
2652         if (x[0].tx_buf == buf)
2653                 mutex_unlock(&lock);
2654         else
2655                 kfree(local_buf);
2656
2657         return status;
2658 }
2659 EXPORT_SYMBOL_GPL(spi_write_then_read);
2660
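/*
 * A typical register-read idiom built on this helper (a sketch; the
 * command-byte layout is device-specific and assumed here):
 *
 *	u8 cmd = 0x80 | reg;
 *	u8 val;
 *
 *	status = spi_write_then_read(spi, &cmd, 1, &val, 1);
 */
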
2661 /*-------------------------------------------------------------------------*/
2662
2663 #if IS_ENABLED(CONFIG_OF_DYNAMIC)
2664 static int __spi_of_device_match(struct device *dev, void *data)
2665 {
2666         return dev->of_node == data;
2667 }
2668
2669 /* must call put_device() when done with returned spi_device device */
2670 static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
2671 {
2672         struct device *dev = bus_find_device(&spi_bus_type, NULL, node,
2673                                                 __spi_of_device_match);
2674         return dev ? to_spi_device(dev) : NULL;
2675 }
2676
2677 static int __spi_of_master_match(struct device *dev, const void *data)
2678 {
2679         return dev->of_node == data;
2680 }
2681
2682 /* spi masters are not registered on the spi_bus, so find them another way */
2683 static struct spi_master *of_find_spi_master_by_node(struct device_node *node)
2684 {
2685         struct device *dev;
2686
2687         dev = class_find_device(&spi_master_class, NULL, node,
2688                                 __spi_of_master_match);
2689         if (!dev)
2690                 return NULL;
2691
2692         /* reference obtained in class_find_device() */
2693         return container_of(dev, struct spi_master, dev);
2694 }
2695
2696 static int of_spi_notify(struct notifier_block *nb, unsigned long action,
2697                          void *arg)
2698 {
2699         struct of_reconfig_data *rd = arg;
2700         struct spi_master *master;
2701         struct spi_device *spi;
2702
2703         switch (of_reconfig_get_state_change(action, arg)) {
2704         case OF_RECONFIG_CHANGE_ADD:
2705                 master = of_find_spi_master_by_node(rd->dn->parent);
2706                 if (master == NULL)
2707                         return NOTIFY_OK;       /* not for us */
2708
2709                 spi = of_register_spi_device(master, rd->dn);
2710                 put_device(&master->dev);
2711
2712                 if (IS_ERR(spi)) {
2713                         pr_err("%s: failed to create for '%s'\n",
2714                                         __func__, rd->dn->full_name);
2715                         return notifier_from_errno(PTR_ERR(spi));
2716                 }
2717                 break;
2718
2719         case OF_RECONFIG_CHANGE_REMOVE:
2720                 /* find our device by node */
2721                 spi = of_find_spi_device_by_node(rd->dn);
2722                 if (spi == NULL)
2723                         return NOTIFY_OK;       /* no? not meant for us */
2724
2725                 /* unregister takes one ref away */
2726                 spi_unregister_device(spi);
2727
2728                 /* and put the reference of the find */
2729                 put_device(&spi->dev);
2730                 break;
2731         }
2732
2733         return NOTIFY_OK;
2734 }
2735
2736 static struct notifier_block spi_of_notifier = {
2737         .notifier_call = of_spi_notify,
2738 };
2739 #else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
2740 extern struct notifier_block spi_of_notifier;
2741 #endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
2742
2743 static int __init spi_init(void)
2744 {
2745         int     status;
2746
2747         buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
2748         if (!buf) {
2749                 status = -ENOMEM;
2750                 goto err0;
2751         }
2752
2753         status = bus_register(&spi_bus_type);
2754         if (status < 0)
2755                 goto err1;
2756
2757         status = class_register(&spi_master_class);
2758         if (status < 0)
2759                 goto err2;
2760
2761         if (IS_ENABLED(CONFIG_OF_DYNAMIC))
2762                 WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
2763
2764         return 0;
2765
2766 err2:
2767         bus_unregister(&spi_bus_type);
2768 err1:
2769         kfree(buf);
2770         buf = NULL;
2771 err0:
2772         return status;
2773 }
2774
2775 /* board_info is normally registered in arch_initcall(),
2776  * but even essential drivers wait till later.
2777  *
2778  * REVISIT only boardinfo really needs static linking. The rest (device and
2779  * driver registration) _could_ be dynamically linked (modular) ... costs
2780  * include needing to have boardinfo data structures be much more public.
2781  */
2782 postcore_initcall(spi_init);
2783