// SPDX-License-Identifier: GPL-2.0-or-later
// SPI init/core code
//
// Copyright (C) 2005 David Brownell
// Copyright (C) 2008 Secret Lab Technologies Ltd.

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/clk/clk-conf.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/gpio/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/property.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <uapi/linux/sched/types.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/ioport.h>
#include <linux/acpi.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/percpu.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_start);
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop);

#include "internals.h"

static DEFINE_IDR(spi_master_idr);

static void spidev_release(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);

	spi_controller_put(spi->controller);
	kfree(spi->driver_override);
	free_percpu(spi->pcpu_statistics);
	kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
	const struct spi_device *spi = to_spi_device(dev);
	int len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *a,
				     const char *buf, size_t count)
{
	struct spi_device *spi = to_spi_device(dev);
	int ret;

	ret = driver_set_override(dev, &spi->driver_override, buf, count);
	if (ret)
		return ret;

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *a, char *buf)
{
	const struct spi_device *spi = to_spi_device(dev);
	ssize_t len;

	device_lock(dev);
	len = snprintf(buf, PAGE_SIZE, "%s\n", spi->driver_override ? : "");
	device_unlock(dev);
	return len;
}
static DEVICE_ATTR_RW(driver_override);

static struct spi_statistics __percpu *spi_alloc_pcpu_stats(struct device *dev)
{
	struct spi_statistics __percpu *pcpu_stats;

	if (dev)
		pcpu_stats = devm_alloc_percpu(dev, struct spi_statistics);
	else
		pcpu_stats = alloc_percpu_gfp(struct spi_statistics, GFP_KERNEL);

	if (pcpu_stats) {
		int cpu;

		for_each_possible_cpu(cpu) {
			struct spi_statistics *stat;

			stat = per_cpu_ptr(pcpu_stats, cpu);
			u64_stats_init(&stat->syncp);
		}
	}
	return pcpu_stats;
}

#define spi_pcpu_stats_totalize(ret, in, field)				\
do {									\
	int i;								\
	ret = 0;							\
	for_each_possible_cpu(i) {					\
		const struct spi_statistics *pcpu_stats;		\
		u64 inc;						\
		unsigned int start;					\
		pcpu_stats = per_cpu_ptr(in, i);			\
		do {							\
			start = u64_stats_fetch_begin_irq(		\
					&pcpu_stats->syncp);		\
			inc = u64_stats_read(&pcpu_stats->field);	\
		} while (u64_stats_fetch_retry_irq(			\
					&pcpu_stats->syncp, start));	\
		ret += inc;						\
	}								\
} while (0)

#define SPI_STATISTICS_ATTRS(field, file)				\
static ssize_t spi_controller_##field##_show(struct device *dev,	\
					     struct device_attribute *attr, \
					     char *buf)			\
{									\
	struct spi_controller *ctlr = container_of(dev,		\
					 struct spi_controller, dev);	\
	return spi_statistics_##field##_show(ctlr->pcpu_statistics, buf); \
}									\
static struct device_attribute dev_attr_spi_controller_##field = {	\
	.attr = { .name = file, .mode = 0444 },				\
	.show = spi_controller_##field##_show,				\
};									\
static ssize_t spi_device_##field##_show(struct device *dev,		\
					 struct device_attribute *attr,	\
					 char *buf)			\
{									\
	struct spi_device *spi = to_spi_device(dev);			\
	return spi_statistics_##field##_show(spi->pcpu_statistics, buf); \
}									\
static struct device_attribute dev_attr_spi_device_##field = {		\
	.attr = { .name = file, .mode = 0444 },				\
	.show = spi_device_##field##_show,				\
}

#define SPI_STATISTICS_SHOW_NAME(name, file, field)			\
static ssize_t spi_statistics_##name##_show(struct spi_statistics __percpu *stat, \
					    char *buf)			\
{									\
	ssize_t len;							\
	u64 val;							\
	spi_pcpu_stats_totalize(val, stat, field);			\
	len = sysfs_emit(buf, "%llu\n", val);				\
	return len;							\
}									\
SPI_STATISTICS_ATTRS(name, file)

#define SPI_STATISTICS_SHOW(field)					\
	SPI_STATISTICS_SHOW_NAME(field, __stringify(field),		\
				 field)

SPI_STATISTICS_SHOW(messages);
SPI_STATISTICS_SHOW(transfers);
SPI_STATISTICS_SHOW(errors);
SPI_STATISTICS_SHOW(timedout);

SPI_STATISTICS_SHOW(spi_sync);
SPI_STATISTICS_SHOW(spi_sync_immediate);
SPI_STATISTICS_SHOW(spi_async);

SPI_STATISTICS_SHOW(bytes);
SPI_STATISTICS_SHOW(bytes_rx);
SPI_STATISTICS_SHOW(bytes_tx);

#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number)		\
	SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index,		\
				 "transfer_bytes_histo_" number,	\
				 transfer_bytes_histo[index])
SPI_STATISTICS_TRANSFER_BYTES_HISTO(0, "0-1");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(1, "2-3");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(2, "4-7");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(3, "8-15");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(4, "16-31");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(5, "32-63");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(6, "64-127");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(7, "128-255");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(8, "256-511");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(9, "512-1023");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");

SPI_STATISTICS_SHOW(transfers_split_maxsize);

static struct attribute *spi_dev_attrs[] = {
	&dev_attr_modalias.attr,
	&dev_attr_driver_override.attr,
	NULL,
};

static const struct attribute_group spi_dev_group = {
	.attrs = spi_dev_attrs,
};

static struct attribute *spi_device_statistics_attrs[] = {
	&dev_attr_spi_device_messages.attr,
	&dev_attr_spi_device_transfers.attr,
	&dev_attr_spi_device_errors.attr,
	&dev_attr_spi_device_timedout.attr,
	&dev_attr_spi_device_spi_sync.attr,
	&dev_attr_spi_device_spi_sync_immediate.attr,
	&dev_attr_spi_device_spi_async.attr,
	&dev_attr_spi_device_bytes.attr,
	&dev_attr_spi_device_bytes_rx.attr,
	&dev_attr_spi_device_bytes_tx.attr,
	&dev_attr_spi_device_transfer_bytes_histo0.attr,
	&dev_attr_spi_device_transfer_bytes_histo1.attr,
	&dev_attr_spi_device_transfer_bytes_histo2.attr,
	&dev_attr_spi_device_transfer_bytes_histo3.attr,
	&dev_attr_spi_device_transfer_bytes_histo4.attr,
	&dev_attr_spi_device_transfer_bytes_histo5.attr,
	&dev_attr_spi_device_transfer_bytes_histo6.attr,
	&dev_attr_spi_device_transfer_bytes_histo7.attr,
	&dev_attr_spi_device_transfer_bytes_histo8.attr,
	&dev_attr_spi_device_transfer_bytes_histo9.attr,
	&dev_attr_spi_device_transfer_bytes_histo10.attr,
	&dev_attr_spi_device_transfer_bytes_histo11.attr,
	&dev_attr_spi_device_transfer_bytes_histo12.attr,
	&dev_attr_spi_device_transfer_bytes_histo13.attr,
	&dev_attr_spi_device_transfer_bytes_histo14.attr,
	&dev_attr_spi_device_transfer_bytes_histo15.attr,
	&dev_attr_spi_device_transfer_bytes_histo16.attr,
	&dev_attr_spi_device_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_device_statistics_group = {
	.name = "statistics",
	.attrs = spi_device_statistics_attrs,
};

static const struct attribute_group *spi_dev_groups[] = {
	&spi_dev_group,
	&spi_device_statistics_group,
	NULL,
};

static struct attribute *spi_controller_statistics_attrs[] = {
	&dev_attr_spi_controller_messages.attr,
	&dev_attr_spi_controller_transfers.attr,
	&dev_attr_spi_controller_errors.attr,
	&dev_attr_spi_controller_timedout.attr,
	&dev_attr_spi_controller_spi_sync.attr,
	&dev_attr_spi_controller_spi_sync_immediate.attr,
	&dev_attr_spi_controller_spi_async.attr,
	&dev_attr_spi_controller_bytes.attr,
	&dev_attr_spi_controller_bytes_rx.attr,
	&dev_attr_spi_controller_bytes_tx.attr,
	&dev_attr_spi_controller_transfer_bytes_histo0.attr,
	&dev_attr_spi_controller_transfer_bytes_histo1.attr,
	&dev_attr_spi_controller_transfer_bytes_histo2.attr,
	&dev_attr_spi_controller_transfer_bytes_histo3.attr,
	&dev_attr_spi_controller_transfer_bytes_histo4.attr,
	&dev_attr_spi_controller_transfer_bytes_histo5.attr,
	&dev_attr_spi_controller_transfer_bytes_histo6.attr,
	&dev_attr_spi_controller_transfer_bytes_histo7.attr,
	&dev_attr_spi_controller_transfer_bytes_histo8.attr,
	&dev_attr_spi_controller_transfer_bytes_histo9.attr,
	&dev_attr_spi_controller_transfer_bytes_histo10.attr,
	&dev_attr_spi_controller_transfer_bytes_histo11.attr,
	&dev_attr_spi_controller_transfer_bytes_histo12.attr,
	&dev_attr_spi_controller_transfer_bytes_histo13.attr,
	&dev_attr_spi_controller_transfer_bytes_histo14.attr,
	&dev_attr_spi_controller_transfer_bytes_histo15.attr,
	&dev_attr_spi_controller_transfer_bytes_histo16.attr,
	&dev_attr_spi_controller_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_controller_statistics_group = {
	.name = "statistics",
	.attrs = spi_controller_statistics_attrs,
};

static const struct attribute_group *spi_master_groups[] = {
	&spi_controller_statistics_group,
	NULL,
};

static void spi_statistics_add_transfer_stats(struct spi_statistics __percpu *pcpu_stats,
					      struct spi_transfer *xfer,
					      struct spi_controller *ctlr)
{
	int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;
	struct spi_statistics *stats;

	if (l2len < 0)
		l2len = 0;

	get_cpu();
	stats = this_cpu_ptr(pcpu_stats);
	u64_stats_update_begin(&stats->syncp);

	u64_stats_inc(&stats->transfers);
	u64_stats_inc(&stats->transfer_bytes_histo[l2len]);

	u64_stats_add(&stats->bytes, xfer->len);
	if ((xfer->tx_buf) &&
	    (xfer->tx_buf != ctlr->dummy_tx))
		u64_stats_add(&stats->bytes_tx, xfer->len);
	if ((xfer->rx_buf) &&
	    (xfer->rx_buf != ctlr->dummy_rx))
		u64_stats_add(&stats->bytes_rx, xfer->len);

	u64_stats_update_end(&stats->syncp);
	put_cpu();
}

/*
 * modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */
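/*
 * For example (illustrative device name): a device whose modalias is
 * "mcp3008" emits the uevent MODALIAS=spi:mcp3008, which userspace
 * resolves against the "spi:*" aliases that drivers export via
 * MODULE_DEVICE_TABLE(spi, ...).
 */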
static const struct spi_device_id *spi_match_id(const struct spi_device_id *id, const char *name)
{
	while (id->name[0]) {
		if (!strcmp(name, id->name))
			return id;
		id++;
	}
	return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

	return spi_match_id(sdrv->id_table, sdev->modalias);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);

static int spi_match_device(struct device *dev, struct device_driver *drv)
{
	const struct spi_device *spi = to_spi_device(dev);
	const struct spi_driver *sdrv = to_spi_driver(drv);

	/* Check override first, and if set, only use the named driver */
	if (spi->driver_override)
		return strcmp(spi->driver_override, drv->name) == 0;

	/* Attempt an OF style match */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	if (sdrv->id_table)
		return !!spi_match_id(sdrv->id_table, spi->modalias);

	return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	const struct spi_device *spi = to_spi_device(dev);
	int rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
}

static int spi_probe(struct device *dev)
{
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);
	struct spi_device *spi = to_spi_device(dev);
	int ret;

	ret = of_clk_set_defaults(dev->of_node, false);
	if (ret)
		return ret;

	if (dev->of_node) {
		spi->irq = of_irq_get(dev->of_node, 0);
		if (spi->irq == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		if (spi->irq < 0)
			spi->irq = 0;
	}

	ret = dev_pm_domain_attach(dev, true);
	if (ret)
		return ret;

	if (sdrv->probe) {
		ret = sdrv->probe(spi);
		if (ret)
			dev_pm_domain_detach(dev, true);
	}

	return ret;
}

static void spi_remove(struct device *dev)
{
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);

	if (sdrv->remove)
		sdrv->remove(to_spi_device(dev));

	dev_pm_domain_detach(dev, true);
}

static void spi_shutdown(struct device *dev)
{
	if (dev->driver) {
		const struct spi_driver *sdrv = to_spi_driver(dev->driver);

		if (sdrv->shutdown)
			sdrv->shutdown(to_spi_device(dev));
	}
}

struct bus_type spi_bus_type = {
	.name		= "spi",
	.dev_groups	= spi_dev_groups,
	.match		= spi_match_device,
	.uevent		= spi_uevent,
	.probe		= spi_probe,
	.remove		= spi_remove,
	.shutdown	= spi_shutdown,
};
EXPORT_SYMBOL_GPL(spi_bus_type);

/**
 * __spi_register_driver - register a SPI driver
 * @owner: owner module of the driver to register
 * @sdrv: the driver to register
 * Context: can sleep
 *
 * Return: zero on success, else a negative error code.
 */
int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
	sdrv->driver.owner = owner;
	sdrv->driver.bus = &spi_bus_type;

	/*
	 * For Really Good Reasons we use spi: modaliases not of:
	 * modaliases for DT so module autoloading won't work if we
	 * don't have a spi_device_id as well as a compatible string.
	 */
	if (sdrv->driver.of_match_table) {
		const struct of_device_id *of_id;

		for (of_id = sdrv->driver.of_match_table; of_id->compatible[0];
		     of_id++) {
			const char *of_name;

			/* Strip off any vendor prefix */
			of_name = strnchr(of_id->compatible,
					  sizeof(of_id->compatible), ',');
			if (of_name)
				of_name++;
			else
				of_name = of_id->compatible;

			if (sdrv->id_table) {
				const struct spi_device_id *spi_id;

				spi_id = spi_match_id(sdrv->id_table, of_name);
				if (spi_id)
					continue;
			} else {
				if (strcmp(sdrv->driver.name, of_name) == 0)
					continue;
			}

			pr_warn("SPI driver %s has no spi_device_id for %s\n",
				sdrv->driver.name, of_id->compatible);
		}
	}

	return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__spi_register_driver);
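
/*
 * A minimal sketch of a client driver registration (the "foo" names are
 * hypothetical); spi_register_driver() wraps this function, passing
 * THIS_MODULE as the owner:
 *
 *	static const struct spi_device_id foo_ids[] = {
 *		{ "foo" },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(spi, foo_ids);
 *
 *	static struct spi_driver foo_driver = {
 *		.driver = {
 *			.name = "foo",
 *		},
 *		.id_table = foo_ids,
 *		.probe = foo_probe,
 *		.remove = foo_remove,
 *	};
 *	module_spi_driver(foo_driver);
 */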

/*-------------------------------------------------------------------------*/

/*
 * SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific. Similarly with SPI controller drivers.
 * Device registration normally goes into files like
 * arch/.../mach.../board-YYY.c, together with other read-only (flashable)
 * information about mainboard devices.
 */

struct boardinfo {
	struct list_head list;
	struct spi_board_info board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_controller_list);

/*
 * Used to protect add/del operations on the board_info list and the
 * spi_controller list, and their matching process. It is also used to
 * protect the struct idr object (spi_master_idr).
 */
static DEFINE_MUTEX(board_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @ctlr: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately. The driver can then fill the spi_device
 * with device parameters directly before calling spi_add_device() on it.
 *
 * The caller is responsible for calling spi_add_device() on the returned
 * spi_device structure to add it to the SPI controller. If the caller
 * needs to discard the spi_device without adding it, it should call
 * spi_dev_put() on it.
 *
 * Return: a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
{
	struct spi_device *spi;

	if (!spi_controller_get(ctlr))
		return NULL;

	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
	if (!spi) {
		spi_controller_put(ctlr);
		return NULL;
	}

	spi->pcpu_statistics = spi_alloc_pcpu_stats(NULL);
	if (!spi->pcpu_statistics) {
		kfree(spi);
		spi_controller_put(ctlr);
		return NULL;
	}

	spi->master = spi->controller = ctlr;
	spi->dev.parent = &ctlr->dev;
	spi->dev.bus = &spi_bus_type;
	spi->dev.release = spidev_release;
	spi->mode = ctlr->buswidth_override_bits;

	device_initialize(&spi->dev);
	return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);
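
/*
 * A minimal sketch of the allocate-then-add pattern (the modalias and
 * chip select values are illustrative only):
 *
 *	struct spi_device *spi = spi_alloc_device(ctlr);
 *
 *	if (!spi)
 *		return -ENOMEM;
 *	strscpy(spi->modalias, "eeprom", sizeof(spi->modalias));
 *	spi->chip_select = 0;
 *	spi->max_speed_hz = 1000000;
 *	if (spi_add_device(spi))
 *		spi_dev_put(spi);
 */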

static void spi_dev_set_name(struct spi_device *spi)
{
	struct acpi_device *adev = ACPI_COMPANION(&spi->dev);

	if (adev) {
		dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
		return;
	}

	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
		     spi->chip_select);
}

static int spi_dev_check(struct device *dev, void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct spi_device *new_spi = data;

	if (spi->controller == new_spi->controller &&
	    spi->chip_select == new_spi->chip_select)
		return -EBUSY;
	return 0;
}

static void spi_cleanup(struct spi_device *spi)
{
	if (spi->controller->cleanup)
		spi->controller->cleanup(spi);
}

static int __spi_add_device(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;
	struct device *dev = ctlr->dev.parent;
	int status;

	/*
	 * We need to make sure there's no other device with this
	 * chipselect **BEFORE** we call setup(), else we'll trash
	 * its configuration.
	 */
	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
	if (status) {
		dev_err(dev, "chipselect %d already in use\n",
			spi->chip_select);
		return status;
	}

	/* Controller may unregister concurrently */
	if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
	    !device_is_registered(&ctlr->dev)) {
		return -ENODEV;
	}

	if (ctlr->cs_gpiods)
		spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select];

	/*
	 * Drivers may modify this initial i/o setup, but will
	 * normally rely on the device being setup. Devices
	 * using SPI_CS_HIGH can't coexist well otherwise...
	 */
	status = spi_setup(spi);
	if (status < 0) {
		dev_err(dev, "can't setup %s, status %d\n",
			dev_name(&spi->dev), status);
		return status;
	}

	/* Device may be bound to an active driver when this returns */
	status = device_add(&spi->dev);
	if (status < 0) {
		dev_err(dev, "can't add %s, status %d\n",
			dev_name(&spi->dev), status);
		spi_cleanup(spi);
	} else {
		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
	}

	return status;
}

/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device. Devices allocated with
 * spi_alloc_device can be added onto the spi bus with this function.
 *
 * Return: 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;
	struct device *dev = ctlr->dev.parent;
	int status;

	/* Chipselects are numbered 0..max; validate. */
	if (spi->chip_select >= ctlr->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
			ctlr->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	mutex_lock(&ctlr->add_lock);
	status = __spi_add_device(spi);
	mutex_unlock(&ctlr->add_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);

static int spi_add_device_locked(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;
	struct device *dev = ctlr->dev.parent;

	/* Chipselects are numbered 0..max; validate. */
	if (spi->chip_select >= ctlr->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
			ctlr->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	WARN_ON(!mutex_is_locked(&ctlr->add_lock));
	return __spi_add_device(spi);
}

/**
 * spi_new_device - instantiate one new SPI device
 * @ctlr: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices. Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Return: the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_controller *ctlr,
				  struct spi_board_info *chip)
{
	struct spi_device *proxy;
	int status;

	/*
	 * NOTE: caller did any chip->bus_num checks necessary.
	 *
	 * Also, unless we change the return value convention to use
	 * error-or-pointer (not NULL-or-pointer), troubleshootability
	 * suggests syslogged diagnostics are best here (ugh).
	 */

	proxy = spi_alloc_device(ctlr);
	if (!proxy)
		return NULL;

	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

	proxy->chip_select = chip->chip_select;
	proxy->max_speed_hz = chip->max_speed_hz;
	proxy->mode = chip->mode;
	proxy->irq = chip->irq;
	strscpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
	proxy->dev.platform_data = (void *) chip->platform_data;
	proxy->controller_data = chip->controller_data;
	proxy->controller_state = NULL;

	if (chip->swnode) {
		status = device_add_software_node(&proxy->dev, chip->swnode);
		if (status) {
			dev_err(&ctlr->dev, "failed to add software node to '%s': %d\n",
				chip->modalias, status);
			goto err_dev_put;
		}
	}

	status = spi_add_device(proxy);
	if (status < 0)
		goto err_dev_put;

	return proxy;

err_dev_put:
	device_remove_software_node(&proxy->dev);
	spi_dev_put(proxy);
	return NULL;
}
EXPORT_SYMBOL_GPL(spi_new_device);

/**
 * spi_unregister_device - unregister a single SPI device
 * @spi: spi_device to unregister
 *
 * Start making the passed SPI device vanish. Normally this would be handled
 * by spi_unregister_controller().
 */
void spi_unregister_device(struct spi_device *spi)
{
	if (!spi)
		return;

	if (spi->dev.of_node) {
		of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
		of_node_put(spi->dev.of_node);
	}
	if (ACPI_COMPANION(&spi->dev))
		acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
	device_remove_software_node(&spi->dev);
	device_del(&spi->dev);
	spi_cleanup(spi);
	put_device(&spi->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_device);

static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
					      struct spi_board_info *bi)
{
	struct spi_device *dev;

	if (ctlr->bus_num != bi->bus_num)
		return;

	dev = spi_new_device(ctlr, bi);
	if (!dev)
		dev_err(ctlr->dev.parent, "can't create new device for %s\n",
			bi->modalias);
}

/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table. Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined. We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of any
 * embedded pointers (platform_data, etc.); they're copied as-is.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
	struct boardinfo *bi;
	int i;

	if (!n)
		return 0;

	bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
	if (!bi)
		return -ENOMEM;

	for (i = 0; i < n; i++, bi++, info++) {
		struct spi_controller *ctlr;

		memcpy(&bi->board_info, info, sizeof(*info));

		mutex_lock(&board_lock);
		list_add_tail(&bi->list, &board_list);
		list_for_each_entry(ctlr, &spi_controller_list, list)
			spi_match_controller_to_boardinfo(ctlr,
							  &bi->board_info);
		mutex_unlock(&board_lock);
	}

	return 0;
}
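
/*
 * A typical (illustrative) descriptor, registered from board init code:
 *
 *	static struct spi_board_info board_spi_devs[] __initdata = {
 *		{
 *			.modalias = "m25p80",
 *			.max_speed_hz = 20000000,
 *			.bus_num = 0,
 *			.chip_select = 1,
 *			.mode = SPI_MODE_0,
 *		},
 *	};
 *
 *	spi_register_board_info(board_spi_devs, ARRAY_SIZE(board_spi_devs));
 */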

/*-------------------------------------------------------------------------*/

/* Core methods for SPI resource management */

/**
 * spi_res_alloc - allocate a spi resource that is life-cycle managed
 *                 during the processing of a spi_message while using
 *                 spi_transfer_one
 * @spi: the spi device for which we allocate memory
 * @release: the release code to execute for this resource
 * @size: size to alloc and return
 * @gfp: GFP allocation flags
 *
 * Return: the pointer to the allocated data
 *
 * This may get enhanced in the future to allocate from a memory pool
 * of the @spi_device or @spi_controller to avoid repeated allocations.
 */
static void *spi_res_alloc(struct spi_device *spi, spi_res_release_t release,
			   size_t size, gfp_t gfp)
{
	struct spi_res *sres;

	sres = kzalloc(sizeof(*sres) + size, gfp);
	if (!sres)
		return NULL;

	INIT_LIST_HEAD(&sres->entry);
	sres->release = release;

	return sres->data;
}

/**
 * spi_res_free - free an spi resource
 * @res: pointer to the custom data of a resource
 */
static void spi_res_free(void *res)
{
	struct spi_res *sres = container_of(res, struct spi_res, data);

	if (!res)
		return;

	WARN_ON(!list_empty(&sres->entry));
	kfree(sres);
}

/**
 * spi_res_add - add a spi_res to the spi_message
 * @message: the spi message
 * @res: the spi_resource
 */
static void spi_res_add(struct spi_message *message, void *res)
{
	struct spi_res *sres = container_of(res, struct spi_res, data);

	WARN_ON(!list_empty(&sres->entry));
	list_add_tail(&sres->entry, &message->resources);
}

/**
 * spi_res_release - release all spi resources for this message
 * @ctlr: the @spi_controller
 * @message: the @spi_message
 */
static void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
{
	struct spi_res *res, *tmp;

	list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) {
		if (res->release)
			res->release(ctlr, message, res->data);

		list_del(&res->entry);

		kfree(res);
	}
}

/*-------------------------------------------------------------------------*/

static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
{
	bool activate = enable;

	/*
	 * Avoid calling into the driver (or doing delays) if the chip select
	 * isn't actually changing from the last time this was called.
	 */
	if (!force && ((enable && spi->controller->last_cs == spi->chip_select) ||
		       (!enable && spi->controller->last_cs != spi->chip_select)) &&
	    (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
		return;

	trace_spi_set_cs(spi, activate);

	spi->controller->last_cs = enable ? spi->chip_select : -1;
	spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;

	if ((spi->cs_gpiod || !spi->controller->set_cs_timing) && !activate) {
		spi_delay_exec(&spi->cs_hold, NULL);
	}

	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	if (spi->cs_gpiod) {
		if (!(spi->mode & SPI_NO_CS)) {
			/*
			 * Historically ACPI has had no means of expressing the
			 * GPIO polarity, so the SPISerialBus() resource defines
			 * it on a per-chip basis. In order to avoid a chain of
			 * negations, the GPIO polarity is considered to be
			 * Active High. Even for the cases when _DSD() is
			 * involved (in the updated versions of ACPI) the GPIO
			 * CS polarity must be defined Active High to avoid
			 * ambiguity. That's why we use enable, which takes
			 * SPI_CS_HIGH into account.
			 */
			if (has_acpi_companion(&spi->dev))
				gpiod_set_value_cansleep(spi->cs_gpiod, !enable);
			else
				/* Polarity handled by GPIO library */
				gpiod_set_value_cansleep(spi->cs_gpiod, activate);
		}
		/* Some SPI masters need both GPIO CS & slave_select */
		if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
		    spi->controller->set_cs)
			spi->controller->set_cs(spi, !enable);
	} else if (spi->controller->set_cs) {
		spi->controller->set_cs(spi, !enable);
	}

	if (spi->cs_gpiod || !spi->controller->set_cs_timing) {
		if (activate)
			spi_delay_exec(&spi->cs_setup, NULL);
		else
			spi_delay_exec(&spi->cs_inactive, NULL);
	}
}

#ifdef CONFIG_HAS_DMA
static int spi_map_buf_attrs(struct spi_controller *ctlr, struct device *dev,
			     struct sg_table *sgt, void *buf, size_t len,
			     enum dma_data_direction dir, unsigned long attrs)
{
	const bool vmalloced_buf = is_vmalloc_addr(buf);
	unsigned int max_seg_size = dma_get_max_seg_size(dev);
#ifdef CONFIG_HIGHMEM
	const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
			       (unsigned long)buf < (PKMAP_BASE +
						     (LAST_PKMAP * PAGE_SIZE)));
#else
	const bool kmap_buf = false;
#endif
	int desc_len;
	int sgs;
	struct page *vm_page;
	struct scatterlist *sg;
	void *sg_buf;
	size_t min;
	int i, ret;

	if (vmalloced_buf || kmap_buf) {
		desc_len = min_t(unsigned long, max_seg_size, PAGE_SIZE);
		sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
	} else if (virt_addr_valid(buf)) {
		desc_len = min_t(size_t, max_seg_size, ctlr->max_dma_len);
		sgs = DIV_ROUND_UP(len, desc_len);
	} else {
		return -EINVAL;
	}

	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
	if (ret != 0)
		return ret;

	sg = &sgt->sgl[0];
	for (i = 0; i < sgs; i++) {

		if (vmalloced_buf || kmap_buf) {
			/*
			 * Next scatterlist entry size is the minimum between
			 * the desc_len and the remaining buffer length that
			 * fits in a page.
			 */
			min = min_t(size_t, desc_len,
				    min_t(size_t, len,
					  PAGE_SIZE - offset_in_page(buf)));
			if (vmalloced_buf)
				vm_page = vmalloc_to_page(buf);
			else
				vm_page = kmap_to_page(buf);
			if (!vm_page) {
				sg_free_table(sgt);
				return -ENOMEM;
			}
			sg_set_page(sg, vm_page,
				    min, offset_in_page(buf));
		} else {
			min = min_t(size_t, len, desc_len);
			sg_buf = buf;
			sg_set_buf(sg, sg_buf, min);
		}

		buf += min;
		len -= min;
		sg = sg_next(sg);
	}

	ret = dma_map_sgtable(dev, sgt, dir, attrs);
	if (ret < 0) {
		sg_free_table(sgt);
		return ret;
	}

	return 0;
}

int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
		struct sg_table *sgt, void *buf, size_t len,
		enum dma_data_direction dir)
{
	return spi_map_buf_attrs(ctlr, dev, sgt, buf, len, dir, 0);
}

static void spi_unmap_buf_attrs(struct spi_controller *ctlr,
				struct device *dev, struct sg_table *sgt,
				enum dma_data_direction dir,
				unsigned long attrs)
{
	if (sgt->orig_nents) {
		dma_unmap_sgtable(dev, sgt, dir, attrs);
		sg_free_table(sgt);
		sgt->orig_nents = 0;
		sgt->nents = 0;
	}
}

void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
		   struct sg_table *sgt, enum dma_data_direction dir)
{
	spi_unmap_buf_attrs(ctlr, dev, sgt, dir, 0);
}

static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct device *tx_dev, *rx_dev;
	struct spi_transfer *xfer;
	int ret;

	if (!ctlr->can_dma)
		return 0;

	if (ctlr->dma_tx)
		tx_dev = ctlr->dma_tx->device->dev;
	else if (ctlr->dma_map_dev)
		tx_dev = ctlr->dma_map_dev;
	else
		tx_dev = ctlr->dev.parent;

	if (ctlr->dma_rx)
		rx_dev = ctlr->dma_rx->device->dev;
	else if (ctlr->dma_map_dev)
		rx_dev = ctlr->dma_map_dev;
	else
		rx_dev = ctlr->dev.parent;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/* The sync is done before each transfer. */
		unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;

		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		if (xfer->tx_buf != NULL) {
			ret = spi_map_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
						(void *)xfer->tx_buf,
						xfer->len, DMA_TO_DEVICE,
						attrs);
			if (ret != 0)
				return ret;
		}

		if (xfer->rx_buf != NULL) {
			ret = spi_map_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
						xfer->rx_buf, xfer->len,
						DMA_FROM_DEVICE, attrs);
			if (ret != 0) {
				spi_unmap_buf_attrs(ctlr, tx_dev,
						    &xfer->tx_sg, DMA_TO_DEVICE,
						    attrs);

				return ret;
			}
		}
	}

	ctlr->cur_rx_dma_dev = rx_dev;
	ctlr->cur_tx_dma_dev = tx_dev;
	ctlr->cur_msg_mapped = true;

	return 0;
}

static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct device *rx_dev = ctlr->cur_rx_dma_dev;
	struct device *tx_dev = ctlr->cur_tx_dma_dev;
	struct spi_transfer *xfer;

	if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
		return 0;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/* The sync has already been done after each transfer. */
		unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;

		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		spi_unmap_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
				    DMA_FROM_DEVICE, attrs);
		spi_unmap_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
				    DMA_TO_DEVICE, attrs);
	}

	ctlr->cur_msg_mapped = false;

	return 0;
}

static void spi_dma_sync_for_device(struct spi_controller *ctlr,
				    struct spi_transfer *xfer)
{
	struct device *rx_dev = ctlr->cur_rx_dma_dev;
	struct device *tx_dev = ctlr->cur_tx_dma_dev;

	if (!ctlr->cur_msg_mapped)
		return;

	if (xfer->tx_sg.orig_nents)
		dma_sync_sgtable_for_device(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
	if (xfer->rx_sg.orig_nents)
		dma_sync_sgtable_for_device(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
}

static void spi_dma_sync_for_cpu(struct spi_controller *ctlr,
				 struct spi_transfer *xfer)
{
	struct device *rx_dev = ctlr->cur_rx_dma_dev;
	struct device *tx_dev = ctlr->cur_tx_dma_dev;

	if (!ctlr->cur_msg_mapped)
		return;

	if (xfer->rx_sg.orig_nents)
		dma_sync_sgtable_for_cpu(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
	if (xfer->tx_sg.orig_nents)
		dma_sync_sgtable_for_cpu(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
}
#else /* !CONFIG_HAS_DMA */
static inline int __spi_map_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	return 0;
}

static inline int __spi_unmap_msg(struct spi_controller *ctlr,
				  struct spi_message *msg)
{
	return 0;
}

static void spi_dma_sync_for_device(struct spi_controller *ctrl,
				    struct spi_transfer *xfer)
{
}

static void spi_dma_sync_for_cpu(struct spi_controller *ctrl,
				 struct spi_transfer *xfer)
{
}
#endif /* !CONFIG_HAS_DMA */

static inline int spi_unmap_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/*
		 * Restore the original NULL value of tx_buf or rx_buf,
		 * i.e. undo the dummy-buffer substitution made by
		 * spi_map_msg().
		 */
		if (xfer->tx_buf == ctlr->dummy_tx)
			xfer->tx_buf = NULL;
		if (xfer->rx_buf == ctlr->dummy_rx)
			xfer->rx_buf = NULL;
	}

	return __spi_unmap_msg(ctlr, msg);
}

static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	void *tmp;
	unsigned int max_tx, max_rx;

	if ((ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX))
		&& !(msg->spi->mode & SPI_3WIRE)) {
		max_tx = 0;
		max_rx = 0;

		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
			    !xfer->tx_buf)
				max_tx = max(xfer->len, max_tx);
			if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
			    !xfer->rx_buf)
				max_rx = max(xfer->len, max_rx);
		}

		if (max_tx) {
			tmp = krealloc(ctlr->dummy_tx, max_tx,
				       GFP_KERNEL | GFP_DMA | __GFP_ZERO);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_tx = tmp;
		}

		if (max_rx) {
			tmp = krealloc(ctlr->dummy_rx, max_rx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_rx = tmp;
		}

		if (max_tx || max_rx) {
			list_for_each_entry(xfer, &msg->transfers,
					    transfer_list) {
				if (!xfer->len)
					continue;
				if (!xfer->tx_buf)
					xfer->tx_buf = ctlr->dummy_tx;
				if (!xfer->rx_buf)
					xfer->rx_buf = ctlr->dummy_rx;
			}
		}
	}

	return __spi_map_msg(ctlr, msg);
}

static int spi_transfer_wait(struct spi_controller *ctlr,
			     struct spi_message *msg,
			     struct spi_transfer *xfer)
{
	struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
	struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;
	u32 speed_hz = xfer->speed_hz;
	unsigned long long ms;

	if (spi_controller_is_slave(ctlr)) {
		if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
			dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n");
			return -EINTR;
		}
	} else {
		if (!speed_hz)
			speed_hz = 100000;

		/*
		 * For each byte we wait for 8 cycles of the SPI clock.
		 * Since speed is defined in Hz and we want milliseconds,
		 * apply the MSEC_PER_SEC multiplier before the division,
		 * otherwise short transfers would round down to 0.
		 */
		ms = 8LL * MSEC_PER_SEC * xfer->len;
		do_div(ms, speed_hz);
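
		/*
		 * Worked example: a 4096-byte transfer at 1 MHz gives
		 * 8 * 1000 * 4096 / 1000000, roughly 33 ms, before the
		 * tolerance below is applied.
		 */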

		/*
		 * Double the result and add a 200 ms tolerance; use the
		 * predefined maximum in case of overflow.
		 */
		ms += ms + 200;
		if (ms > UINT_MAX)
			ms = UINT_MAX;

		ms = wait_for_completion_timeout(&ctlr->xfer_completion,
						 msecs_to_jiffies(ms));

		if (ms == 0) {
			SPI_STATISTICS_INCREMENT_FIELD(statm, timedout);
			SPI_STATISTICS_INCREMENT_FIELD(stats, timedout);
			dev_err(&msg->spi->dev,
				"SPI transfer timed out\n");
			return -ETIMEDOUT;
		}
	}

	return 0;
}

static void _spi_transfer_delay_ns(u32 ns)
{
	if (!ns)
		return;
	if (ns <= NSEC_PER_USEC) {
		ndelay(ns);
	} else {
		u32 us = DIV_ROUND_UP(ns, NSEC_PER_USEC);

		if (us <= 10)
			udelay(us);
		else
			usleep_range(us, us + DIV_ROUND_UP(us, 10));
	}
}

int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer)
{
	u32 delay = _delay->value;
	u32 unit = _delay->unit;
	u32 hz;

	if (!delay)
		return 0;

	switch (unit) {
	case SPI_DELAY_UNIT_USECS:
		delay *= NSEC_PER_USEC;
		break;
	case SPI_DELAY_UNIT_NSECS:
		/* Nothing to do here */
		break;
	case SPI_DELAY_UNIT_SCK:
		/* Clock cycles need to be obtained from spi_transfer */
		if (!xfer)
			return -EINVAL;
		/*
		 * If the effective speed is unknown, approximate it by
		 * underestimating with half of the requested Hz.
		 */
		hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
		if (!hz)
			return -EINVAL;

		/* Convert delay to nanoseconds */
		delay *= DIV_ROUND_UP(NSEC_PER_SEC, hz);
		break;
	default:
		return -EINVAL;
	}

	return delay;
}
EXPORT_SYMBOL_GPL(spi_delay_to_ns);

int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer)
{
	int delay;

	might_sleep();

	if (!_delay)
		return -EINVAL;

	delay = spi_delay_to_ns(_delay, xfer);
	if (delay < 0)
		return delay;

	_spi_transfer_delay_ns(delay);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_delay_exec);
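
/*
 * A minimal sketch of describing and executing a delay (the values are
 * illustrative):
 *
 *	struct spi_delay d = {
 *		.value = 10,
 *		.unit = SPI_DELAY_UNIT_USECS,
 *	};
 *
 *	spi_delay_exec(&d, NULL);
 *
 * This delays for roughly 10 us. Note that SPI_DELAY_UNIT_SCK requires a
 * non-NULL transfer so that the clock rate is known.
 */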

static void _spi_transfer_cs_change_delay(struct spi_message *msg,
					  struct spi_transfer *xfer)
{
	u32 default_delay_ns = 10 * NSEC_PER_USEC;
	u32 delay = xfer->cs_change_delay.value;
	u32 unit = xfer->cs_change_delay.unit;
	int ret;

	/* Return early on "fast" mode - for everything but USECS */
	if (!delay) {
		if (unit == SPI_DELAY_UNIT_USECS)
			_spi_transfer_delay_ns(default_delay_ns);
		return;
	}

	ret = spi_delay_exec(&xfer->cs_change_delay, xfer);
	if (ret) {
		dev_err_once(&msg->spi->dev,
			     "Use of unsupported delay unit %i, using default of %luus\n",
			     unit, default_delay_ns / NSEC_PER_USEC);
		_spi_transfer_delay_ns(default_delay_ns);
	}
}

/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation. It provides
 * standard handling of delays and chip select management.
 */
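/*
 * Note (inferred from the implementation below): transfer_one() returns 0
 * when it has completed the transfer itself, or a positive value when the
 * transfer is still in flight, in which case the core waits for
 * spi_finalize_current_transfer() to be called.
 */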
static int spi_transfer_one_message(struct spi_controller *ctlr,
				    struct spi_message *msg)
{
	struct spi_transfer *xfer;
	bool keep_cs = false;
	int ret = 0;
	struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
	struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;

	xfer = list_first_entry(&msg->transfers, struct spi_transfer, transfer_list);
	spi_set_cs(msg->spi, !xfer->cs_off, false);

	SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
	SPI_STATISTICS_INCREMENT_FIELD(stats, messages);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		trace_spi_transfer_start(msg, xfer);

		spi_statistics_add_transfer_stats(statm, xfer, ctlr);
		spi_statistics_add_transfer_stats(stats, xfer, ctlr);

		if (!ctlr->ptp_sts_supported) {
			xfer->ptp_sts_word_pre = 0;
			ptp_read_system_prets(xfer->ptp_sts);
		}

		if ((xfer->tx_buf || xfer->rx_buf) && xfer->len) {
			reinit_completion(&ctlr->xfer_completion);

fallback_pio:
			spi_dma_sync_for_device(ctlr, xfer);
			ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
			if (ret < 0) {
				spi_dma_sync_for_cpu(ctlr, xfer);

				if (ctlr->cur_msg_mapped &&
				    (xfer->error & SPI_TRANS_FAIL_NO_START)) {
					__spi_unmap_msg(ctlr, msg);
					ctlr->fallback = true;
					xfer->error &= ~SPI_TRANS_FAIL_NO_START;
					goto fallback_pio;
				}

				SPI_STATISTICS_INCREMENT_FIELD(statm,
							       errors);
				SPI_STATISTICS_INCREMENT_FIELD(stats,
							       errors);
				dev_err(&msg->spi->dev,
					"SPI transfer failed: %d\n", ret);
				goto out;
			}

			if (ret > 0) {
				ret = spi_transfer_wait(ctlr, msg, xfer);
				if (ret < 0)
					msg->status = ret;
			}

			spi_dma_sync_for_cpu(ctlr, xfer);
		} else {
			if (xfer->len)
				dev_err(&msg->spi->dev,
					"Bufferless transfer has length %u\n",
					xfer->len);
		}

		if (!ctlr->ptp_sts_supported) {
			ptp_read_system_postts(xfer->ptp_sts);
			xfer->ptp_sts_word_post = xfer->len;
		}

		trace_spi_transfer_stop(msg, xfer);

		if (msg->status != -EINPROGRESS)
			goto out;

		spi_transfer_delay_exec(xfer);

		if (xfer->cs_change) {
			if (list_is_last(&xfer->transfer_list,
					 &msg->transfers)) {
				keep_cs = true;
			} else {
				if (!xfer->cs_off)
					spi_set_cs(msg->spi, false, false);
				_spi_transfer_cs_change_delay(msg, xfer);
				if (!list_next_entry(xfer, transfer_list)->cs_off)
					spi_set_cs(msg->spi, true, false);
			}
		} else if (!list_is_last(&xfer->transfer_list, &msg->transfers) &&
			   xfer->cs_off != list_next_entry(xfer, transfer_list)->cs_off) {
			spi_set_cs(msg->spi, xfer->cs_off, false);
		}

		msg->actual_length += xfer->len;
	}

out:
	if (ret != 0 || !keep_cs)
		spi_set_cs(msg->spi, false, false);

	if (msg->status == -EINPROGRESS)
		msg->status = ret;

	if (msg->status && ctlr->handle_err)
		ctlr->handle_err(ctlr, msg);

	spi_finalize_current_message(ctlr);

	return ret;
}

/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @ctlr: the controller reporting completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled.
 */
void spi_finalize_current_transfer(struct spi_controller *ctlr)
{
	complete(&ctlr->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
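
/*
 * A typical (illustrative) call site is a driver's completion interrupt
 * handler; foo_clear_irq() is a hypothetical helper:
 *
 *	static irqreturn_t foo_spi_irq(int irq, void *dev_id)
 *	{
 *		struct spi_controller *ctlr = dev_id;
 *
 *		foo_clear_irq(ctlr);
 *		spi_finalize_current_transfer(ctlr);
 *		return IRQ_HANDLED;
 *	}
 */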

static void spi_idle_runtime_pm(struct spi_controller *ctlr)
{
	if (ctlr->auto_runtime_pm) {
		pm_runtime_mark_last_busy(ctlr->dev.parent);
		pm_runtime_put_autosuspend(ctlr->dev.parent);
	}
}

static int __spi_pump_transfer_message(struct spi_controller *ctlr,
				       struct spi_message *msg, bool was_busy)
{
	struct spi_transfer *xfer;
	int ret;

	if (!was_busy && ctlr->auto_runtime_pm) {
		ret = pm_runtime_get_sync(ctlr->dev.parent);
		if (ret < 0) {
			pm_runtime_put_noidle(ctlr->dev.parent);
			dev_err(&ctlr->dev, "Failed to power device: %d\n",
				ret);
			return ret;
		}
	}

	if (!was_busy)
		trace_spi_controller_busy(ctlr);

	if (!was_busy && ctlr->prepare_transfer_hardware) {
		ret = ctlr->prepare_transfer_hardware(ctlr);
		if (ret) {
			dev_err(&ctlr->dev,
				"failed to prepare transfer hardware: %d\n",
				ret);

			if (ctlr->auto_runtime_pm)
				pm_runtime_put(ctlr->dev.parent);

			msg->status = ret;
			spi_finalize_current_message(ctlr);

			return ret;
		}
	}

	trace_spi_message_start(msg);

	ret = spi_split_transfers_maxsize(ctlr, msg,
					  spi_max_transfer_size(msg->spi),
					  GFP_KERNEL | GFP_DMA);
	if (ret) {
		msg->status = ret;
		spi_finalize_current_message(ctlr);
		return ret;
	}

	if (ctlr->prepare_message) {
		ret = ctlr->prepare_message(ctlr, msg);
		if (ret) {
			dev_err(&ctlr->dev, "failed to prepare message: %d\n",
				ret);
			msg->status = ret;
			spi_finalize_current_message(ctlr);
			return ret;
		}
		msg->prepared = true;
	}

	ret = spi_map_msg(ctlr, msg);
	if (ret) {
		msg->status = ret;
		spi_finalize_current_message(ctlr);
		return ret;
	}

	if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			xfer->ptp_sts_word_pre = 0;
			ptp_read_system_prets(xfer->ptp_sts);
		}
	}

	/*
	 * Drivers' implementations of transfer_one_message() must arrange
	 * for spi_finalize_current_message() to get called. Most drivers
	 * will do this in the calling context, but some don't. For those
	 * cases, a completion is used to guarantee that this function does
	 * not return until spi_finalize_current_message() is done accessing
	 * ctlr->cur_msg.
	 * The following two flags let us opportunistically skip the use of
	 * the completion, since using it involves expensive spin locks. In
	 * case of a race with the context that calls
	 * spi_finalize_current_message(), the completion will always be
	 * used, due to strict ordering of these flags using barriers.
	 */
1702 WRITE_ONCE(ctlr->cur_msg_incomplete, true);
1703 WRITE_ONCE(ctlr->cur_msg_need_completion, false);
1704 reinit_completion(&ctlr->cur_msg_completion);
1705 smp_wmb(); /* Make these available to spi_finalize_current_message() */
1706
1707 ret = ctlr->transfer_one_message(ctlr, msg);
1708 if (ret) {
1709 dev_err(&ctlr->dev,
1710 "failed to transfer one message from queue\n");
1711 return ret;
1712 }
1713
1714 WRITE_ONCE(ctlr->cur_msg_need_completion, true);
1715 smp_mb(); /* See spi_finalize_current_message()... */
1716 if (READ_ONCE(ctlr->cur_msg_incomplete))
1717 wait_for_completion(&ctlr->cur_msg_completion);
1718
1719 return 0;
1720 }
1721
1722 /**
1723 * __spi_pump_messages - function which processes spi message queue
1724 * @ctlr: controller to process queue for
1725 * @in_kthread: true if we are in the context of the message pump thread
1726 *
1727 * This function checks if there is any spi message in the queue that
1728 * needs processing and if so call out to the driver to initialize hardware
1729 * and transfer each message.
1730 *
1731 * Note that it is called both from the kthread itself and also from
1732 * inside spi_sync(); the queue extraction handling at the top of the
1733 * function should deal with this safely.
1734 */
__spi_pump_messages(struct spi_controller * ctlr,bool in_kthread)1735 static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
1736 {
1737 struct spi_message *msg;
1738 bool was_busy = false;
1739 unsigned long flags;
1740 int ret;
1741
1742 /* Take the IO mutex */
1743 mutex_lock(&ctlr->io_mutex);
1744
1745 /* Lock queue */
1746 spin_lock_irqsave(&ctlr->queue_lock, flags);
1747
1748 /* Make sure we are not already running a message */
1749 if (ctlr->cur_msg)
1750 goto out_unlock;
1751
1752 /* Check if the queue is idle */
1753 if (list_empty(&ctlr->queue) || !ctlr->running) {
1754 if (!ctlr->busy)
1755 goto out_unlock;
1756
1757 /* Defer any non-atomic teardown to the thread */
1758 if (!in_kthread) {
1759 if (!ctlr->dummy_rx && !ctlr->dummy_tx &&
1760 !ctlr->unprepare_transfer_hardware) {
1761 spi_idle_runtime_pm(ctlr);
1762 ctlr->busy = false;
1763 ctlr->queue_empty = true;
1764 trace_spi_controller_idle(ctlr);
1765 } else {
1766 kthread_queue_work(ctlr->kworker,
1767 &ctlr->pump_messages);
1768 }
1769 goto out_unlock;
1770 }
1771
1772 ctlr->busy = false;
1773 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1774
1775 kfree(ctlr->dummy_rx);
1776 ctlr->dummy_rx = NULL;
1777 kfree(ctlr->dummy_tx);
1778 ctlr->dummy_tx = NULL;
1779 if (ctlr->unprepare_transfer_hardware &&
1780 ctlr->unprepare_transfer_hardware(ctlr))
1781 dev_err(&ctlr->dev,
1782 "failed to unprepare transfer hardware\n");
1783 spi_idle_runtime_pm(ctlr);
1784 trace_spi_controller_idle(ctlr);
1785
1786 spin_lock_irqsave(&ctlr->queue_lock, flags);
1787 ctlr->queue_empty = true;
1788 goto out_unlock;
1789 }
1790
1791 /* Extract head of queue */
1792 msg = list_first_entry(&ctlr->queue, struct spi_message, queue);
1793 ctlr->cur_msg = msg;
1794
1795 list_del_init(&msg->queue);
1796 if (ctlr->busy)
1797 was_busy = true;
1798 else
1799 ctlr->busy = true;
1800 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1801
1802 ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
1803 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
1804
1805 ctlr->cur_msg = NULL;
1806 ctlr->fallback = false;
1807
1808 mutex_unlock(&ctlr->io_mutex);
1809
1810 /* Prod the scheduler in case transfer_one() was busy waiting */
1811 if (!ret)
1812 cond_resched();
1813 return;
1814
1815 out_unlock:
1816 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1817 mutex_unlock(&ctlr->io_mutex);
1818 }
1819
1820 /**
1821 * spi_pump_messages - kthread work function which processes spi message queue
1822 * @work: pointer to kthread work struct contained in the controller struct
1823 */
1824 static void spi_pump_messages(struct kthread_work *work)
1825 {
1826 struct spi_controller *ctlr =
1827 container_of(work, struct spi_controller, pump_messages);
1828
1829 __spi_pump_messages(ctlr, true);
1830 }
1831
1832 /**
1833 * spi_take_timestamp_pre - helper to collect the beginning of the TX timestamp
1834 * @ctlr: Pointer to the spi_controller structure of the driver
1835 * @xfer: Pointer to the transfer being timestamped
1836 * @progress: How many words (not bytes) have been transferred so far
1837 * @irqs_off: If true, will disable IRQs and preemption for the duration of the
1838 * transfer, for less jitter in time measurement. Only compatible
1839 * with PIO drivers. If true, the call must be followed up with
1840 * spi_take_timestamp_post() or otherwise the system will crash.
1841 * WARNING: for fully predictable results, the CPU frequency must
1842 * also be under control (governor).
1843 *
1844 * This is a helper for drivers to collect the beginning of the TX timestamp
1845 * for the requested byte from the SPI transfer. The frequency with which this
1846 * function must be called (once per word, once for the whole transfer, once
1847 * per batch of words etc) is arbitrary as long as the @tx buffer offset is
1848 * greater than or equal to the requested byte at the time of the call. The
1849 * timestamp is only taken once, at the first such call. It is assumed that
1850 * the driver advances its @tx buffer pointer monotonically.
1851 */
1852 void spi_take_timestamp_pre(struct spi_controller *ctlr,
1853 struct spi_transfer *xfer,
1854 size_t progress, bool irqs_off)
1855 {
1856 if (!xfer->ptp_sts)
1857 return;
1858
1859 if (xfer->timestamped)
1860 return;
1861
1862 if (progress > xfer->ptp_sts_word_pre)
1863 return;
1864
1865 /* Capture the resolution of the timestamp */
1866 xfer->ptp_sts_word_pre = progress;
1867
1868 if (irqs_off) {
1869 local_irq_save(ctlr->irq_flags);
1870 preempt_disable();
1871 }
1872
1873 ptp_read_system_prets(xfer->ptp_sts);
1874 }
1875 EXPORT_SYMBOL_GPL(spi_take_timestamp_pre);
1876
1877 /**
1878 * spi_take_timestamp_post - helper to collect the end of the TX timestamp
1879 * @ctlr: Pointer to the spi_controller structure of the driver
1880 * @xfer: Pointer to the transfer being timestamped
1881 * @progress: How many words (not bytes) have been transferred so far
1882 * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU.
1883 *
1884 * This is a helper for drivers to collect the end of the TX timestamp for
1885 * the requested byte from the SPI transfer. Can be called with an arbitrary
1886 * frequency: only the first call where @tx exceeds or is equal to the
1887 * requested word will be timestamped.
1888 */
1889 void spi_take_timestamp_post(struct spi_controller *ctlr,
1890 struct spi_transfer *xfer,
1891 size_t progress, bool irqs_off)
1892 {
1893 if (!xfer->ptp_sts)
1894 return;
1895
1896 if (xfer->timestamped)
1897 return;
1898
1899 if (progress < xfer->ptp_sts_word_post)
1900 return;
1901
1902 ptp_read_system_postts(xfer->ptp_sts);
1903
1904 if (irqs_off) {
1905 local_irq_restore(ctlr->irq_flags);
1906 preempt_enable();
1907 }
1908
1909 /* Capture the resolution of the timestamp */
1910 xfer->ptp_sts_word_post = progress;
1911
1912 xfer->timestamped = true;
1913 }
1914 EXPORT_SYMBOL_GPL(spi_take_timestamp_post);
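
/*
 * Example (an illustrative sketch, not taken from an in-tree driver): a PIO
 * driver brackets the write of each word with the two helpers above.  The
 * foo_* names, struct foo_priv and the FOO_TX_FIFO register are hypothetical:
 *
 *	static void foo_write_words(struct foo_priv *priv,
 *				    struct spi_transfer *xfer, size_t nwords)
 *	{
 *		const u32 *tx = xfer->tx_buf;
 *		size_t i;
 *
 *		for (i = 0; i < nwords; i++) {
 *			spi_take_timestamp_pre(priv->ctlr, xfer, i, false);
 *			writel(tx[i], priv->base + FOO_TX_FIFO);
 *			spi_take_timestamp_post(priv->ctlr, xfer, i + 1, false);
 *		}
 *	}
 */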
1915
1916 /**
1917 * spi_set_thread_rt - set the controller to pump at realtime priority
1918 * @ctlr: controller to boost priority of
1919 *
1920 * This can be called because the controller requested realtime priority
1921 * (by setting the ->rt value before calling spi_register_controller()) or
1922 * because a device on the bus said that its transfers needed realtime
1923 * priority.
1924 *
1925 * NOTE: at the moment if any device on a bus says it needs realtime then
1926 * the thread will be at realtime priority for all transfers on that
1927 * controller. If this eventually becomes a problem we may see if we can
1928 * find a way to boost the priority only temporarily during relevant
1929 * transfers.
1930 */
1931 static void spi_set_thread_rt(struct spi_controller *ctlr)
1932 {
1933 dev_info(&ctlr->dev,
1934 "will run message pump with realtime priority\n");
1935 sched_set_fifo(ctlr->kworker->task);
1936 }
1937
1938 static int spi_init_queue(struct spi_controller *ctlr)
1939 {
1940 ctlr->running = false;
1941 ctlr->busy = false;
1942 ctlr->queue_empty = true;
1943
1944 ctlr->kworker = kthread_create_worker(0, dev_name(&ctlr->dev));
1945 if (IS_ERR(ctlr->kworker)) {
1946 dev_err(&ctlr->dev, "failed to create message pump kworker\n");
1947 return PTR_ERR(ctlr->kworker);
1948 }
1949
1950 kthread_init_work(&ctlr->pump_messages, spi_pump_messages);
1951
1952 /*
1953 * Controller config will indicate if this controller should run the
1954 * message pump with high (realtime) priority to reduce the transfer
1955 * latency on the bus by minimising the delay between a transfer
1956 * request and the scheduling of the message pump thread. Without this
1957 * setting the message pump thread will remain at default priority.
1958 */
1959 if (ctlr->rt)
1960 spi_set_thread_rt(ctlr);
1961
1962 return 0;
1963 }
1964
1965 /**
1966 * spi_get_next_queued_message() - called by driver to check for queued
1967 * messages
1968 * @ctlr: the controller to check for queued messages
1969 *
1970 * If there are more messages in the queue, the next message is returned from
1971 * this call.
1972 *
1973 * Return: the next message in the queue, else NULL if the queue is empty.
1974 */
1975 struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
1976 {
1977 struct spi_message *next;
1978 unsigned long flags;
1979
1980 /* Get a pointer to the next message, if any */
1981 spin_lock_irqsave(&ctlr->queue_lock, flags);
1982 next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
1983 queue);
1984 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1985
1986 return next;
1987 }
1988 EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
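
/*
 * Example (hedged sketch): an interrupt-driven driver that walks the queue
 * itself may peek at the next message after finalizing the current one and
 * kick the hardware directly; foo_start_message() is hypothetical:
 *
 *	static void foo_message_done(struct spi_controller *ctlr)
 *	{
 *		struct spi_message *next;
 *
 *		spi_finalize_current_message(ctlr);
 *		next = spi_get_next_queued_message(ctlr);
 *		if (next)
 *			foo_start_message(ctlr, next);
 *	}
 */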
1989
1990 /**
1991 * spi_finalize_current_message() - the current message is complete
1992 * @ctlr: the controller to return the message to
1993 *
1994 * Called by the driver to notify the core that the message in the front of the
1995 * queue is complete and can be removed from the queue.
1996 */
1997 void spi_finalize_current_message(struct spi_controller *ctlr)
1998 {
1999 struct spi_transfer *xfer;
2000 struct spi_message *mesg;
2001 int ret;
2002
2003 mesg = ctlr->cur_msg;
2004
2005 if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
2006 list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
2007 ptp_read_system_postts(xfer->ptp_sts);
2008 xfer->ptp_sts_word_post = xfer->len;
2009 }
2010 }
2011
2012 if (unlikely(ctlr->ptp_sts_supported))
2013 list_for_each_entry(xfer, &mesg->transfers, transfer_list)
2014 WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped);
2015
2016 spi_unmap_msg(ctlr, mesg);
2017
2018 /*
2019 * In the prepare_messages callback the SPI bus has the opportunity
2020 * to split a transfer to smaller chunks.
2021 *
2022 * Release the split transfers here since spi_map_msg() is done on
2023 * the split transfers.
2024 */
2025 spi_res_release(ctlr, mesg);
2026
2027 if (mesg->prepared && ctlr->unprepare_message) {
2028 ret = ctlr->unprepare_message(ctlr, mesg);
2029 if (ret) {
2030 dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
2031 ret);
2032 }
2033 }
2034
2035 mesg->prepared = false;
2036
2037 WRITE_ONCE(ctlr->cur_msg_incomplete, false);
2038 smp_mb(); /* See __spi_pump_transfer_message()... */
2039 if (READ_ONCE(ctlr->cur_msg_need_completion))
2040 complete(&ctlr->cur_msg_completion);
2041
2042 trace_spi_message_done(mesg);
2043
2044 mesg->state = NULL;
2045 if (mesg->complete)
2046 mesg->complete(mesg->context);
2047 }
2048 EXPORT_SYMBOL_GPL(spi_finalize_current_message);
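
/*
 * Example (hedged sketch): a driver implementing ->transfer_one_message()
 * reports completion through the helper above once its last transfer is
 * done; foo_do_transfer() is hypothetical:
 *
 *	static int foo_transfer_one_message(struct spi_controller *ctlr,
 *					    struct spi_message *msg)
 *	{
 *		struct spi_transfer *xfer;
 *		int ret = 0;
 *
 *		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
 *			ret = foo_do_transfer(ctlr, msg->spi, xfer);
 *			if (ret)
 *				break;
 *			msg->actual_length += xfer->len;
 *		}
 *
 *		msg->status = ret;
 *		spi_finalize_current_message(ctlr);
 *		return ret;
 *	}
 */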
2049
2050 static int spi_start_queue(struct spi_controller *ctlr)
2051 {
2052 unsigned long flags;
2053
2054 spin_lock_irqsave(&ctlr->queue_lock, flags);
2055
2056 if (ctlr->running || ctlr->busy) {
2057 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2058 return -EBUSY;
2059 }
2060
2061 ctlr->running = true;
2062 ctlr->cur_msg = NULL;
2063 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2064
2065 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
2066
2067 return 0;
2068 }
2069
2070 static int spi_stop_queue(struct spi_controller *ctlr)
2071 {
2072 unsigned long flags;
2073 unsigned limit = 500;
2074 int ret = 0;
2075
2076 spin_lock_irqsave(&ctlr->queue_lock, flags);
2077
2078 /*
2079 * This is a bit lame, but is optimized for the common execution path.
2080 * A wait_queue on the ctlr->busy could be used, but then the common
2081 * execution path (pump_messages) would be required to call wake_up or
2082 * friends on every SPI message. Do this instead.
2083 */
2084 while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) {
2085 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2086 usleep_range(10000, 11000);
2087 spin_lock_irqsave(&ctlr->queue_lock, flags);
2088 }
2089
2090 if (!list_empty(&ctlr->queue) || ctlr->busy)
2091 ret = -EBUSY;
2092 else
2093 ctlr->running = false;
2094
2095 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2096
2097 if (ret) {
2098 dev_warn(&ctlr->dev, "could not stop message queue\n");
2099 return ret;
2100 }
2101 return ret;
2102 }
2103
2104 static int spi_destroy_queue(struct spi_controller *ctlr)
2105 {
2106 int ret;
2107
2108 ret = spi_stop_queue(ctlr);
2109
2110 /*
2111 * kthread_flush_worker will block until all work is done.
2112 * If the reason that stop_queue timed out is that the work will never
2113 * finish, then it does no good to call flush/stop thread, so
2114 * return anyway.
2115 */
2116 if (ret) {
2117 dev_err(&ctlr->dev, "problem destroying queue\n");
2118 return ret;
2119 }
2120
2121 kthread_destroy_worker(ctlr->kworker);
2122
2123 return 0;
2124 }
2125
2126 static int __spi_queued_transfer(struct spi_device *spi,
2127 struct spi_message *msg,
2128 bool need_pump)
2129 {
2130 struct spi_controller *ctlr = spi->controller;
2131 unsigned long flags;
2132
2133 spin_lock_irqsave(&ctlr->queue_lock, flags);
2134
2135 if (!ctlr->running) {
2136 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2137 return -ESHUTDOWN;
2138 }
2139 msg->actual_length = 0;
2140 msg->status = -EINPROGRESS;
2141
2142 list_add_tail(&msg->queue, &ctlr->queue);
2143 ctlr->queue_empty = false;
2144 if (!ctlr->busy && need_pump)
2145 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
2146
2147 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2148 return 0;
2149 }
2150
2151 /**
2152 * spi_queued_transfer - transfer function for queued transfers
2153 * @spi: spi device which is requesting transfer
2154 * @msg: spi message to be handled, queued to the driver queue
2155 *
2156 * Return: zero on success, else a negative error code.
2157 */
2158 static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
2159 {
2160 return __spi_queued_transfer(spi, msg, true);
2161 }
2162
2163 static int spi_controller_initialize_queue(struct spi_controller *ctlr)
2164 {
2165 int ret;
2166
2167 ctlr->transfer = spi_queued_transfer;
2168 if (!ctlr->transfer_one_message)
2169 ctlr->transfer_one_message = spi_transfer_one_message;
2170
2171 /* Initialize and start queue */
2172 ret = spi_init_queue(ctlr);
2173 if (ret) {
2174 dev_err(&ctlr->dev, "problem initializing queue\n");
2175 goto err_init_queue;
2176 }
2177 ctlr->queued = true;
2178 ret = spi_start_queue(ctlr);
2179 if (ret) {
2180 dev_err(&ctlr->dev, "problem starting queue\n");
2181 goto err_start_queue;
2182 }
2183
2184 return 0;
2185
2186 err_start_queue:
2187 spi_destroy_queue(ctlr);
2188 err_init_queue:
2189 return ret;
2190 }
2191
2192 /**
2193 * spi_flush_queue - Send all pending messages in the queue from the caller's
2194 * context
2195 * @ctlr: controller to process queue for
2196 *
2197 * This should be used when one wants to ensure all pending messages have been
2198 * sent before doing something else. It is used by the spi-mem code to make
2199 * sure SPI memory operations do not preempt regular SPI transfers that have
2200 * been queued before the spi-mem operation.
2201 */
2202 void spi_flush_queue(struct spi_controller *ctlr)
2203 {
2204 if (ctlr->transfer == spi_queued_transfer)
2205 __spi_pump_messages(ctlr, false);
2206 }
2207
2208 /*-------------------------------------------------------------------------*/
2209
2210 #if defined(CONFIG_OF)
2211 static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
2212 struct device_node *nc)
2213 {
2214 u32 value;
2215 int rc;
2216
2217 /* Mode (clock phase/polarity/etc.) */
2218 if (of_property_read_bool(nc, "spi-cpha"))
2219 spi->mode |= SPI_CPHA;
2220 if (of_property_read_bool(nc, "spi-cpol"))
2221 spi->mode |= SPI_CPOL;
2222 if (of_property_read_bool(nc, "spi-3wire"))
2223 spi->mode |= SPI_3WIRE;
2224 if (of_property_read_bool(nc, "spi-lsb-first"))
2225 spi->mode |= SPI_LSB_FIRST;
2226 if (of_property_read_bool(nc, "spi-cs-high"))
2227 spi->mode |= SPI_CS_HIGH;
2228
2229 /* Device DUAL/QUAD mode */
2230 if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
2231 switch (value) {
2232 case 0:
2233 spi->mode |= SPI_NO_TX;
2234 break;
2235 case 1:
2236 break;
2237 case 2:
2238 spi->mode |= SPI_TX_DUAL;
2239 break;
2240 case 4:
2241 spi->mode |= SPI_TX_QUAD;
2242 break;
2243 case 8:
2244 spi->mode |= SPI_TX_OCTAL;
2245 break;
2246 default:
2247 dev_warn(&ctlr->dev,
2248 "spi-tx-bus-width %d not supported\n",
2249 value);
2250 break;
2251 }
2252 }
2253
2254 if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
2255 switch (value) {
2256 case 0:
2257 spi->mode |= SPI_NO_RX;
2258 break;
2259 case 1:
2260 break;
2261 case 2:
2262 spi->mode |= SPI_RX_DUAL;
2263 break;
2264 case 4:
2265 spi->mode |= SPI_RX_QUAD;
2266 break;
2267 case 8:
2268 spi->mode |= SPI_RX_OCTAL;
2269 break;
2270 default:
2271 dev_warn(&ctlr->dev,
2272 "spi-rx-bus-width %d not supported\n",
2273 value);
2274 break;
2275 }
2276 }
2277
2278 if (spi_controller_is_slave(ctlr)) {
2279 if (!of_node_name_eq(nc, "slave")) {
2280 dev_err(&ctlr->dev, "%pOF is not called 'slave'\n",
2281 nc);
2282 return -EINVAL;
2283 }
2284 return 0;
2285 }
2286
2287 /* Device address */
2288 rc = of_property_read_u32(nc, "reg", &value);
2289 if (rc) {
2290 dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n",
2291 nc, rc);
2292 return rc;
2293 }
2294 spi->chip_select = value;
2295
2296 /* Device speed */
2297 if (!of_property_read_u32(nc, "spi-max-frequency", &value))
2298 spi->max_speed_hz = value;
2299
2300 return 0;
2301 }
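
/*
 * Example device tree node covered by the parsing above (a sketch only;
 * "vendor,chip" is a placeholder, not a real binding):
 *
 *	&spi0 {
 *		flash@0 {
 *			compatible = "vendor,chip";
 *			reg = <0>;			// chip select 0
 *			spi-max-frequency = <10000000>;
 *			spi-cpha;			// sets SPI_CPHA
 *			spi-tx-bus-width = <4>;		// sets SPI_TX_QUAD
 *		};
 *	};
 */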
2302
2303 static struct spi_device *
2304 of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc)
2305 {
2306 struct spi_device *spi;
2307 int rc;
2308
2309 /* Alloc an spi_device */
2310 spi = spi_alloc_device(ctlr);
2311 if (!spi) {
2312 dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc);
2313 rc = -ENOMEM;
2314 goto err_out;
2315 }
2316
2317 /* Select device driver */
2318 rc = of_modalias_node(nc, spi->modalias,
2319 sizeof(spi->modalias));
2320 if (rc < 0) {
2321 dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc);
2322 goto err_out;
2323 }
2324
2325 rc = of_spi_parse_dt(ctlr, spi, nc);
2326 if (rc)
2327 goto err_out;
2328
2329 /* Store a pointer to the node in the device structure */
2330 of_node_get(nc);
2331 spi->dev.of_node = nc;
2332 spi->dev.fwnode = of_fwnode_handle(nc);
2333
2334 /* Register the new device */
2335 rc = spi_add_device(spi);
2336 if (rc) {
2337 dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc);
2338 goto err_of_node_put;
2339 }
2340
2341 return spi;
2342
2343 err_of_node_put:
2344 of_node_put(nc);
2345 err_out:
2346 spi_dev_put(spi);
2347 return ERR_PTR(rc);
2348 }
2349
2350 /**
2351 * of_register_spi_devices() - Register child devices onto the SPI bus
2352 * @ctlr: Pointer to spi_controller device
2353 *
2354 * Registers an spi_device for each child node of the controller node that
2355 * represents a valid SPI slave.
2356 */
2357 static void of_register_spi_devices(struct spi_controller *ctlr)
2358 {
2359 struct spi_device *spi;
2360 struct device_node *nc;
2361
2362 if (!ctlr->dev.of_node)
2363 return;
2364
2365 for_each_available_child_of_node(ctlr->dev.of_node, nc) {
2366 if (of_node_test_and_set_flag(nc, OF_POPULATED))
2367 continue;
2368 spi = of_register_spi_device(ctlr, nc);
2369 if (IS_ERR(spi)) {
2370 dev_warn(&ctlr->dev,
2371 "Failed to create SPI device for %pOF\n", nc);
2372 of_node_clear_flag(nc, OF_POPULATED);
2373 }
2374 }
2375 }
2376 #else
2377 static void of_register_spi_devices(struct spi_controller *ctlr) { }
2378 #endif
2379
2380 /**
2381 * spi_new_ancillary_device() - Register ancillary SPI device
2382 * @spi: Pointer to the main SPI device registering the ancillary device
2383 * @chip_select: Chip Select of the ancillary device
2384 *
2385 * Register an ancillary SPI device; for example some chips have a chip-select
2386 * for normal device usage and another one for setup/firmware upload.
2387 *
2388 * This may only be called from the main SPI device's probe routine.
2389 *
2390 * Return: pointer to the new ancillary device, or ERR_PTR() on failure
2391 */
2392 struct spi_device *spi_new_ancillary_device(struct spi_device *spi,
2393 u8 chip_select)
2394 {
2395 struct spi_device *ancillary;
2396 int rc = 0;
2397
2398 /* Alloc an spi_device */
2399 ancillary = spi_alloc_device(spi->controller);
2400 if (!ancillary) {
2401 rc = -ENOMEM;
2402 goto err_out;
2403 }
2404
2405 strscpy(ancillary->modalias, "dummy", sizeof(ancillary->modalias));
2406
2407 /* Use provided chip-select for ancillary device */
2408 ancillary->chip_select = chip_select;
2409
2410 /* Take over SPI mode/speed from SPI main device */
2411 ancillary->max_speed_hz = spi->max_speed_hz;
2412 ancillary->mode = spi->mode;
2413
2414 /* Register the new device */
2415 rc = spi_add_device_locked(ancillary);
2416 if (rc) {
2417 dev_err(&spi->dev, "failed to register ancillary device\n");
2418 goto err_out;
2419 }
2420
2421 return ancillary;
2422
2423 err_out:
2424 spi_dev_put(ancillary);
2425 return ERR_PTR(rc);
2426 }
2427 EXPORT_SYMBOL_GPL(spi_new_ancillary_device);
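
/*
 * Example (hedged sketch): a probe routine for a chip whose second chip
 * select carries firmware uploads; the foo_ prefix is hypothetical:
 *
 *	static int foo_probe(struct spi_device *spi)
 *	{
 *		struct spi_device *fw_spi;
 *
 *		fw_spi = spi_new_ancillary_device(spi, 1);
 *		if (IS_ERR(fw_spi))
 *			return PTR_ERR(fw_spi);
 *
 *		spi_set_drvdata(spi, fw_spi);
 *		return 0;
 *	}
 */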
2428
2429 #ifdef CONFIG_ACPI
2430 struct acpi_spi_lookup {
2431 struct spi_controller *ctlr;
2432 u32 max_speed_hz;
2433 u32 mode;
2434 int irq;
2435 u8 bits_per_word;
2436 u8 chip_select;
2437 int n;
2438 int index;
2439 };
2440
2441 static int acpi_spi_count(struct acpi_resource *ares, void *data)
2442 {
2443 struct acpi_resource_spi_serialbus *sb;
2444 int *count = data;
2445
2446 if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
2447 return 1;
2448
2449 sb = &ares->data.spi_serial_bus;
2450 if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_SPI)
2451 return 1;
2452
2453 *count = *count + 1;
2454
2455 return 1;
2456 }
2457
2458 /**
2459 * acpi_spi_count_resources - Count the number of SpiSerialBus resources
2460 * @adev: ACPI device
2461 *
2462 * Return: the number of SpiSerialBus resources in the ACPI device's
2463 * resource list, or a negative error code.
2464 */
2465 int acpi_spi_count_resources(struct acpi_device *adev)
2466 {
2467 LIST_HEAD(r);
2468 int count = 0;
2469 int ret;
2470
2471 ret = acpi_dev_get_resources(adev, &r, acpi_spi_count, &count);
2472 if (ret < 0)
2473 return ret;
2474
2475 acpi_dev_free_resource_list(&r);
2476
2477 return count;
2478 }
2479 EXPORT_SYMBOL_GPL(acpi_spi_count_resources);
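
/*
 * Example (hedged sketch): a controller driver with an ACPI companion
 * "adev" at hand may size its chip-select space from the resource count:
 *
 *	int num_cs = acpi_spi_count_resources(adev);
 *
 *	if (num_cs > 0)
 *		ctlr->num_chipselect = num_cs;
 */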
2480
2481 static void acpi_spi_parse_apple_properties(struct acpi_device *dev,
2482 struct acpi_spi_lookup *lookup)
2483 {
2484 const union acpi_object *obj;
2485
2486 if (!x86_apple_machine)
2487 return;
2488
2489 if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj)
2490 && obj->buffer.length >= 4)
2491 lookup->max_speed_hz = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer;
2492
2493 if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj)
2494 && obj->buffer.length == 8)
2495 lookup->bits_per_word = *(u64 *)obj->buffer.pointer;
2496
2497 if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj)
2498 && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer)
2499 lookup->mode |= SPI_LSB_FIRST;
2500
2501 if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj)
2502 && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
2503 lookup->mode |= SPI_CPOL;
2504
2505 if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj)
2506 && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
2507 lookup->mode |= SPI_CPHA;
2508 }
2509
2510 static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev);
2511
2512 static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
2513 {
2514 struct acpi_spi_lookup *lookup = data;
2515 struct spi_controller *ctlr = lookup->ctlr;
2516
2517 if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
2518 struct acpi_resource_spi_serialbus *sb;
2519 acpi_handle parent_handle;
2520 acpi_status status;
2521
2522 sb = &ares->data.spi_serial_bus;
2523 if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
2524
2525 if (lookup->index != -1 && lookup->n++ != lookup->index)
2526 return 1;
2527
2528 status = acpi_get_handle(NULL,
2529 sb->resource_source.string_ptr,
2530 &parent_handle);
2531
2532 if (ACPI_FAILURE(status))
2533 return -ENODEV;
2534
2535 if (ctlr) {
2536 if (ACPI_HANDLE(ctlr->dev.parent) != parent_handle)
2537 return -ENODEV;
2538 } else {
2539 struct acpi_device *adev;
2540
2541 adev = acpi_fetch_acpi_dev(parent_handle);
2542 if (!adev)
2543 return -ENODEV;
2544
2545 ctlr = acpi_spi_find_controller_by_adev(adev);
2546 if (!ctlr)
2547 return -EPROBE_DEFER;
2548
2549 lookup->ctlr = ctlr;
2550 }
2551
2552 /*
2553 * ACPI DeviceSelection numbering is handled by the
2554 * host controller driver in Windows and can vary
2555 * from driver to driver. In Linux we always expect
2556 * 0 .. max - 1 so we need to ask the driver to
2557 * translate between the two schemes.
2558 */
2559 if (ctlr->fw_translate_cs) {
2560 int cs = ctlr->fw_translate_cs(ctlr,
2561 sb->device_selection);
2562 if (cs < 0)
2563 return cs;
2564 lookup->chip_select = cs;
2565 } else {
2566 lookup->chip_select = sb->device_selection;
2567 }
2568
2569 lookup->max_speed_hz = sb->connection_speed;
2570 lookup->bits_per_word = sb->data_bit_length;
2571
2572 if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
2573 lookup->mode |= SPI_CPHA;
2574 if (sb->clock_polarity == ACPI_SPI_START_HIGH)
2575 lookup->mode |= SPI_CPOL;
2576 if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
2577 lookup->mode |= SPI_CS_HIGH;
2578 }
2579 } else if (lookup->irq < 0) {
2580 struct resource r;
2581
2582 if (acpi_dev_resource_interrupt(ares, 0, &r))
2583 lookup->irq = r.start;
2584 }
2585
2586 /* Always tell the ACPI core to skip this resource */
2587 return 1;
2588 }
2589
2590 /**
2591 * acpi_spi_device_alloc - Allocate a spi device, and fill it in with ACPI information
2592 * @ctlr: controller to which the spi device belongs
2593 * @adev: ACPI Device for the spi device
2594 * @index: Index of the spi resource inside the ACPI Node
2595 *
2596 * This should be used to allocate a new spi device from an ACPI Node.
2597 * The caller is responsible for calling spi_add_device to register the spi device.
2598 *
2599 * If ctlr is set to NULL, the controller for the spi device will be looked up
2600 * using the resource.
2601 * If index is set to -1, index is not used.
2602 * Note: If index is -1, ctlr must be set.
2603 *
2604 * Return: a pointer to the new device, or ERR_PTR on error.
2605 */
2606 struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
2607 struct acpi_device *adev,
2608 int index)
2609 {
2610 acpi_handle parent_handle = NULL;
2611 struct list_head resource_list;
2612 struct acpi_spi_lookup lookup = {};
2613 struct spi_device *spi;
2614 int ret;
2615
2616 if (!ctlr && index == -1)
2617 return ERR_PTR(-EINVAL);
2618
2619 lookup.ctlr = ctlr;
2620 lookup.irq = -1;
2621 lookup.index = index;
2622 lookup.n = 0;
2623
2624 INIT_LIST_HEAD(&resource_list);
2625 ret = acpi_dev_get_resources(adev, &resource_list,
2626 acpi_spi_add_resource, &lookup);
2627 acpi_dev_free_resource_list(&resource_list);
2628
2629 if (ret < 0)
2630 /* Found SPI in _CRS but it points to another controller */
2631 return ERR_PTR(ret);
2632
2633 if (!lookup.max_speed_hz &&
2634 ACPI_SUCCESS(acpi_get_parent(adev->handle, &parent_handle)) &&
2635 ACPI_HANDLE(lookup.ctlr->dev.parent) == parent_handle) {
2636 /* Apple does not use _CRS but nested devices for SPI slaves */
2637 acpi_spi_parse_apple_properties(adev, &lookup);
2638 }
2639
2640 if (!lookup.max_speed_hz)
2641 return ERR_PTR(-ENODEV);
2642
2643 spi = spi_alloc_device(lookup.ctlr);
2644 if (!spi) {
2645 dev_err(&lookup.ctlr->dev, "failed to allocate SPI device for %s\n",
2646 dev_name(&adev->dev));
2647 return ERR_PTR(-ENOMEM);
2648 }
2649
2650 ACPI_COMPANION_SET(&spi->dev, adev);
2651 spi->max_speed_hz = lookup.max_speed_hz;
2652 spi->mode |= lookup.mode;
2653 spi->irq = lookup.irq;
2654 spi->bits_per_word = lookup.bits_per_word;
2655 spi->chip_select = lookup.chip_select;
2656
2657 return spi;
2658 }
2659 EXPORT_SYMBOL_GPL(acpi_spi_device_alloc);
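
/*
 * Example (hedged sketch): allocating and registering a device for the
 * first SpiSerialBus resource of an ACPI node; the unwinding mirrors
 * acpi_register_spi_device() below:
 *
 *	struct spi_device *spi = acpi_spi_device_alloc(NULL, adev, 0);
 *
 *	if (IS_ERR(spi))
 *		return PTR_ERR(spi);
 *	if (spi_add_device(spi)) {
 *		spi_dev_put(spi);
 *		return -ENODEV;
 *	}
 */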
2660
2661 static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
2662 struct acpi_device *adev)
2663 {
2664 struct spi_device *spi;
2665
2666 if (acpi_bus_get_status(adev) || !adev->status.present ||
2667 acpi_device_enumerated(adev))
2668 return AE_OK;
2669
2670 spi = acpi_spi_device_alloc(ctlr, adev, -1);
2671 if (IS_ERR(spi)) {
2672 if (PTR_ERR(spi) == -ENOMEM)
2673 return AE_NO_MEMORY;
2674 else
2675 return AE_OK;
2676 }
2677
2678 acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
2679 sizeof(spi->modalias));
2680
2681 if (spi->irq < 0)
2682 spi->irq = acpi_dev_gpio_irq_get(adev, 0);
2683
2684 acpi_device_set_enumerated(adev);
2685
2686 adev->power.flags.ignore_parent = true;
2687 if (spi_add_device(spi)) {
2688 adev->power.flags.ignore_parent = false;
2689 dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n",
2690 dev_name(&adev->dev));
2691 spi_dev_put(spi);
2692 }
2693
2694 return AE_OK;
2695 }
2696
2697 static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
2698 void *data, void **return_value)
2699 {
2700 struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
2701 struct spi_controller *ctlr = data;
2702
2703 if (!adev)
2704 return AE_OK;
2705
2706 return acpi_register_spi_device(ctlr, adev);
2707 }
2708
2709 #define SPI_ACPI_ENUMERATE_MAX_DEPTH 32
2710
2711 static void acpi_register_spi_devices(struct spi_controller *ctlr)
2712 {
2713 acpi_status status;
2714 acpi_handle handle;
2715
2716 handle = ACPI_HANDLE(ctlr->dev.parent);
2717 if (!handle)
2718 return;
2719
2720 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
2721 SPI_ACPI_ENUMERATE_MAX_DEPTH,
2722 acpi_spi_add_device, NULL, ctlr, NULL);
2723 if (ACPI_FAILURE(status))
2724 dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n");
2725 }
2726 #else
2727 static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {}
2728 #endif /* CONFIG_ACPI */
2729
2730 static void spi_controller_release(struct device *dev)
2731 {
2732 struct spi_controller *ctlr;
2733
2734 ctlr = container_of(dev, struct spi_controller, dev);
2735 kfree(ctlr);
2736 }
2737
2738 static struct class spi_master_class = {
2739 .name = "spi_master",
2740 .owner = THIS_MODULE,
2741 .dev_release = spi_controller_release,
2742 .dev_groups = spi_master_groups,
2743 };
2744
2745 #ifdef CONFIG_SPI_SLAVE
2746 /**
2747 * spi_slave_abort - abort the ongoing transfer request on an SPI slave
2748 * controller
2749 * @spi: device used for the current transfer
2750 */
2751 int spi_slave_abort(struct spi_device *spi)
2752 {
2753 struct spi_controller *ctlr = spi->controller;
2754
2755 if (spi_controller_is_slave(ctlr) && ctlr->slave_abort)
2756 return ctlr->slave_abort(ctlr);
2757
2758 return -ENOTSUPP;
2759 }
2760 EXPORT_SYMBOL_GPL(spi_slave_abort);
2761
2762 static ssize_t slave_show(struct device *dev, struct device_attribute *attr,
2763 char *buf)
2764 {
2765 struct spi_controller *ctlr = container_of(dev, struct spi_controller,
2766 dev);
2767 struct device *child;
2768
2769 child = device_find_any_child(&ctlr->dev);
2770 return sprintf(buf, "%s\n",
2771 child ? to_spi_device(child)->modalias : NULL);
2772 }
2773
2774 static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
2775 const char *buf, size_t count)
2776 {
2777 struct spi_controller *ctlr = container_of(dev, struct spi_controller,
2778 dev);
2779 struct spi_device *spi;
2780 struct device *child;
2781 char name[32];
2782 int rc;
2783
2784 rc = sscanf(buf, "%31s", name);
2785 if (rc != 1 || !name[0])
2786 return -EINVAL;
2787
2788 child = device_find_any_child(&ctlr->dev);
2789 if (child) {
2790 /* Remove registered slave */
2791 device_unregister(child);
2792 put_device(child);
2793 }
2794
2795 if (strcmp(name, "(null)")) {
2796 /* Register new slave */
2797 spi = spi_alloc_device(ctlr);
2798 if (!spi)
2799 return -ENOMEM;
2800
2801 strscpy(spi->modalias, name, sizeof(spi->modalias));
2802
2803 rc = spi_add_device(spi);
2804 if (rc) {
2805 spi_dev_put(spi);
2806 return rc;
2807 }
2808 }
2809
2810 return count;
2811 }
2812
2813 static DEVICE_ATTR_RW(slave);
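
/*
 * From userspace, writing the name of a slave protocol handler to the
 * attribute above binds it, and writing "(null)" unbinds it again, e.g.
 * (an illustrative shell session, using the in-tree spi-slave-time
 * handler):
 *
 *	echo spi-slave-time > /sys/class/spi_slave/spi0/slave
 *	echo "(null)" > /sys/class/spi_slave/spi0/slave
 */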
2814
2815 static struct attribute *spi_slave_attrs[] = {
2816 &dev_attr_slave.attr,
2817 NULL,
2818 };
2819
2820 static const struct attribute_group spi_slave_group = {
2821 .attrs = spi_slave_attrs,
2822 };
2823
2824 static const struct attribute_group *spi_slave_groups[] = {
2825 &spi_controller_statistics_group,
2826 &spi_slave_group,
2827 NULL,
2828 };
2829
2830 static struct class spi_slave_class = {
2831 .name = "spi_slave",
2832 .owner = THIS_MODULE,
2833 .dev_release = spi_controller_release,
2834 .dev_groups = spi_slave_groups,
2835 };
2836 #else
2837 extern struct class spi_slave_class; /* dummy */
2838 #endif
2839
2840 /**
2841 * __spi_alloc_controller - allocate an SPI master or slave controller
2842 * @dev: the controller, possibly using the platform_bus
2843 * @size: how much zeroed driver-private data to allocate; the pointer to this
2844 * memory is in the driver_data field of the returned device, accessible
2845 * with spi_controller_get_devdata(); the memory is cacheline aligned;
2846 * drivers granting DMA access to portions of their private data need to
2847 * round up @size using ALIGN(size, dma_get_cache_alignment()).
2848 * @slave: flag indicating whether to allocate an SPI master (false) or SPI
2849 * slave (true) controller
2850 * Context: can sleep
2851 *
2852 * This call is used only by SPI controller drivers, which are the
2853 * only ones directly touching chip registers. It's how they allocate
2854 * an spi_controller structure, prior to calling spi_register_controller().
2855 *
2856 * This must be called from context that can sleep.
2857 *
2858 * The caller is responsible for assigning the bus number and initializing the
2859 * controller's methods before calling spi_register_controller(); and (after
2860 * errors adding the device) calling spi_controller_put() to prevent a memory
2861 * leak.
2862 *
2863 * Return: the SPI controller structure on success, else NULL.
2864 */
2865 struct spi_controller *__spi_alloc_controller(struct device *dev,
2866 unsigned int size, bool slave)
2867 {
2868 struct spi_controller *ctlr;
2869 size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment());
2870
2871 if (!dev)
2872 return NULL;
2873
2874 ctlr = kzalloc(size + ctlr_size, GFP_KERNEL);
2875 if (!ctlr)
2876 return NULL;
2877
2878 device_initialize(&ctlr->dev);
2879 INIT_LIST_HEAD(&ctlr->queue);
2880 spin_lock_init(&ctlr->queue_lock);
2881 spin_lock_init(&ctlr->bus_lock_spinlock);
2882 mutex_init(&ctlr->bus_lock_mutex);
2883 mutex_init(&ctlr->io_mutex);
2884 mutex_init(&ctlr->add_lock);
2885 ctlr->bus_num = -1;
2886 ctlr->num_chipselect = 1;
2887 ctlr->slave = slave;
2888 if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave)
2889 ctlr->dev.class = &spi_slave_class;
2890 else
2891 ctlr->dev.class = &spi_master_class;
2892 ctlr->dev.parent = dev;
2893 pm_suspend_ignore_children(&ctlr->dev, true);
2894 spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size);
2895
2896 return ctlr;
2897 }
2898 EXPORT_SYMBOL_GPL(__spi_alloc_controller);
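
/*
 * Example (hedged sketch): drivers normally call this through the
 * spi_alloc_master()/spi_alloc_slave() wrappers; the foo_* names are
 * hypothetical:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct spi_controller *ctlr;
 *		int ret;
 *
 *		ctlr = spi_alloc_master(&pdev->dev, sizeof(struct foo_priv));
 *		if (!ctlr)
 *			return -ENOMEM;
 *
 *		ctlr->num_chipselect = 4;
 *		ctlr->transfer_one = foo_transfer_one;
 *
 *		ret = spi_register_controller(ctlr);
 *		if (ret)
 *			spi_controller_put(ctlr);
 *		return ret;
 *	}
 */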
2899
2900 static void devm_spi_release_controller(struct device *dev, void *ctlr)
2901 {
2902 spi_controller_put(*(struct spi_controller **)ctlr);
2903 }
2904
2905 /**
2906 * __devm_spi_alloc_controller - resource-managed __spi_alloc_controller()
2907 * @dev: physical device of SPI controller
2908 * @size: how much zeroed driver-private data to allocate
2909 * @slave: whether to allocate an SPI master (false) or SPI slave (true)
2910 * Context: can sleep
2911 *
2912 * Allocate an SPI controller and automatically release a reference on it
2913 * when @dev is unbound from its driver. Drivers are thus relieved from
2914 * having to call spi_controller_put().
2915 *
2916 * The arguments to this function are identical to __spi_alloc_controller().
2917 *
2918 * Return: the SPI controller structure on success, else NULL.
2919 */
2920 struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
2921 unsigned int size,
2922 bool slave)
2923 {
2924 struct spi_controller **ptr, *ctlr;
2925
2926 ptr = devres_alloc(devm_spi_release_controller, sizeof(*ptr),
2927 GFP_KERNEL);
2928 if (!ptr)
2929 return NULL;
2930
2931 ctlr = __spi_alloc_controller(dev, size, slave);
2932 if (ctlr) {
2933 ctlr->devm_allocated = true;
2934 *ptr = ctlr;
2935 devres_add(dev, ptr);
2936 } else {
2937 devres_free(ptr);
2938 }
2939
2940 return ctlr;
2941 }
2942 EXPORT_SYMBOL_GPL(__devm_spi_alloc_controller);
2943
2944 /**
2945 * spi_get_gpio_descs() - grab chip select GPIOs for the master
2946 * @ctlr: The SPI master to grab GPIO descriptors for
2947 */
2948 static int spi_get_gpio_descs(struct spi_controller *ctlr)
2949 {
2950 int nb, i;
2951 struct gpio_desc **cs;
2952 struct device *dev = &ctlr->dev;
2953 unsigned long native_cs_mask = 0;
2954 unsigned int num_cs_gpios = 0;
2955
2956 nb = gpiod_count(dev, "cs");
2957 if (nb < 0) {
2958 /* No GPIOs at all is fine, else return the error */
2959 if (nb == -ENOENT)
2960 return 0;
2961 return nb;
2962 }
2963
2964 ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
2965
2966 cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs),
2967 GFP_KERNEL);
2968 if (!cs)
2969 return -ENOMEM;
2970 ctlr->cs_gpiods = cs;
2971
2972 for (i = 0; i < nb; i++) {
2973 /*
2974 * Most chipselects are active low, the inverted
2975 * semantics are handled by special quirks in gpiolib,
2976 * so initializing them GPIOD_OUT_LOW here means
2977 * "unasserted", in most cases this will drive the physical
2978 * line high.
2979 */
2980 cs[i] = devm_gpiod_get_index_optional(dev, "cs", i,
2981 GPIOD_OUT_LOW);
2982 if (IS_ERR(cs[i]))
2983 return PTR_ERR(cs[i]);
2984
2985 if (cs[i]) {
2986 /*
2987 * If we find a CS GPIO, name it after the device and
2988 * chip select line.
2989 */
2990 char *gpioname;
2991
2992 gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d",
2993 dev_name(dev), i);
2994 if (!gpioname)
2995 return -ENOMEM;
2996 gpiod_set_consumer_name(cs[i], gpioname);
2997 num_cs_gpios++;
2998 continue;
2999 }
3000
3001 if (ctlr->max_native_cs && i >= ctlr->max_native_cs) {
3002 dev_err(dev, "Invalid native chip select %d\n", i);
3003 return -EINVAL;
3004 }
3005 native_cs_mask |= BIT(i);
3006 }
3007
3008 ctlr->unused_native_cs = ffs(~native_cs_mask) - 1;
3009
3010 if ((ctlr->flags & SPI_MASTER_GPIO_SS) && num_cs_gpios &&
3011 ctlr->max_native_cs && ctlr->unused_native_cs >= ctlr->max_native_cs) {
3012 dev_err(dev, "No unused native chip select available\n");
3013 return -EINVAL;
3014 }
3015
3016 return 0;
3017 }
3018
3019 static int spi_controller_check_ops(struct spi_controller *ctlr)
3020 {
3021 /*
3022 * The controller may implement only the high-level SPI-memory like
3023 * operations if it does not support regular SPI transfers, and this is
3024 * a valid use case.
3025 * If ->mem_ops is NULL, we request that at least one of the
3026 * ->transfer_xxx() methods be implemented.
3027 */
3028 if (ctlr->mem_ops) {
3029 if (!ctlr->mem_ops->exec_op)
3030 return -EINVAL;
3031 } else if (!ctlr->transfer && !ctlr->transfer_one &&
3032 !ctlr->transfer_one_message) {
3033 return -EINVAL;
3034 }
3035
3036 return 0;
3037 }
3038
3039 /**
3040 * spi_register_controller - register SPI master or slave controller
3041 * @ctlr: initialized master, originally from spi_alloc_master() or
3042 * spi_alloc_slave()
3043 * Context: can sleep
3044 *
3045 * SPI controllers connect to their drivers using some non-SPI bus,
3046 * such as the platform bus. The final stage of probe() in that code
3047 * includes calling spi_register_controller() to hook up to this SPI bus glue.
3048 *
3049 * SPI controllers use board specific (often SOC specific) bus numbers,
3050 * and board-specific addressing for SPI devices combines those numbers
3051 * with chip select numbers. Since SPI does not directly support dynamic
3052 * device identification, boards need configuration tables telling which
3053 * chip is at which address.
3054 *
3055 * This must be called from context that can sleep. It returns zero on
3056 * success, else a negative error code (dropping the controller's refcount).
3057 * After a successful return, the caller is responsible for calling
3058 * spi_unregister_controller().
3059 *
3060 * Return: zero on success, else a negative error code.
3061 */
3062 int spi_register_controller(struct spi_controller *ctlr)
3063 {
3064 struct device *dev = ctlr->dev.parent;
3065 struct boardinfo *bi;
3066 int status;
3067 int id, first_dynamic;
3068
3069 if (!dev)
3070 return -ENODEV;
3071
3072 /*
3073 * Make sure all necessary hooks are implemented before registering
3074 * the SPI controller.
3075 */
3076 status = spi_controller_check_ops(ctlr);
3077 if (status)
3078 return status;
3079
3080 if (ctlr->bus_num >= 0) {
3081 /* Devices with a fixed bus num must check-in with the num */
3082 mutex_lock(&board_lock);
3083 id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
3084 ctlr->bus_num + 1, GFP_KERNEL);
3085 mutex_unlock(&board_lock);
3086 if (WARN(id < 0, "couldn't get idr"))
3087 return id == -ENOSPC ? -EBUSY : id;
3088 ctlr->bus_num = id;
3089 } else if (ctlr->dev.of_node) {
3090 /* Allocate dynamic bus number using Linux idr */
3091 id = of_alias_get_id(ctlr->dev.of_node, "spi");
3092 if (id >= 0) {
3093 ctlr->bus_num = id;
3094 mutex_lock(&board_lock);
3095 id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
3096 ctlr->bus_num + 1, GFP_KERNEL);
3097 mutex_unlock(&board_lock);
3098 if (WARN(id < 0, "couldn't get idr"))
3099 return id == -ENOSPC ? -EBUSY : id;
3100 }
3101 }
3102 if (ctlr->bus_num < 0) {
3103 first_dynamic = of_alias_get_highest_id("spi");
3104 if (first_dynamic < 0)
3105 first_dynamic = 0;
3106 else
3107 first_dynamic++;
3108
3109 mutex_lock(&board_lock);
3110 id = idr_alloc(&spi_master_idr, ctlr, first_dynamic,
3111 0, GFP_KERNEL);
3112 mutex_unlock(&board_lock);
3113 if (WARN(id < 0, "couldn't get idr"))
3114 return id;
3115 ctlr->bus_num = id;
3116 }
3117 ctlr->bus_lock_flag = 0;
3118 init_completion(&ctlr->xfer_completion);
3119 init_completion(&ctlr->cur_msg_completion);
3120 if (!ctlr->max_dma_len)
3121 ctlr->max_dma_len = INT_MAX;
3122
3123 /*
3124 * Register the device, then userspace will see it.
3125 * Registration fails if the bus ID is in use.
3126 */
3127 dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
3128
3129 if (!spi_controller_is_slave(ctlr) && ctlr->use_gpio_descriptors) {
3130 status = spi_get_gpio_descs(ctlr);
3131 if (status)
3132 goto free_bus_id;
3133 /*
3134 * A controller using GPIO descriptors always
3135 * supports SPI_CS_HIGH if need be.
3136 */
3137 ctlr->mode_bits |= SPI_CS_HIGH;
3138 }
3139
3140 /*
3141 * Even if it's just one always-selected device, there must
3142 * be at least one chipselect.
3143 */
3144 if (!ctlr->num_chipselect) {
3145 status = -EINVAL;
3146 goto free_bus_id;
3147 }
3148
3149 /* Setting last_cs to -1 means no chip selected */
3150 ctlr->last_cs = -1;
3151
3152 status = device_add(&ctlr->dev);
3153 if (status < 0)
3154 goto free_bus_id;
3155 dev_dbg(dev, "registered %s %s\n",
3156 spi_controller_is_slave(ctlr) ? "slave" : "master",
3157 dev_name(&ctlr->dev));
3158
3159 /*
3160 * If we're using a queued driver, start the queue. Note that we don't
3161 * need the queueing logic if the driver is only supporting high-level
3162 * memory operations.
3163 */
3164 if (ctlr->transfer) {
3165 dev_info(dev, "controller is unqueued, this is deprecated\n");
3166 } else if (ctlr->transfer_one || ctlr->transfer_one_message) {
3167 status = spi_controller_initialize_queue(ctlr);
3168 if (status) {
3169 device_del(&ctlr->dev);
3170 goto free_bus_id;
3171 }
3172 }
3173 /* Add statistics */
3174 ctlr->pcpu_statistics = spi_alloc_pcpu_stats(dev);
3175 if (!ctlr->pcpu_statistics) {
3176 dev_err(dev, "Error allocating per-cpu statistics\n");
3177 status = -ENOMEM;
3178 goto destroy_queue;
3179 }
3180
3181 mutex_lock(&board_lock);
3182 list_add_tail(&ctlr->list, &spi_controller_list);
3183 list_for_each_entry(bi, &board_list, list)
3184 spi_match_controller_to_boardinfo(ctlr, &bi->board_info);
3185 mutex_unlock(&board_lock);
3186
3187 /* Register devices from the device tree and ACPI */
3188 of_register_spi_devices(ctlr);
3189 acpi_register_spi_devices(ctlr);
3190 return status;
3191
3192 destroy_queue:
3193 spi_destroy_queue(ctlr);
3194 free_bus_id:
3195 mutex_lock(&board_lock);
3196 idr_remove(&spi_master_idr, ctlr->bus_num);
3197 mutex_unlock(&board_lock);
3198 return status;
3199 }
3200 EXPORT_SYMBOL_GPL(spi_register_controller);
3201
3202 static void devm_spi_unregister(struct device *dev, void *res)
3203 {
3204 spi_unregister_controller(*(struct spi_controller **)res);
3205 }
3206
3207 /**
3208 * devm_spi_register_controller - register managed SPI master or slave
3209 * controller
3210 * @dev: device managing SPI controller
3211 * @ctlr: initialized controller, originally from spi_alloc_master() or
3212 * spi_alloc_slave()
3213 * Context: can sleep
3214 *
3215 * Register a SPI device as with spi_register_controller() which will
3216 * automatically be unregistered and freed.
3217 *
3218 * Return: zero on success, else a negative error code.
3219 */
3220 int devm_spi_register_controller(struct device *dev,
3221 struct spi_controller *ctlr)
3222 {
3223 struct spi_controller **ptr;
3224 int ret;
3225
3226 ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
3227 if (!ptr)
3228 return -ENOMEM;
3229
3230 ret = spi_register_controller(ctlr);
3231 if (!ret) {
3232 *ptr = ctlr;
3233 devres_add(dev, ptr);
3234 } else {
3235 devres_free(ptr);
3236 }
3237
3238 return ret;
3239 }
3240 EXPORT_SYMBOL_GPL(devm_spi_register_controller);
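
/*
 * Example (hedged sketch): a fully devres-managed probe pairs
 * devm_spi_alloc_master() with devm_spi_register_controller(), leaving no
 * explicit cleanup in the driver; the foo_* names are hypothetical:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct spi_controller *ctlr;
 *
 *		ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(struct foo_priv));
 *		if (!ctlr)
 *			return -ENOMEM;
 *
 *		ctlr->transfer_one = foo_transfer_one;
 *		return devm_spi_register_controller(&pdev->dev, ctlr);
 *	}
 */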
3241
3242 static int __unregister(struct device *dev, void *null)
3243 {
3244 spi_unregister_device(to_spi_device(dev));
3245 return 0;
3246 }
3247
3248 /**
3249 * spi_unregister_controller - unregister SPI master or slave controller
3250 * @ctlr: the controller being unregistered
3251 * Context: can sleep
3252 *
3253 * This call is used only by SPI controller drivers, which are the
3254 * only ones directly touching chip registers.
3255 *
3256 * This must be called from context that can sleep.
3257 *
3258 * Note that this function also drops a reference to the controller.
3259 */
3260 void spi_unregister_controller(struct spi_controller *ctlr)
3261 {
3262 struct spi_controller *found;
3263 int id = ctlr->bus_num;
3264
3265 /* Prevent addition of new devices, unregister existing ones */
3266 if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
3267 mutex_lock(&ctlr->add_lock);
3268
3269 device_for_each_child(&ctlr->dev, NULL, __unregister);
3270
3271 /* First make sure that this controller was ever added */
3272 mutex_lock(&board_lock);
3273 found = idr_find(&spi_master_idr, id);
3274 mutex_unlock(&board_lock);
3275 if (ctlr->queued) {
3276 if (spi_destroy_queue(ctlr))
3277 dev_err(&ctlr->dev, "queue remove failed\n");
3278 }
3279 mutex_lock(&board_lock);
3280 list_del(&ctlr->list);
3281 mutex_unlock(&board_lock);
3282
3283 device_del(&ctlr->dev);
3284
3285 /* Free bus id */
3286 mutex_lock(&board_lock);
3287 if (found == ctlr)
3288 idr_remove(&spi_master_idr, id);
3289 mutex_unlock(&board_lock);
3290
3291 if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
3292 mutex_unlock(&ctlr->add_lock);
3293
3294 /* Release the last reference on the controller if its driver
3295 * has not yet been converted to devm_spi_alloc_master/slave().
3296 */
3297 if (!ctlr->devm_allocated)
3298 put_device(&ctlr->dev);
3299 }
3300 EXPORT_SYMBOL_GPL(spi_unregister_controller);
3301
3302 int spi_controller_suspend(struct spi_controller *ctlr)
3303 {
3304 int ret;
3305
3306 /* Basically no-ops for non-queued controllers */
3307 if (!ctlr->queued)
3308 return 0;
3309
3310 ret = spi_stop_queue(ctlr);
3311 if (ret)
3312 dev_err(&ctlr->dev, "queue stop failed\n");
3313
3314 return ret;
3315 }
3316 EXPORT_SYMBOL_GPL(spi_controller_suspend);
3317
3318 int spi_controller_resume(struct spi_controller *ctlr)
3319 {
3320 int ret;
3321
3322 if (!ctlr->queued)
3323 return 0;
3324
3325 ret = spi_start_queue(ctlr);
3326 if (ret)
3327 dev_err(&ctlr->dev, "queue restart failed\n");
3328
3329 return ret;
3330 }
3331 EXPORT_SYMBOL_GPL(spi_controller_resume);
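
/*
 * Example (hedged sketch): controller drivers typically wire the two
 * helpers above into their dev_pm_ops, assuming the controller was stored
 * with platform_set_drvdata(); the foo_* names are hypothetical:
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct spi_controller *ctlr = dev_get_drvdata(dev);
 *
 *		return spi_controller_suspend(ctlr);
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct spi_controller *ctlr = dev_get_drvdata(dev);
 *
 *		return spi_controller_resume(ctlr);
 *	}
 *
 *	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
 */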
3332
3333 /*-------------------------------------------------------------------------*/
3334
3335 /* Core methods for spi_message alterations */
3336
3337 static void __spi_replace_transfers_release(struct spi_controller *ctlr,
3338 struct spi_message *msg,
3339 void *res)
3340 {
3341 struct spi_replaced_transfers *rxfer = res;
3342 size_t i;
3343
3344 /* Call extra callback if requested */
3345 if (rxfer->release)
3346 rxfer->release(ctlr, msg, res);
3347
3348 /* Insert replaced transfers back into the message */
3349 list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);
3350
3351 /* Remove the formerly inserted entries */
3352 for (i = 0; i < rxfer->inserted; i++)
3353 list_del(&rxfer->inserted_transfers[i].transfer_list);
3354 }
3355
3356 /**
3357 * spi_replace_transfers - replace transfers with several transfers
3358 * and register change with spi_message.resources
3359 * @msg: the spi_message we work upon
3360 * @xfer_first: the first spi_transfer we want to replace
3361 * @remove: number of transfers to remove
3362 * @insert: the number of transfers we want to insert instead
3363 * @release: extra release code necessary in some circumstances
3364 * @extradatasize: extra data to allocate (with alignment guarantees
3365 * of struct @spi_transfer)
3366 * @gfp: gfp flags
3367 *
3368 * Return: pointer to @spi_replaced_transfers,
3369 * or PTR_ERR(...) in case of errors.
3370 */
3371 static struct spi_replaced_transfers *spi_replace_transfers(
3372 struct spi_message *msg,
3373 struct spi_transfer *xfer_first,
3374 size_t remove,
3375 size_t insert,
3376 spi_replaced_release_t release,
3377 size_t extradatasize,
3378 gfp_t gfp)
3379 {
3380 struct spi_replaced_transfers *rxfer;
3381 struct spi_transfer *xfer;
3382 size_t i;
3383
3384 /* Allocate the structure using spi_res */
3385 rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
3386 struct_size(rxfer, inserted_transfers, insert)
3387 + extradatasize,
3388 gfp);
3389 if (!rxfer)
3390 return ERR_PTR(-ENOMEM);
3391
3392 /* The release code to invoke before running the generic release */
3393 rxfer->release = release;
3394
3395 /* Assign extradata */
3396 if (extradatasize)
3397 rxfer->extradata =
3398 &rxfer->inserted_transfers[insert];
3399
3400 /* Init the replaced_transfers list */
3401 INIT_LIST_HEAD(&rxfer->replaced_transfers);
3402
3403 /*
3404 * Assign the list_entry after which we should reinsert
3405 * the @replaced_transfers - it may be spi_message.transfers!
3406 */
3407 rxfer->replaced_after = xfer_first->transfer_list.prev;
3408
3409 /* Remove the requested number of transfers */
3410 for (i = 0; i < remove; i++) {
3411 /*
3412 * If the entry after replaced_after is msg->transfers,
3413 * then we have been requested to remove more transfers
3414 * than are in the list.
3415 */
3416 if (rxfer->replaced_after->next == &msg->transfers) {
3417 dev_err(&msg->spi->dev,
3418 "requested to remove more spi_transfers than are available\n");
3419 /* Insert replaced transfers back into the message */
3420 list_splice(&rxfer->replaced_transfers,
3421 rxfer->replaced_after);
3422
3423 /* Free the spi_replace_transfer structure... */
3424 spi_res_free(rxfer);
3425
3426 /* ...and return with an error */
3427 return ERR_PTR(-EINVAL);
3428 }
3429
3430 /*
3431 * Remove the entry after replaced_after from list of
3432 * transfers and add it to list of replaced_transfers.
3433 */
3434 list_move_tail(rxfer->replaced_after->next,
3435 &rxfer->replaced_transfers);
3436 }
3437
3438 /*
3439 * Create copies of the given xfer with identical settings,
3440 * based on the first transfer to get removed.
3441 */
3442 for (i = 0; i < insert; i++) {
3443 /* We need to run in reverse order */
3444 xfer = &rxfer->inserted_transfers[insert - 1 - i];
3445
3446 /* Copy all spi_transfer data */
3447 memcpy(xfer, xfer_first, sizeof(*xfer));
3448
3449 /* Add to list */
3450 list_add(&xfer->transfer_list, rxfer->replaced_after);
3451
3452 /* Clear cs_change and delay for all but the last */
3453 if (i) {
3454 xfer->cs_change = false;
3455 xfer->delay.value = 0;
3456 }
3457 }
3458
3459 /* Set up inserted... */
3460 rxfer->inserted = insert;
3461
3462 /* ...and register it with spi_res/spi_message */
3463 spi_res_add(msg, rxfer);
3464
3465 return rxfer;
3466 }
3467
3468 static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
3469 struct spi_message *msg,
3470 struct spi_transfer **xferp,
3471 size_t maxsize,
3472 gfp_t gfp)
3473 {
3474 struct spi_transfer *xfer = *xferp, *xfers;
3475 struct spi_replaced_transfers *srt;
3476 size_t offset;
3477 size_t count, i;
3478
3479 /* Calculate how many we have to replace */
3480 count = DIV_ROUND_UP(xfer->len, maxsize);
3481
3482 /* Create replacement */
3483 srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp);
3484 if (IS_ERR(srt))
3485 return PTR_ERR(srt);
3486 xfers = srt->inserted_transfers;
3487
3488 /*
3489 * Now handle each of those newly inserted spi_transfers.
3490 * Note that the replacement spi_transfers are all preset
3491 * to the same values as *xferp, so tx_buf, rx_buf and len
3492 * are all identical (as well as most others),
3493 * so we just have to fix up len and the pointers.
3494 *
3495 * This also includes support for the deprecated
3496 * spi_message.is_dma_mapped interface.
3497 */
3498
3499 /*
3500 * The first transfer just needs the length modified, so we
3501 * run it outside the loop.
3502 */
3503 xfers[0].len = min_t(size_t, maxsize, xfer[0].len);
3504
3505 /* All the others need rx_buf/tx_buf also set */
3506 for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
3507 /* Update rx_buf, tx_buf and dma */
3508 if (xfers[i].rx_buf)
3509 xfers[i].rx_buf += offset;
3510 if (xfers[i].rx_dma)
3511 xfers[i].rx_dma += offset;
3512 if (xfers[i].tx_buf)
3513 xfers[i].tx_buf += offset;
3514 if (xfers[i].tx_dma)
3515 xfers[i].tx_dma += offset;
3516
3517 /* Update length */
3518 xfers[i].len = min(maxsize, xfers[i].len - offset);
3519 }
3520
3521 /*
3522 * We set up xferp to the last entry we have inserted,
3523 * so that we skip those already split transfers.
3524 */
3525 *xferp = &xfers[count - 1];
3526
3527 /* Increment statistics counters */
3528 SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics,
3529 transfers_split_maxsize);
3530 SPI_STATISTICS_INCREMENT_FIELD(msg->spi->pcpu_statistics,
3531 transfers_split_maxsize);
3532
3533 return 0;
3534 }
3535
3536 /**
3537 * spi_split_transfers_maxsize - split spi transfers into multiple transfers
3538 * when an individual transfer exceeds a
3539 * certain size
3540 * @ctlr: the @spi_controller for this transfer
3541 * @msg: the @spi_message to transform
3542 * @maxsize: the maximum length an individual transfer may have before it is split
3543 * @gfp: GFP allocation flags
3544 *
3545 * Return: status of transformation
3546 */
3547 int spi_split_transfers_maxsize(struct spi_controller *ctlr,
3548 struct spi_message *msg,
3549 size_t maxsize,
3550 gfp_t gfp)
3551 {
3552 struct spi_transfer *xfer;
3553 int ret;
3554
3555 /*
3556 * Iterate over the transfer_list,
3557 * but note that xfer is advanced to the last transfer inserted
3558 * to avoid checking sizes again unnecessarily (also xfer does
3559 * potentially belong to a different list by the time the
3560 * replacement has happened).
3561 */
3562 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
3563 if (xfer->len > maxsize) {
3564 ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
3565 maxsize, gfp);
3566 if (ret)
3567 return ret;
3568 }
3569 }
3570
3571 return 0;
3572 }
3573 EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
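
/*
 * Example (hedged sketch): a driver whose DMA engine moves at most 64 KiB
 * per transfer may split oversized transfers from its ->prepare_message()
 * hook; the foo_ prefix is hypothetical:
 *
 *	static int foo_prepare_message(struct spi_controller *ctlr,
 *				       struct spi_message *msg)
 *	{
 *		return spi_split_transfers_maxsize(ctlr, msg, SZ_64K,
 *						   GFP_KERNEL);
 *	}
 */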

/*-------------------------------------------------------------------------*/

/*
 * Core methods for SPI controller protocol drivers. Some of the
 * other core methods are currently defined as inline functions.
 */

static int __spi_validate_bits_per_word(struct spi_controller *ctlr,
					u8 bits_per_word)
{
	if (ctlr->bits_per_word_mask) {
		/* Only 32 bits fit in the mask */
		if (bits_per_word > 32)
			return -EINVAL;
		if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word)))
			return -EINVAL;
	}

	return 0;
}
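
/*
 * Example (illustrative sketch): a controller driver advertises the
 * word sizes its hardware supports by filling bits_per_word_mask at
 * registration time; the particular sizes below are assumptions made
 * for the example.
 *
 *	ctlr->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
 *	// or, for a contiguous range of word sizes:
 *	ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
 */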

/**
 * spi_setup - setup SPI mode and clock rate
 * @spi: the device whose settings are being modified
 * Context: can sleep, and no requests are queued to the device
 *
 * SPI protocol drivers may need to update the transfer mode if the
 * device doesn't work with its default. They may likewise need
 * to update clock rates or word sizes from initial values. This function
 * changes those settings, and must be called from a context that can sleep.
 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
 * effect the next time the device is selected and data is transferred to
 * or from it. When this function returns, the spi device is deselected.
 *
 * Note that this call will fail if the protocol driver specifies an option
 * that the underlying controller or its driver does not support. For
 * example, not all hardware supports wire transfers using nine bit words,
 * LSB-first wire encoding, or active-high chipselects.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_setup(struct spi_device *spi)
{
	unsigned bad_bits, ugly_bits;
	int status = 0;

	/*
	 * Check the mode to ensure that no two of DUAL, QUAD and
	 * NO_MOSI/MISO are set at the same time.
	 */
	if ((hweight_long(spi->mode &
		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_NO_TX)) > 1) ||
	    (hweight_long(spi->mode &
		(SPI_RX_DUAL | SPI_RX_QUAD | SPI_NO_RX)) > 1)) {
		dev_err(&spi->dev,
			"setup: cannot select any two of dual, quad and no-rx/tx at the same time\n");
		return -EINVAL;
	}
	/* In SPI_3WIRE mode, DUAL and QUAD are forbidden */
	if ((spi->mode & SPI_3WIRE) && (spi->mode &
		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
		 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
		return -EINVAL;
	/*
	 * Help drivers fail *cleanly* when they need options
	 * that aren't supported with their current controller.
	 * SPI_CS_WORD has a fallback software implementation,
	 * so it is ignored here.
	 */
	bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD |
				 SPI_NO_TX | SPI_NO_RX);
	ugly_bits = bad_bits &
		    (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
		     SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL);
	if (ugly_bits) {
		dev_warn(&spi->dev,
			 "setup: ignoring unsupported mode bits %x\n",
			 ugly_bits);
		spi->mode &= ~ugly_bits;
		bad_bits &= ~ugly_bits;
	}
	if (bad_bits) {
		dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
			bad_bits);
		return -EINVAL;
	}

	if (!spi->bits_per_word) {
		spi->bits_per_word = 8;
	} else {
		/*
		 * Some controllers may not support the default 8 bits-per-word
		 * so only perform the check when this is explicitly provided.
		 */
		status = __spi_validate_bits_per_word(spi->controller,
						      spi->bits_per_word);
		if (status)
			return status;
	}

	if (spi->controller->max_speed_hz &&
	    (!spi->max_speed_hz ||
	     spi->max_speed_hz > spi->controller->max_speed_hz))
		spi->max_speed_hz = spi->controller->max_speed_hz;

	mutex_lock(&spi->controller->io_mutex);

	if (spi->controller->setup) {
		status = spi->controller->setup(spi);
		if (status) {
			mutex_unlock(&spi->controller->io_mutex);
			dev_err(&spi->controller->dev, "Failed to setup device: %d\n",
				status);
			return status;
		}
	}

	if (spi->controller->auto_runtime_pm && spi->controller->set_cs) {
		status = pm_runtime_resume_and_get(spi->controller->dev.parent);
		if (status < 0) {
			mutex_unlock(&spi->controller->io_mutex);
			dev_err(&spi->controller->dev, "Failed to power device: %d\n",
				status);
			return status;
		}

		/*
		 * We do not want to return a positive value from
		 * pm_runtime_get: there are many instances of devices
		 * calling spi_setup() and checking for a non-zero return
		 * value instead of a negative return value.
		 */
		status = 0;

		spi_set_cs(spi, false, true);
		pm_runtime_mark_last_busy(spi->controller->dev.parent);
		pm_runtime_put_autosuspend(spi->controller->dev.parent);
	} else {
		spi_set_cs(spi, false, true);
	}

	mutex_unlock(&spi->controller->io_mutex);

	if (spi->rt && !spi->controller->rt) {
		spi->controller->rt = true;
		spi_set_thread_rt(spi->controller);
	}

	trace_spi_setup(spi, status);

	dev_dbg(&spi->dev, "setup mode %lu, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
		spi->mode & SPI_MODE_X_MASK,
		(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
		(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
		(spi->mode & SPI_3WIRE) ? "3wire, " : "",
		(spi->mode & SPI_LOOP) ? "loopback, " : "",
		spi->bits_per_word, spi->max_speed_hz,
		status);

	return status;
}
EXPORT_SYMBOL_GPL(spi_setup);
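
/*
 * Example (illustrative sketch): a protocol driver typically adjusts
 * the mode, word size and clock ceiling from its probe() routine and
 * then calls spi_setup(). The values below are assumptions made for
 * the example, not requirements of any particular device.
 *
 *	static int foo_probe(struct spi_device *spi)
 *	{
 *		spi->mode = SPI_MODE_3;
 *		spi->bits_per_word = 16;
 *		spi->max_speed_hz = 1 * 1000 * 1000;	// 1 MHz ceiling
 *		return spi_setup(spi);
 *	}
 */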

static int _spi_xfer_word_delay_update(struct spi_transfer *xfer,
				       struct spi_device *spi)
{
	int delay1, delay2;

	delay1 = spi_delay_to_ns(&xfer->word_delay, xfer);
	if (delay1 < 0)
		return delay1;

	delay2 = spi_delay_to_ns(&spi->word_delay, xfer);
	if (delay2 < 0)
		return delay2;

	if (delay1 < delay2)
		memcpy(&xfer->word_delay, &spi->word_delay,
		       sizeof(xfer->word_delay));

	return 0;
}
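
/*
 * Example (illustrative sketch): both the device and an individual
 * transfer can carry a word delay; the helper above keeps whichever
 * is longer. A per-device inter-word delay might be requested like
 * this (the 50 ns figure is an assumption for the example):
 *
 *	spi->word_delay.value = 50;
 *	spi->word_delay.unit = SPI_DELAY_UNIT_NSECS;
 */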

static int __spi_validate(struct spi_device *spi, struct spi_message *message)
{
	struct spi_controller *ctlr = spi->controller;
	struct spi_transfer *xfer;
	int w_size;

	if (list_empty(&message->transfers))
		return -EINVAL;

	/*
	 * If an SPI controller does not support toggling the CS line on each
	 * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO
	 * for the CS line, we can emulate the CS-per-word hardware function by
	 * splitting transfers into one-word transfers and ensuring that
	 * cs_change is set for each transfer.
	 */
	if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) ||
					  spi->cs_gpiod)) {
		size_t maxsize;
		int ret;

		maxsize = (spi->bits_per_word + 7) / 8;

		/* spi_split_transfers_maxsize() requires message->spi */
		message->spi = spi;

		ret = spi_split_transfers_maxsize(ctlr, message, maxsize,
						  GFP_KERNEL);
		if (ret)
			return ret;

		list_for_each_entry(xfer, &message->transfers, transfer_list) {
			/* Don't change cs_change on the last entry in the list */
			if (list_is_last(&xfer->transfer_list, &message->transfers))
				break;
			xfer->cs_change = 1;
		}
	}

	/*
	 * Half-duplex links include original MicroWire, and ones with
	 * only one data pin like SPI_3WIRE (switches direction) or where
	 * either MOSI or MISO is missing. They can also be caused by
	 * software limitations.
	 */
	if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) ||
	    (spi->mode & SPI_3WIRE)) {
		unsigned flags = ctlr->flags;

		list_for_each_entry(xfer, &message->transfers, transfer_list) {
			if (xfer->rx_buf && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf)
				return -EINVAL;
		}
	}

	/*
	 * Set transfer bits_per_word and max speed as spi device default if
	 * it is not set for this transfer.
	 * Set transfer tx_nbits and rx_nbits as single transfer default
	 * (SPI_NBITS_SINGLE) if it is not set for this transfer.
	 * Ensure transfer word_delay is at least as long as that required by
	 * the device itself.
	 */
	message->frame_length = 0;
	list_for_each_entry(xfer, &message->transfers, transfer_list) {
		xfer->effective_speed_hz = 0;
		message->frame_length += xfer->len;
		if (!xfer->bits_per_word)
			xfer->bits_per_word = spi->bits_per_word;

		if (!xfer->speed_hz)
			xfer->speed_hz = spi->max_speed_hz;

		if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
			xfer->speed_hz = ctlr->max_speed_hz;

		if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
			return -EINVAL;

		/*
		 * The SPI transfer length should be a multiple of the SPI
		 * word size, where the SPI word size is a power-of-two
		 * number of bytes.
		 */
		if (xfer->bits_per_word <= 8)
			w_size = 1;
		else if (xfer->bits_per_word <= 16)
			w_size = 2;
		else
			w_size = 4;

		/* No partial transfers accepted */
		if (xfer->len % w_size)
			return -EINVAL;

		if (xfer->speed_hz && ctlr->min_speed_hz &&
		    xfer->speed_hz < ctlr->min_speed_hz)
			return -EINVAL;

		if (xfer->tx_buf && !xfer->tx_nbits)
			xfer->tx_nbits = SPI_NBITS_SINGLE;
		if (xfer->rx_buf && !xfer->rx_nbits)
			xfer->rx_nbits = SPI_NBITS_SINGLE;
		/*
		 * Check transfer tx/rx_nbits:
		 * 1. check the value matches one of single, dual and quad
		 * 2. check tx/rx_nbits match the mode in spi_device
		 */
		if (xfer->tx_buf) {
			if (spi->mode & SPI_NO_TX)
				return -EINVAL;
			if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
			    xfer->tx_nbits != SPI_NBITS_DUAL &&
			    xfer->tx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
			    !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
			    !(spi->mode & SPI_TX_QUAD))
				return -EINVAL;
		}
		/* Check transfer rx_nbits */
		if (xfer->rx_buf) {
			if (spi->mode & SPI_NO_RX)
				return -EINVAL;
			if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
			    xfer->rx_nbits != SPI_NBITS_DUAL &&
			    xfer->rx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
			    !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
			    !(spi->mode & SPI_RX_QUAD))
				return -EINVAL;
		}

		if (_spi_xfer_word_delay_update(xfer, spi))
			return -EINVAL;
	}

	message->status = -EINPROGRESS;

	return 0;
}
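
/*
 * Example (illustrative sketch): a message that satisfies the checks
 * above — transfer lengths are multiples of the word size and buffers
 * match the device's mode. The command/data layout is an assumption
 * made for the example.
 *
 *	struct spi_transfer xfers[2] = {
 *		{ .tx_buf = cmd, .len = 1 },	// 8-bit words
 *		{ .rx_buf = data, .len = 4 },
 *	};
 *	struct spi_message msg;
 *
 *	spi_message_init_with_transfers(&msg, xfers, ARRAY_SIZE(xfers));
 */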

static int __spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_controller *ctlr = spi->controller;
	struct spi_transfer *xfer;

	/*
	 * Some controllers do not support doing regular SPI transfers.
	 * Return ENOTSUPP when this is the case.
	 */
	if (!ctlr->transfer)
		return -ENOTSUPP;

	message->spi = spi;

	SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_async);
	SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_async);

	trace_spi_message_submit(message);

	if (!ctlr->ptp_sts_supported) {
		list_for_each_entry(xfer, &message->transfers, transfer_list) {
			xfer->ptp_sts_word_pre = 0;
			ptp_read_system_prets(xfer->ptp_sts);
		}
	}

	return ctlr->transfer(spi, message);
}

/**
 * spi_async - asynchronous SPI transfer
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used in_irq and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code. After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 *
 * Return: zero on success, else a negative error code.
 */
int spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_controller *ctlr = spi->controller;
	int ret;
	unsigned long flags;

	ret = __spi_validate(spi, message);
	if (ret != 0)
		return ret;

	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);

	if (ctlr->bus_lock_flag)
		ret = -EBUSY;
	else
		ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_async);
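
/*
 * Example (illustrative sketch): asynchronous submission with a
 * completion callback. The callback runs in a context that cannot
 * sleep; the foo_done() name and the containing structure are
 * assumptions made for the example.
 *
 *	static void foo_done(void *context)
 *	{
 *		struct foo_priv *priv = context;
 *
 *		complete(&priv->xfer_done);	// cannot sleep here
 *	}
 *
 *	...
 *	msg->complete = foo_done;
 *	msg->context = priv;
 *	ret = spi_async(spi, msg);
 */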

/**
 * spi_async_locked - version of spi_async with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used in_irq and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code. After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 *
 * Return: zero on success, else a negative error code.
 */
static int spi_async_locked(struct spi_device *spi, struct spi_message *message)
{
	struct spi_controller *ctlr = spi->controller;
	int ret;
	unsigned long flags;

	ret = __spi_validate(spi, message);
	if (ret != 0)
		return ret;

	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);

	ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);

	return ret;
}

static void __spi_transfer_message_noqueue(struct spi_controller *ctlr, struct spi_message *msg)
{
	bool was_busy;
	int ret;

	mutex_lock(&ctlr->io_mutex);

	was_busy = ctlr->busy;

	ctlr->cur_msg = msg;
	ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
	if (ret)
		goto out;

	ctlr->cur_msg = NULL;
	ctlr->fallback = false;

	if (!was_busy) {
		kfree(ctlr->dummy_rx);
		ctlr->dummy_rx = NULL;
		kfree(ctlr->dummy_tx);
		ctlr->dummy_tx = NULL;
		if (ctlr->unprepare_transfer_hardware &&
		    ctlr->unprepare_transfer_hardware(ctlr))
			dev_err(&ctlr->dev,
				"failed to unprepare transfer hardware\n");
		spi_idle_runtime_pm(ctlr);
	}

out:
	mutex_unlock(&ctlr->io_mutex);
}

/*-------------------------------------------------------------------------*/

/*
 * Utility methods for SPI protocol drivers, layered on
 * top of the core. Some other utility methods are defined as
 * inline functions.
 */

static void spi_complete(void *arg)
{
	complete(arg);
}

static int __spi_sync(struct spi_device *spi, struct spi_message *message)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int status;
	struct spi_controller *ctlr = spi->controller;

	status = __spi_validate(spi, message);
	if (status != 0)
		return status;

	message->spi = spi;

	SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync);
	SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync);

	/*
	 * Checking queue_empty here only guarantees async/sync message
	 * ordering when coming from the same context. It does not need to
	 * guard against reentrancy from a different context. The io_mutex
	 * will catch those cases.
	 */
	if (READ_ONCE(ctlr->queue_empty) && !ctlr->must_async) {
		message->actual_length = 0;
		message->status = -EINPROGRESS;

		trace_spi_message_submit(message);

		SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync_immediate);
		SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync_immediate);

		__spi_transfer_message_noqueue(ctlr, message);

		return message->status;
	}

	/*
	 * There are messages in the async queue that could have originated
	 * from the same context, so we need to preserve ordering.
	 * Therefore we send the message to the async queue and wait until
	 * it is completed.
	 */
	message->complete = spi_complete;
	message->context = &done;
	status = spi_async_locked(spi, message);
	if (status == 0) {
		wait_for_completion(&done);
		status = message->status;
	}
	message->context = NULL;

	return status;
}

/**
 * spi_sync - blocking/synchronous SPI data transfers
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep. The sleep
 * is non-interruptible, and has no timeout. Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * Note that the SPI device's chip select is active during the message,
 * and then is normally disabled between messages. Drivers for some
 * frequently-used devices may want to minimize costs of selecting a chip,
 * by leaving it selected in anticipation that the next message will go
 * to the same chip. (That may increase power usage.)
 *
 * Also, the caller is guaranteeing that the memory associated with the
 * message will not be freed before this call returns.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync(struct spi_device *spi, struct spi_message *message)
{
	int ret;

	mutex_lock(&spi->controller->bus_lock_mutex);
	ret = __spi_sync(spi, message);
	mutex_unlock(&spi->controller->bus_lock_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_sync);
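
/*
 * Example (illustrative sketch): most callers do not build a
 * spi_message by hand; the spi_sync_transfer() helper wraps the
 * boilerplate around spi_sync(). The buffer names are assumptions
 * made for the example.
 *
 *	struct spi_transfer xfer = {
 *		.tx_buf = tx,
 *		.rx_buf = rx,
 *		.len = sizeof(tx),
 *	};
 *
 *	ret = spi_sync_transfer(spi, &xfer, 1);	// blocks until done
 */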

/**
 * spi_sync_locked - version of spi_sync with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep. The sleep
 * is non-interruptible, and has no timeout. Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
 * be released by a spi_bus_unlock call when the exclusive access is over.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
{
	return __spi_sync(spi, message);
}
EXPORT_SYMBOL_GPL(spi_sync_locked);

/**
 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
 * @ctlr: SPI bus master that should be locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep. The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
 * exclusive access is over. Data transfer must be done by spi_sync_locked
 * and spi_async_locked calls when the SPI bus lock is held.
 *
 * Return: always zero.
 */
int spi_bus_lock(struct spi_controller *ctlr)
{
	unsigned long flags;

	mutex_lock(&ctlr->bus_lock_mutex);

	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
	ctlr->bus_lock_flag = 1;
	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);

	/* Mutex remains locked until spi_bus_unlock() is called */

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_lock);

/**
 * spi_bus_unlock - release the lock for exclusive SPI bus usage
 * @ctlr: SPI bus master that was locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep. The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call releases an SPI bus lock previously obtained by an spi_bus_lock
 * call.
 *
 * Return: always zero.
 */
int spi_bus_unlock(struct spi_controller *ctlr)
{
	ctlr->bus_lock_flag = 0;

	mutex_unlock(&ctlr->bus_lock_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_unlock);
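
/*
 * Example (illustrative sketch): a driver that must issue several
 * messages back to back with no other client interleaved takes the
 * bus lock, uses only the _locked transfer calls, then releases it:
 *
 *	spi_bus_lock(spi->controller);
 *	ret = spi_sync_locked(spi, &msg1);
 *	if (!ret)
 *		ret = spi_sync_locked(spi, &msg2);
 *	spi_bus_unlock(spi->controller);
 */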

/* Portable code must never pass more than 32 bytes */
#define SPI_BUFSIZ	max(32, SMP_CACHE_BYTES)

static u8 *buf;

/**
 * spi_write_then_read - SPI synchronous write followed by read
 * @spi: device with which data will be exchanged
 * @txbuf: data to be written (need not be dma-safe)
 * @n_tx: size of txbuf, in bytes
 * @rxbuf: buffer into which data will be read (need not be dma-safe)
 * @n_rx: size of rxbuf, in bytes
 * Context: can sleep
 *
 * This performs a half duplex MicroWire style transaction with the
 * device, sending txbuf and then reading rxbuf. The return value
 * is zero for success, else a negative errno status code.
 * This call may only be used from a context that may sleep.
 *
 * Parameters to this routine are always copied using a small buffer.
 * Performance-sensitive or bulk transfer code should instead use
 * spi_{async,sync}() calls with dma-safe buffers.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_write_then_read(struct spi_device *spi,
			const void *txbuf, unsigned n_tx,
			void *rxbuf, unsigned n_rx)
{
	static DEFINE_MUTEX(lock);

	int status;
	struct spi_message message;
	struct spi_transfer x[2];
	u8 *local_buf;

	/*
	 * Use the preallocated DMA-safe buffer if we can. We can't avoid
	 * copying here (it's a pure convenience thing), but we can
	 * keep heap costs out of the hot path unless someone else is
	 * using the pre-allocated buffer or the transfer is too large.
	 */
	if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
		local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
				    GFP_KERNEL | GFP_DMA);
		if (!local_buf)
			return -ENOMEM;
	} else {
		local_buf = buf;
	}

	spi_message_init(&message);
	memset(x, 0, sizeof(x));
	if (n_tx) {
		x[0].len = n_tx;
		spi_message_add_tail(&x[0], &message);
	}
	if (n_rx) {
		x[1].len = n_rx;
		spi_message_add_tail(&x[1], &message);
	}

	memcpy(local_buf, txbuf, n_tx);
	x[0].tx_buf = local_buf;
	x[1].rx_buf = local_buf + n_tx;

	/* Do the i/o */
	status = spi_sync(spi, &message);
	if (status == 0)
		memcpy(rxbuf, x[1].rx_buf, n_rx);

	if (x[0].tx_buf == buf)
		mutex_unlock(&lock);
	else
		kfree(local_buf);

	return status;
}
EXPORT_SYMBOL_GPL(spi_write_then_read);
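
/*
 * Example (illustrative sketch): a one-byte command followed by a
 * two-byte reply, typical for reading a device register; the command
 * value and reply size are assumptions made for the example.
 *
 *	u8 cmd = 0x8f;		// hypothetical "read ID" opcode
 *	u8 id[2];
 *
 *	ret = spi_write_then_read(spi, &cmd, 1, id, sizeof(id));
 */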

/*-------------------------------------------------------------------------*/

#if IS_ENABLED(CONFIG_OF_DYNAMIC)
/* Must call put_device() when done with the returned spi_device */
static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
{
	struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node);

	return dev ? to_spi_device(dev) : NULL;
}

/* SPI controllers are not on the spi_bus, so we find them a different way */
static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
{
	struct device *dev;

	dev = class_find_device_by_of_node(&spi_master_class, node);
	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
		dev = class_find_device_by_of_node(&spi_slave_class, node);
	if (!dev)
		return NULL;

	/* Reference got in class_find_device */
	return container_of(dev, struct spi_controller, dev);
}

static int of_spi_notify(struct notifier_block *nb, unsigned long action,
			 void *arg)
{
	struct of_reconfig_data *rd = arg;
	struct spi_controller *ctlr;
	struct spi_device *spi;

	switch (of_reconfig_get_state_change(action, arg)) {
	case OF_RECONFIG_CHANGE_ADD:
		ctlr = of_find_spi_controller_by_node(rd->dn->parent);
		if (ctlr == NULL)
			return NOTIFY_OK;	/* Not for us */

		if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
			put_device(&ctlr->dev);
			return NOTIFY_OK;
		}

		spi = of_register_spi_device(ctlr, rd->dn);
		put_device(&ctlr->dev);

		if (IS_ERR(spi)) {
			pr_err("%s: failed to create for '%pOF'\n",
			       __func__, rd->dn);
			of_node_clear_flag(rd->dn, OF_POPULATED);
			return notifier_from_errno(PTR_ERR(spi));
		}
		break;

	case OF_RECONFIG_CHANGE_REMOVE:
		/* Already depopulated? */
		if (!of_node_check_flag(rd->dn, OF_POPULATED))
			return NOTIFY_OK;

		/* Find our device by node */
		spi = of_find_spi_device_by_node(rd->dn);
		if (spi == NULL)
			return NOTIFY_OK;	/* No? Not meant for us */

		/* Unregister takes one ref away */
		spi_unregister_device(spi);

		/* And put the reference of the find */
		put_device(&spi->dev);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block spi_of_notifier = {
	.notifier_call = of_spi_notify,
};
#else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
extern struct notifier_block spi_of_notifier;
#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */

#if IS_ENABLED(CONFIG_ACPI)
static int spi_acpi_controller_match(struct device *dev, const void *data)
{
	return ACPI_COMPANION(dev->parent) == data;
}

static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
{
	struct device *dev;

	dev = class_find_device(&spi_master_class, NULL, adev,
				spi_acpi_controller_match);
	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
		dev = class_find_device(&spi_slave_class, NULL, adev,
					spi_acpi_controller_match);
	if (!dev)
		return NULL;

	return container_of(dev, struct spi_controller, dev);
}

static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
{
	struct device *dev;

	dev = bus_find_device_by_acpi_dev(&spi_bus_type, adev);
	return to_spi_device(dev);
}

static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
			   void *arg)
{
	struct acpi_device *adev = arg;
	struct spi_controller *ctlr;
	struct spi_device *spi;

	switch (value) {
	case ACPI_RECONFIG_DEVICE_ADD:
		ctlr = acpi_spi_find_controller_by_adev(acpi_dev_parent(adev));
		if (!ctlr)
			break;

		acpi_register_spi_device(ctlr, adev);
		put_device(&ctlr->dev);
		break;
	case ACPI_RECONFIG_DEVICE_REMOVE:
		if (!acpi_device_enumerated(adev))
			break;

		spi = acpi_spi_find_device_by_adev(adev);
		if (!spi)
			break;

		spi_unregister_device(spi);
		put_device(&spi->dev);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block spi_acpi_notifier = {
	.notifier_call = acpi_spi_notify,
};
#else
extern struct notifier_block spi_acpi_notifier;
#endif

static int __init spi_init(void)
{
	int status;

	buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
	if (!buf) {
		status = -ENOMEM;
		goto err0;
	}

	status = bus_register(&spi_bus_type);
	if (status < 0)
		goto err1;

	status = class_register(&spi_master_class);
	if (status < 0)
		goto err2;

	if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
		status = class_register(&spi_slave_class);
		if (status < 0)
			goto err3;
	}

	if (IS_ENABLED(CONFIG_OF_DYNAMIC))
		WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
	if (IS_ENABLED(CONFIG_ACPI))
		WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));

	return 0;

err3:
	class_unregister(&spi_master_class);
err2:
	bus_unregister(&spi_bus_type);
err1:
	kfree(buf);
	buf = NULL;
err0:
	return status;
}

/*
 * A board_info is normally registered in arch_initcall(),
 * but even essential drivers wait till later.
 *
 * REVISIT only boardinfo really needs static linking. The rest (device and
 * driver registration) _could_ be dynamically linked (modular) ... Costs
 * include needing to have boardinfo data structures be much more public.
 */
postcore_initcall(spi_init);