/*
 * Copyright (C) 2005 David Brownell
 * Copyright (C) 2008 Secret Lab Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/clk/clk-conf.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/ioport.h>
#include <linux/acpi.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>

static void spidev_release(struct device *dev)
{
	struct spi_device	*spi = to_spi_device(dev);

	/* spi masters may cleanup for released devices */
	if (spi->master->cleanup)
		spi->master->cleanup(spi);

	spi_master_put(spi->master);
	kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
	const struct spi_device	*spi = to_spi_device(dev);
	int len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);

#define SPI_STATISTICS_ATTRS(field, file)				\
static ssize_t spi_master_##field##_show(struct device *dev,		\
					 struct device_attribute *attr,	\
					 char *buf)			\
{									\
	struct spi_master *master = container_of(dev,			\
						 struct spi_master, dev); \
	return spi_statistics_##field##_show(&master->statistics, buf); \
}									\
static struct device_attribute dev_attr_spi_master_##field = {		\
	.attr = { .name = file, .mode = S_IRUGO },			\
	.show = spi_master_##field##_show,				\
};									\
static ssize_t spi_device_##field##_show(struct device *dev,		\
					 struct device_attribute *attr,	\
					 char *buf)			\
{									\
	struct spi_device *spi = to_spi_device(dev);			\
	return spi_statistics_##field##_show(&spi->statistics, buf);	\
}									\
static struct device_attribute dev_attr_spi_device_##field = {		\
	.attr = { .name = file, .mode = S_IRUGO },			\
	.show = spi_device_##field##_show,				\
}

#define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string)	\
static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
					    char *buf)			\
{									\
	unsigned long flags;						\
	ssize_t len;							\
	spin_lock_irqsave(&stat->lock, flags);				\
	len = sprintf(buf, format_string, stat->field);			\
	spin_unlock_irqrestore(&stat->lock, flags);			\
	return len;							\
}									\
SPI_STATISTICS_ATTRS(name, file)

#define SPI_STATISTICS_SHOW(field, format_string)			\
	SPI_STATISTICS_SHOW_NAME(field, __stringify(field),		\
				 field, format_string)

SPI_STATISTICS_SHOW(messages, "%lu");
SPI_STATISTICS_SHOW(transfers, "%lu");
SPI_STATISTICS_SHOW(errors, "%lu");
SPI_STATISTICS_SHOW(timedout, "%lu");

SPI_STATISTICS_SHOW(spi_sync, "%lu");
SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu");
SPI_STATISTICS_SHOW(spi_async, "%lu");

SPI_STATISTICS_SHOW(bytes, "%llu");
SPI_STATISTICS_SHOW(bytes_rx, "%llu");
SPI_STATISTICS_SHOW(bytes_tx, "%llu");

#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number)		\
	SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index,		\
				 "transfer_bytes_histo_" number,	\
				 transfer_bytes_histo[index], "%lu")
SPI_STATISTICS_TRANSFER_BYTES_HISTO(0,  "0-1");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(1,  "2-3");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(2,  "4-7");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(3,  "8-15");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(4,  "16-31");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(5,  "32-63");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(6,  "64-127");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(7,  "128-255");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(8,  "256-511");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(9,  "512-1023");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");

static struct attribute *spi_dev_attrs[] = {
	&dev_attr_modalias.attr,
	NULL,
};

static const struct attribute_group spi_dev_group = {
	.attrs  = spi_dev_attrs,
};

static struct attribute *spi_device_statistics_attrs[] = {
	&dev_attr_spi_device_messages.attr,
	&dev_attr_spi_device_transfers.attr,
	&dev_attr_spi_device_errors.attr,
	&dev_attr_spi_device_timedout.attr,
	&dev_attr_spi_device_spi_sync.attr,
	&dev_attr_spi_device_spi_sync_immediate.attr,
	&dev_attr_spi_device_spi_async.attr,
	&dev_attr_spi_device_bytes.attr,
	&dev_attr_spi_device_bytes_rx.attr,
	&dev_attr_spi_device_bytes_tx.attr,
	&dev_attr_spi_device_transfer_bytes_histo0.attr,
	&dev_attr_spi_device_transfer_bytes_histo1.attr,
	&dev_attr_spi_device_transfer_bytes_histo2.attr,
	&dev_attr_spi_device_transfer_bytes_histo3.attr,
	&dev_attr_spi_device_transfer_bytes_histo4.attr,
	&dev_attr_spi_device_transfer_bytes_histo5.attr,
	&dev_attr_spi_device_transfer_bytes_histo6.attr,
	&dev_attr_spi_device_transfer_bytes_histo7.attr,
	&dev_attr_spi_device_transfer_bytes_histo8.attr,
	&dev_attr_spi_device_transfer_bytes_histo9.attr,
	&dev_attr_spi_device_transfer_bytes_histo10.attr,
	&dev_attr_spi_device_transfer_bytes_histo11.attr,
	&dev_attr_spi_device_transfer_bytes_histo12.attr,
	&dev_attr_spi_device_transfer_bytes_histo13.attr,
	&dev_attr_spi_device_transfer_bytes_histo14.attr,
	&dev_attr_spi_device_transfer_bytes_histo15.attr,
	&dev_attr_spi_device_transfer_bytes_histo16.attr,
	NULL,
};

static const struct attribute_group spi_device_statistics_group = {
	.name  = "statistics",
	.attrs  = spi_device_statistics_attrs,
};

static const struct attribute_group *spi_dev_groups[] = {
	&spi_dev_group,
	&spi_device_statistics_group,
	NULL,
};

static struct attribute *spi_master_statistics_attrs[] = {
	&dev_attr_spi_master_messages.attr,
	&dev_attr_spi_master_transfers.attr,
	&dev_attr_spi_master_errors.attr,
	&dev_attr_spi_master_timedout.attr,
	&dev_attr_spi_master_spi_sync.attr,
	&dev_attr_spi_master_spi_sync_immediate.attr,
	&dev_attr_spi_master_spi_async.attr,
	&dev_attr_spi_master_bytes.attr,
	&dev_attr_spi_master_bytes_rx.attr,
	&dev_attr_spi_master_bytes_tx.attr,
	&dev_attr_spi_master_transfer_bytes_histo0.attr,
	&dev_attr_spi_master_transfer_bytes_histo1.attr,
	&dev_attr_spi_master_transfer_bytes_histo2.attr,
	&dev_attr_spi_master_transfer_bytes_histo3.attr,
	&dev_attr_spi_master_transfer_bytes_histo4.attr,
	&dev_attr_spi_master_transfer_bytes_histo5.attr,
	&dev_attr_spi_master_transfer_bytes_histo6.attr,
	&dev_attr_spi_master_transfer_bytes_histo7.attr,
	&dev_attr_spi_master_transfer_bytes_histo8.attr,
	&dev_attr_spi_master_transfer_bytes_histo9.attr,
	&dev_attr_spi_master_transfer_bytes_histo10.attr,
	&dev_attr_spi_master_transfer_bytes_histo11.attr,
	&dev_attr_spi_master_transfer_bytes_histo12.attr,
	&dev_attr_spi_master_transfer_bytes_histo13.attr,
	&dev_attr_spi_master_transfer_bytes_histo14.attr,
	&dev_attr_spi_master_transfer_bytes_histo15.attr,
	&dev_attr_spi_master_transfer_bytes_histo16.attr,
	NULL,
};

static const struct attribute_group spi_master_statistics_group = {
	.name  = "statistics",
	.attrs  = spi_master_statistics_attrs,
};

static const struct attribute_group *spi_master_groups[] = {
	&spi_master_statistics_group,
	NULL,
};

void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
				       struct spi_transfer *xfer,
				       struct spi_master *master)
{
	unsigned long flags;
	/* bucket by power of two: fls() gives the transfer's log2 size */
	int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;

	if (l2len < 0)
		l2len = 0;

	spin_lock_irqsave(&stats->lock, flags);

	stats->transfers++;
	stats->transfer_bytes_histo[l2len]++;

	stats->bytes += xfer->len;
	if ((xfer->tx_buf) &&
	    (xfer->tx_buf != master->dummy_tx))
		stats->bytes_tx += xfer->len;
	if ((xfer->rx_buf) &&
	    (xfer->rx_buf != master->dummy_rx))
		stats->bytes_rx += xfer->len;

	spin_unlock_irqrestore(&stats->lock, flags);
}
EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats);

/* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */

static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
						const struct spi_device *sdev)
{
	while (id->name[0]) {
		if (!strcmp(sdev->modalias, id->name))
			return id;
		id++;
	}
	return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

	return spi_match_id(sdrv->id_table, sdev);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);

static int spi_match_device(struct device *dev, struct device_driver *drv)
{
	const struct spi_device	*spi = to_spi_device(dev);
	const struct spi_driver	*sdrv = to_spi_driver(drv);

	/* Attempt an OF style match */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	if (sdrv->id_table)
		return !!spi_match_id(sdrv->id_table, spi);

	return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	const struct spi_device		*spi = to_spi_device(dev);
	int rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
	return 0;
}

struct bus_type spi_bus_type = {
	.name		= "spi",
	.dev_groups	= spi_dev_groups,
	.match		= spi_match_device,
	.uevent		= spi_uevent,
};
EXPORT_SYMBOL_GPL(spi_bus_type);

static int spi_drv_probe(struct device *dev)
{
	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);
	struct spi_device		*spi = to_spi_device(dev);
	int ret;

	ret = of_clk_set_defaults(dev->of_node, false);
	if (ret)
		return ret;

	if (dev->of_node) {
		spi->irq = of_irq_get(dev->of_node, 0);
		if (spi->irq == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		if (spi->irq < 0)
			spi->irq = 0;
	}

	ret = dev_pm_domain_attach(dev, true);
	if (ret != -EPROBE_DEFER) {
		ret = sdrv->probe(spi);
		if (ret)
			dev_pm_domain_detach(dev, true);
	}

	return ret;
}

static int spi_drv_remove(struct device *dev)
{
	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);
	int ret;

	ret = sdrv->remove(to_spi_device(dev));
	dev_pm_domain_detach(dev, true);

	return ret;
}

static void spi_drv_shutdown(struct device *dev)
{
	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);

	sdrv->shutdown(to_spi_device(dev));
}

/**
 * __spi_register_driver - register a SPI driver
 * @owner: owner module of the driver to register
 * @sdrv: the driver to register
 *
 * Return: zero on success, else a negative error code.
 */
int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
	sdrv->driver.owner = owner;
	sdrv->driver.bus = &spi_bus_type;
	if (sdrv->probe)
		sdrv->driver.probe = spi_drv_probe;
	if (sdrv->remove)
		sdrv->driver.remove = spi_drv_remove;
	if (sdrv->shutdown)
		sdrv->driver.shutdown = spi_drv_shutdown;
	return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__spi_register_driver);
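
/*
 * Illustrative sketch (not part of the original file): a minimal client
 * driver reaching __spi_register_driver() via the spi_register_driver()
 * wrapper and the module_spi_driver() convenience macro.  All "example"
 * names are hypothetical.
 *
 *	static int example_probe(struct spi_device *spi)
 *	{
 *		return 0;	// claim the device; start I/O later
 *	}
 *
 *	static int example_remove(struct spi_device *spi)
 *	{
 *		return 0;
 *	}
 *
 *	static struct spi_driver example_driver = {
 *		.driver	= { .name = "example" },
 *		.probe	= example_probe,
 *		.remove	= example_remove,
 *	};
 *	module_spi_driver(example_driver);
 */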

/*-------------------------------------------------------------------------*/

/* SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific.  Similarly with SPI master drivers.
 * Device registration normally goes into something like
 * arch/.../mach.../board-YYY.c with other readonly (flashable) information
 * about mainboard devices.
 */

struct boardinfo {
	struct list_head	list;
	struct spi_board_info	board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_master_list);

/*
 * Used to protect add/del operation for board_info list and
 * spi_master list, and their matching process
 */
static DEFINE_MUTEX(board_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @master: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately.  This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible to call spi_add_device() on the returned
 * spi_device structure to add it to the SPI master.  If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Return: a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_master *master)
{
	struct spi_device	*spi;

	if (!spi_master_get(master))
		return NULL;

	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
	if (!spi) {
		spi_master_put(master);
		return NULL;
	}

	spi->master = master;
	spi->dev.parent = &master->dev;
	spi->dev.bus = &spi_bus_type;
	spi->dev.release = spidev_release;
	spi->cs_gpio = -ENOENT;

	spin_lock_init(&spi->statistics.lock);

	device_initialize(&spi->dev);
	return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);

static void spi_dev_set_name(struct spi_device *spi)
{
	struct acpi_device *adev = ACPI_COMPANION(&spi->dev);

	if (adev) {
		dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
		return;
	}

	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev),
		     spi->chip_select);
}

static int spi_dev_check(struct device *dev, void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct spi_device *new_spi = data;

	if (spi->master == new_spi->master &&
	    spi->chip_select == new_spi->chip_select)
		return -EBUSY;
	return 0;
}

/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device.  Devices allocated with
 * spi_alloc_device can be added onto the spi bus with this function.
 *
 * Return: 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
	static DEFINE_MUTEX(spi_add_lock);
	struct spi_master *master = spi->master;
	struct device *dev = master->dev.parent;
	int status;

	/* Chipselects are numbered 0..max; validate. */
	if (spi->chip_select >= master->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n",
			spi->chip_select,
			master->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	/* We need to make sure there's no other device with this
	 * chipselect **BEFORE** we call setup(), else we'll trash
	 * its configuration.  Lock against concurrent add() calls.
	 */
	mutex_lock(&spi_add_lock);

	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
	if (status) {
		dev_err(dev, "chipselect %d already in use\n",
				spi->chip_select);
		goto done;
	}

	if (master->cs_gpios)
		spi->cs_gpio = master->cs_gpios[spi->chip_select];

	/* Drivers may modify this initial i/o setup, but will
	 * normally rely on the device being setup.  Devices
	 * using SPI_CS_HIGH can't coexist well otherwise...
	 */
	status = spi_setup(spi);
	if (status < 0) {
		dev_err(dev, "can't setup %s, status %d\n",
				dev_name(&spi->dev), status);
		goto done;
	}

	/* Device may be bound to an active driver when this returns */
	status = device_add(&spi->dev);
	if (status < 0)
		dev_err(dev, "can't add %s, status %d\n",
				dev_name(&spi->dev), status);
	else
		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));

done:
	mutex_unlock(&spi_add_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);
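
/*
 * Illustrative sketch (not part of the original file): pairing
 * spi_alloc_device() with spi_add_device(), and discarding the device with
 * spi_dev_put() on failure as documented above.  All identifiers other
 * than the core API calls are hypothetical.
 *
 *	static int example_attach(struct spi_master *master)
 *	{
 *		struct spi_device *spi = spi_alloc_device(master);
 *
 *		if (!spi)
 *			return -ENOMEM;
 *		spi->chip_select = 0;
 *		spi->max_speed_hz = 1000000;
 *		strlcpy(spi->modalias, "example-chip", sizeof(spi->modalias));
 *		if (spi_add_device(spi)) {
 *			spi_dev_put(spi);	// discard without registering
 *			return -ENODEV;
 *		}
 *		return 0;
 *	}
 */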

/**
 * spi_new_device - instantiate one new SPI device
 * @master: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices.  Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Return: the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_master *master,
				  struct spi_board_info *chip)
{
	struct spi_device	*proxy;
	int			status;

	/* NOTE:  caller did any chip->bus_num checks necessary.
	 *
	 * Also, unless we change the return value convention to use
	 * error-or-pointer (not NULL-or-pointer), troubleshootability
	 * suggests syslogged diagnostics are best here (ugh).
	 */

	proxy = spi_alloc_device(master);
	if (!proxy)
		return NULL;

	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

	proxy->chip_select = chip->chip_select;
	proxy->max_speed_hz = chip->max_speed_hz;
	proxy->mode = chip->mode;
	proxy->irq = chip->irq;
	strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
	proxy->dev.platform_data = (void *) chip->platform_data;
	proxy->controller_data = chip->controller_data;
	proxy->controller_state = NULL;

	status = spi_add_device(proxy);
	if (status < 0) {
		spi_dev_put(proxy);
		return NULL;
	}

	return proxy;
}
EXPORT_SYMBOL_GPL(spi_new_device);
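
/*
 * Illustrative sketch (not part of the original file): an adapter driver
 * describing a device it discovered out-of-band and instantiating it with
 * spi_new_device().  The board_info contents are hypothetical.
 *
 *	static struct spi_device *example_add_sensor(struct spi_master *master)
 *	{
 *		struct spi_board_info chip = {
 *			.modalias	= "example-sensor",
 *			.max_speed_hz	= 500000,
 *			.mode		= SPI_MODE_0,
 *			.chip_select	= 1,
 *		};
 *
 *		return spi_new_device(master, &chip);	// NULL on failure
 *	}
 */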

/**
 * spi_unregister_device - unregister a single SPI device
 * @spi: spi_device to unregister
 *
 * Start making the passed SPI device vanish. Normally this would be handled
 * by spi_unregister_master().
 */
void spi_unregister_device(struct spi_device *spi)
{
	if (!spi)
		return;

	if (spi->dev.of_node)
		of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
	device_unregister(&spi->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_device);

static void spi_match_master_to_boardinfo(struct spi_master *master,
					  struct spi_board_info *bi)
{
	struct spi_device *dev;

	if (master->bus_num != bi->bus_num)
		return;

	dev = spi_new_device(master, bi);
	if (!dev)
		dev_err(master->dev.parent, "can't create new device for %s\n",
			bi->modalias);
}

/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table.  Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined.  We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
	struct boardinfo *bi;
	int i;

	if (!n)
		return -EINVAL;

	bi = kzalloc(n * sizeof(*bi), GFP_KERNEL);
	if (!bi)
		return -ENOMEM;

	for (i = 0; i < n; i++, bi++, info++) {
		struct spi_master *master;

		memcpy(&bi->board_info, info, sizeof(*info));
		mutex_lock(&board_lock);
		list_add_tail(&bi->list, &board_list);
		list_for_each_entry(master, &spi_master_list, list)
			spi_match_master_to_boardinfo(master, &bi->board_info);
		mutex_unlock(&board_lock);
	}

	return 0;
}
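
/*
 * Illustrative sketch (not part of the original file): a board file
 * declaring a hard-wired device from early init, as described in the
 * kernel-doc above.  The table contents and names are hypothetical.
 *
 *	static struct spi_board_info example_board_info[] __initdata = {
 *		{
 *			.modalias	= "example-flash",
 *			.max_speed_hz	= 25000000,
 *			.bus_num	= 0,
 *			.chip_select	= 0,
 *		},
 *	};
 *
 *	static int __init example_board_init(void)
 *	{
 *		return spi_register_board_info(example_board_info,
 *					       ARRAY_SIZE(example_board_info));
 *	}
 *	arch_initcall(example_board_init);
 */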

/*-------------------------------------------------------------------------*/

static void spi_set_cs(struct spi_device *spi, bool enable)
{
	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	if (gpio_is_valid(spi->cs_gpio))
		gpio_set_value(spi->cs_gpio, !enable);
	else if (spi->master->set_cs)
		spi->master->set_cs(spi, !enable);
}

#ifdef CONFIG_HAS_DMA
static int spi_map_buf(struct spi_master *master, struct device *dev,
		       struct sg_table *sgt, void *buf, size_t len,
		       enum dma_data_direction dir)
{
	const bool vmalloced_buf = is_vmalloc_addr(buf);
	unsigned int max_seg_size = dma_get_max_seg_size(dev);
	int desc_len;
	int sgs;
	struct page *vm_page;
	void *sg_buf;
	size_t min;
	int i, ret;

	if (vmalloced_buf) {
		desc_len = min_t(int, max_seg_size, PAGE_SIZE);
		sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
	} else {
		desc_len = min_t(int, max_seg_size, master->max_dma_len);
		sgs = DIV_ROUND_UP(len, desc_len);
	}

	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
	if (ret != 0)
		return ret;

	for (i = 0; i < sgs; i++) {
		if (vmalloced_buf) {
			min = min_t(size_t,
				    len, desc_len - offset_in_page(buf));
			vm_page = vmalloc_to_page(buf);
			if (!vm_page) {
				sg_free_table(sgt);
				return -ENOMEM;
			}
			sg_set_page(&sgt->sgl[i], vm_page,
				    min, offset_in_page(buf));
		} else {
			min = min_t(size_t, len, desc_len);
			sg_buf = buf;
			sg_set_buf(&sgt->sgl[i], sg_buf, min);
		}

		buf += min;
		len -= min;
	}

	ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
	if (!ret)
		ret = -ENOMEM;
	if (ret < 0) {
		sg_free_table(sgt);
		return ret;
	}

	sgt->nents = ret;

	return 0;
}

static void spi_unmap_buf(struct spi_master *master, struct device *dev,
			  struct sg_table *sgt, enum dma_data_direction dir)
{
	if (sgt->orig_nents) {
		dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
		sg_free_table(sgt);
	}
}

static int __spi_map_msg(struct spi_master *master, struct spi_message *msg)
{
	struct device *tx_dev, *rx_dev;
	struct spi_transfer *xfer;
	int ret;

	if (!master->can_dma)
		return 0;

	if (master->dma_tx)
		tx_dev = master->dma_tx->device->dev;
	else
		tx_dev = &master->dev;

	if (master->dma_rx)
		rx_dev = master->dma_rx->device->dev;
	else
		rx_dev = &master->dev;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!master->can_dma(master, msg->spi, xfer))
			continue;

		if (xfer->tx_buf != NULL) {
			ret = spi_map_buf(master, tx_dev, &xfer->tx_sg,
					  (void *)xfer->tx_buf, xfer->len,
					  DMA_TO_DEVICE);
			if (ret != 0)
				return ret;
		}

		if (xfer->rx_buf != NULL) {
			ret = spi_map_buf(master, rx_dev, &xfer->rx_sg,
					  xfer->rx_buf, xfer->len,
					  DMA_FROM_DEVICE);
			if (ret != 0) {
				spi_unmap_buf(master, tx_dev, &xfer->tx_sg,
					      DMA_TO_DEVICE);
				return ret;
			}
		}
	}

	master->cur_msg_mapped = true;

	return 0;
}

static int __spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	struct device *tx_dev, *rx_dev;

	if (!master->cur_msg_mapped || !master->can_dma)
		return 0;

	if (master->dma_tx)
		tx_dev = master->dma_tx->device->dev;
	else
		tx_dev = &master->dev;

	if (master->dma_rx)
		rx_dev = master->dma_rx->device->dev;
	else
		rx_dev = &master->dev;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!master->can_dma(master, msg->spi, xfer))
			continue;

		spi_unmap_buf(master, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
		spi_unmap_buf(master, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
	}

	return 0;
}
#else /* !CONFIG_HAS_DMA */
static inline int __spi_map_msg(struct spi_master *master,
				struct spi_message *msg)
{
	return 0;
}

static inline int __spi_unmap_msg(struct spi_master *master,
				  struct spi_message *msg)
{
	return 0;
}
#endif /* !CONFIG_HAS_DMA */

static inline int spi_unmap_msg(struct spi_master *master,
				struct spi_message *msg)
{
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/*
		 * Restore the original value of tx_buf or rx_buf if they are
		 * NULL.
		 */
		if (xfer->tx_buf == master->dummy_tx)
			xfer->tx_buf = NULL;
		if (xfer->rx_buf == master->dummy_rx)
			xfer->rx_buf = NULL;
	}

	return __spi_unmap_msg(master, msg);
}

static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	void *tmp;
	unsigned int max_tx, max_rx;

	if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
		max_tx = 0;
		max_rx = 0;

		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			if ((master->flags & SPI_MASTER_MUST_TX) &&
			    !xfer->tx_buf)
				max_tx = max(xfer->len, max_tx);
			if ((master->flags & SPI_MASTER_MUST_RX) &&
			    !xfer->rx_buf)
				max_rx = max(xfer->len, max_rx);
		}

		if (max_tx) {
			tmp = krealloc(master->dummy_tx, max_tx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			master->dummy_tx = tmp;
			memset(tmp, 0, max_tx);
		}

		if (max_rx) {
			tmp = krealloc(master->dummy_rx, max_rx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			master->dummy_rx = tmp;
		}

		if (max_tx || max_rx) {
			list_for_each_entry(xfer, &msg->transfers,
					    transfer_list) {
				if (!xfer->tx_buf)
					xfer->tx_buf = master->dummy_tx;
				if (!xfer->rx_buf)
					xfer->rx_buf = master->dummy_rx;
			}
		}
	}

	return __spi_map_msg(master, msg);
}

/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation.  It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_master *master,
				    struct spi_message *msg)
{
	struct spi_transfer *xfer;
	bool keep_cs = false;
	int ret = 0;
	unsigned long ms = 1;
	struct spi_statistics *statm = &master->statistics;
	struct spi_statistics *stats = &msg->spi->statistics;

	spi_set_cs(msg->spi, true);

	SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
	SPI_STATISTICS_INCREMENT_FIELD(stats, messages);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		trace_spi_transfer_start(msg, xfer);

		spi_statistics_add_transfer_stats(statm, xfer, master);
		spi_statistics_add_transfer_stats(stats, xfer, master);

		if (xfer->tx_buf || xfer->rx_buf) {
			reinit_completion(&master->xfer_completion);

			ret = master->transfer_one(master, msg->spi, xfer);
			if (ret < 0) {
				SPI_STATISTICS_INCREMENT_FIELD(statm,
							       errors);
				SPI_STATISTICS_INCREMENT_FIELD(stats,
							       errors);
				dev_err(&msg->spi->dev,
					"SPI transfer failed: %d\n", ret);
				goto out;
			}

			if (ret > 0) {
				ret = 0;
				ms = xfer->len * 8 * 1000 / xfer->speed_hz;
				ms += ms + 100; /* some tolerance */

				ms = wait_for_completion_timeout(&master->xfer_completion,
								 msecs_to_jiffies(ms));
			}

			if (ms == 0) {
				SPI_STATISTICS_INCREMENT_FIELD(statm,
							       timedout);
				SPI_STATISTICS_INCREMENT_FIELD(stats,
							       timedout);
				dev_err(&msg->spi->dev,
					"SPI transfer timed out\n");
				msg->status = -ETIMEDOUT;
			}
		} else {
			if (xfer->len)
				dev_err(&msg->spi->dev,
					"Bufferless transfer has length %u\n",
					xfer->len);
		}

		trace_spi_transfer_stop(msg, xfer);

		if (msg->status != -EINPROGRESS)
			goto out;

		if (xfer->delay_usecs)
			udelay(xfer->delay_usecs);

		if (xfer->cs_change) {
			if (list_is_last(&xfer->transfer_list,
					 &msg->transfers)) {
				keep_cs = true;
			} else {
				spi_set_cs(msg->spi, false);
				udelay(10);
				spi_set_cs(msg->spi, true);
			}
		}

		msg->actual_length += xfer->len;
	}

out:
	if (ret != 0 || !keep_cs)
		spi_set_cs(msg->spi, false);

	if (msg->status == -EINPROGRESS)
		msg->status = ret;

	if (msg->status && master->handle_err)
		master->handle_err(master, msg);

	spi_finalize_current_message(master);

	return ret;
}

/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @master: the master reporting completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled.
 */
void spi_finalize_current_transfer(struct spi_master *master)
{
	complete(&master->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
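
/*
 * Illustrative sketch (not part of the original file): the typical split
 * between a controller driver's transfer_one() and its completion interrupt
 * when relying on the core transfer_one_message() above.  A positive return
 * from transfer_one() tells the core to wait on xfer_completion.  The
 * "example" names, including example_start_dma(), are hypothetical.
 *
 *	static int example_transfer_one(struct spi_master *master,
 *					struct spi_device *spi,
 *					struct spi_transfer *xfer)
 *	{
 *		example_start_dma(master, xfer);	// kick off the transfer
 *		return 1;	// in progress; core waits for completion
 *	}
 *
 *	static irqreturn_t example_irq(int irq, void *dev_id)
 *	{
 *		struct spi_master *master = dev_id;
 *
 *		spi_finalize_current_transfer(master);	// wakes the core
 *		return IRQ_HANDLED;
 *	}
 */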

/**
 * __spi_pump_messages - function which processes spi message queue
 * @master: master to process queue for
 * @in_kthread: true if we are in the context of the message pump thread
 * @bus_locked: true if the bus mutex is held when calling this function
 *
 * This function checks if there is any spi message in the queue that
 * needs processing and if so call out to the driver to initialize hardware
 * and transfer each message.
 *
 * Note that it is called both from the kthread itself and also from
 * inside spi_sync(); the queue extraction handling at the top of the
 * function should deal with this safely.
 */
static void __spi_pump_messages(struct spi_master *master, bool in_kthread,
				bool bus_locked)
{
	unsigned long flags;
	bool was_busy = false;
	int ret;

	/* Lock queue */
	spin_lock_irqsave(&master->queue_lock, flags);

	/* Make sure we are not already running a message */
	if (master->cur_msg) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return;
	}

	/* If another context is idling the device then defer */
	if (master->idling) {
		queue_kthread_work(&master->kworker, &master->pump_messages);
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return;
	}

	/* Check if the queue is idle */
	if (list_empty(&master->queue) || !master->running) {
		if (!master->busy) {
			spin_unlock_irqrestore(&master->queue_lock, flags);
			return;
		}

		/* Only do teardown in the thread */
		if (!in_kthread) {
			queue_kthread_work(&master->kworker,
					   &master->pump_messages);
			spin_unlock_irqrestore(&master->queue_lock, flags);
			return;
		}

		master->busy = false;
		master->idling = true;
		spin_unlock_irqrestore(&master->queue_lock, flags);

		kfree(master->dummy_rx);
		master->dummy_rx = NULL;
		kfree(master->dummy_tx);
		master->dummy_tx = NULL;
		if (master->unprepare_transfer_hardware &&
		    master->unprepare_transfer_hardware(master))
			dev_err(&master->dev,
				"failed to unprepare transfer hardware\n");
		if (master->auto_runtime_pm) {
			pm_runtime_mark_last_busy(master->dev.parent);
			pm_runtime_put_autosuspend(master->dev.parent);
		}
		trace_spi_master_idle(master);

		spin_lock_irqsave(&master->queue_lock, flags);
		master->idling = false;
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return;
	}

	/* Extract head of queue */
	master->cur_msg =
		list_first_entry(&master->queue, struct spi_message, queue);

	list_del_init(&master->cur_msg->queue);
	if (master->busy)
		was_busy = true;
	else
		master->busy = true;
	spin_unlock_irqrestore(&master->queue_lock, flags);

	if (!was_busy && master->auto_runtime_pm) {
		ret = pm_runtime_get_sync(master->dev.parent);
		if (ret < 0) {
			dev_err(&master->dev, "Failed to power device: %d\n",
				ret);
			return;
		}
	}

	if (!was_busy)
		trace_spi_master_busy(master);

	if (!was_busy && master->prepare_transfer_hardware) {
		ret = master->prepare_transfer_hardware(master);
		if (ret) {
			dev_err(&master->dev,
				"failed to prepare transfer hardware\n");

			if (master->auto_runtime_pm)
				pm_runtime_put(master->dev.parent);
			return;
		}
	}

	if (!bus_locked)
		mutex_lock(&master->bus_lock_mutex);

	trace_spi_message_start(master->cur_msg);

	if (master->prepare_message) {
		ret = master->prepare_message(master, master->cur_msg);
		if (ret) {
			dev_err(&master->dev,
				"failed to prepare message: %d\n", ret);
			master->cur_msg->status = ret;
			spi_finalize_current_message(master);
			goto out;
		}
		master->cur_msg_prepared = true;
	}

	ret = spi_map_msg(master, master->cur_msg);
	if (ret) {
		master->cur_msg->status = ret;
		spi_finalize_current_message(master);
		goto out;
	}

	ret = master->transfer_one_message(master, master->cur_msg);
	if (ret) {
		dev_err(&master->dev,
			"failed to transfer one message from queue\n");
		goto out;
	}

out:
	if (!bus_locked)
		mutex_unlock(&master->bus_lock_mutex);

	/* Prod the scheduler in case transfer_one() was busy waiting */
	if (!ret)
		cond_resched();
}

/**
 * spi_pump_messages - kthread work function which processes spi message queue
 * @work: pointer to kthread work struct contained in the master struct
 */
static void spi_pump_messages(struct kthread_work *work)
{
	struct spi_master *master =
		container_of(work, struct spi_master, pump_messages);

	__spi_pump_messages(master, true, false);
}

static int spi_init_queue(struct spi_master *master)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };

	master->running = false;
	master->busy = false;

	init_kthread_worker(&master->kworker);
	master->kworker_task = kthread_run(kthread_worker_fn,
					   &master->kworker, "%s",
					   dev_name(&master->dev));
	if (IS_ERR(master->kworker_task)) {
		dev_err(&master->dev, "failed to create message pump task\n");
		return PTR_ERR(master->kworker_task);
	}
	init_kthread_work(&master->pump_messages, spi_pump_messages);

	/*
	 * Master config will indicate if this controller should run the
	 * message pump with high (realtime) priority to reduce the transfer
	 * latency on the bus by minimising the delay between a transfer
	 * request and the scheduling of the message pump thread. Without this
	 * setting the message pump thread will remain at default priority.
	 */
	if (master->rt) {
		dev_info(&master->dev,
			"will run message pump with realtime priority\n");
		sched_setscheduler(master->kworker_task, SCHED_FIFO, &param);
	}

	return 0;
}

/**
 * spi_get_next_queued_message() - called by driver to check for queued
 * messages
 * @master: the master to check for queued messages
 *
 * If there are more messages in the queue, the next message is returned from
 * this call.
 *
 * Return: the next message in the queue, else NULL if the queue is empty.
 */
struct spi_message *spi_get_next_queued_message(struct spi_master *master)
{
	struct spi_message *next;
	unsigned long flags;

	/* get a pointer to the next message, if any */
	spin_lock_irqsave(&master->queue_lock, flags);
	next = list_first_entry_or_null(&master->queue, struct spi_message,
					queue);
	spin_unlock_irqrestore(&master->queue_lock, flags);

	return next;
}
EXPORT_SYMBOL_GPL(spi_get_next_queued_message);

/**
 * spi_finalize_current_message() - the current message is complete
 * @master: the master to return the message to
 *
 * Called by the driver to notify the core that the message in the front of the
 * queue is complete and can be removed from the queue.
 */
void spi_finalize_current_message(struct spi_master *master)
{
	struct spi_message *mesg;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&master->queue_lock, flags);
	mesg = master->cur_msg;
	spin_unlock_irqrestore(&master->queue_lock, flags);

	spi_unmap_msg(master, mesg);

	if (master->cur_msg_prepared && master->unprepare_message) {
		ret = master->unprepare_message(master, mesg);
		if (ret) {
			dev_err(&master->dev,
				"failed to unprepare message: %d\n", ret);
		}
	}

	spin_lock_irqsave(&master->queue_lock, flags);
	master->cur_msg = NULL;
	master->cur_msg_prepared = false;
	queue_kthread_work(&master->kworker, &master->pump_messages);
	spin_unlock_irqrestore(&master->queue_lock, flags);

	trace_spi_message_done(mesg);

	mesg->state = NULL;
	if (mesg->complete)
		mesg->complete(mesg->context);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_message);

static int spi_start_queue(struct spi_master *master)
{
	unsigned long flags;

	spin_lock_irqsave(&master->queue_lock, flags);

	if (master->running || master->busy) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return -EBUSY;
	}

	master->running = true;
	master->cur_msg = NULL;
	spin_unlock_irqrestore(&master->queue_lock, flags);

	queue_kthread_work(&master->kworker, &master->pump_messages);

	return 0;
}

static int spi_stop_queue(struct spi_master *master)
{
	unsigned long flags;
	unsigned limit = 500;
	int ret = 0;

	spin_lock_irqsave(&master->queue_lock, flags);

	/*
	 * This is a bit lame, but is optimized for the common execution path.
	 * A wait_queue on the master->busy could be used, but then the common
	 * execution path (pump_messages) would be required to call wake_up or
	 * friends on every SPI message. Do this instead.
	 */
	while ((!list_empty(&master->queue) || master->busy) && limit--) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		usleep_range(10000, 11000);
		spin_lock_irqsave(&master->queue_lock, flags);
	}

	if (!list_empty(&master->queue) || master->busy)
		ret = -EBUSY;
	else
		master->running = false;

	spin_unlock_irqrestore(&master->queue_lock, flags);

	if (ret) {
		dev_warn(&master->dev,
			 "could not stop message queue\n");
		return ret;
	}
	return ret;
}

static int spi_destroy_queue(struct spi_master *master)
{
	int ret;

	ret = spi_stop_queue(master);

	/*
	 * flush_kthread_worker will block until all work is done.
	 * If the reason that stop_queue timed out is that the work will never
	 * finish, then it does no good to call flush/stop thread, so
	 * return anyway.
	 */
	if (ret) {
		dev_err(&master->dev, "problem destroying queue\n");
		return ret;
	}

	flush_kthread_worker(&master->kworker);
	kthread_stop(master->kworker_task);

	return 0;
}

static int __spi_queued_transfer(struct spi_device *spi,
				 struct spi_message *msg,
				 bool need_pump)
{
	struct spi_master *master = spi->master;
	unsigned long flags;

	spin_lock_irqsave(&master->queue_lock, flags);

	if (!master->running) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return -ESHUTDOWN;
	}
	msg->actual_length = 0;
	msg->status = -EINPROGRESS;

	list_add_tail(&msg->queue, &master->queue);
	if (!master->busy && need_pump)
		queue_kthread_work(&master->kworker, &master->pump_messages);

	spin_unlock_irqrestore(&master->queue_lock, flags);
	return 0;
}

/**
 * spi_queued_transfer - transfer function for queued transfers
 * @spi: spi device which is requesting transfer
 * @msg: spi message which is to handled is queued to driver queue
 *
 * Return: zero on success, else a negative error code.
 */
static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
{
	return __spi_queued_transfer(spi, msg, true);
}

static int spi_master_initialize_queue(struct spi_master *master)
{
	int ret;

	master->transfer = spi_queued_transfer;
	if (!master->transfer_one_message)
		master->transfer_one_message = spi_transfer_one_message;

	/* Initialize and start queue */
	ret = spi_init_queue(master);
	if (ret) {
		dev_err(&master->dev, "problem initializing queue\n");
		goto err_init_queue;
	}
	master->queued = true;
	ret = spi_start_queue(master);
	if (ret) {
		dev_err(&master->dev, "problem starting queue\n");
		goto err_start_queue;
	}

	return 0;

err_start_queue:
	spi_destroy_queue(master);
err_init_queue:
	return ret;
}

/*-------------------------------------------------------------------------*/

#if defined(CONFIG_OF)
static struct spi_device *
of_register_spi_device(struct spi_master *master, struct device_node *nc)
{
	struct spi_device *spi;
	int rc;
	u32 value;

	/* Alloc an spi_device */
	spi = spi_alloc_device(master);
	if (!spi) {
		dev_err(&master->dev, "spi_device alloc error for %s\n",
			nc->full_name);
		rc = -ENOMEM;
		goto err_out;
	}

	/* Select device driver */
	rc = of_modalias_node(nc, spi->modalias,
				sizeof(spi->modalias));
	if (rc < 0) {
		dev_err(&master->dev, "cannot find modalias for %s\n",
			nc->full_name);
		goto err_out;
	}

	/* Device address */
	rc = of_property_read_u32(nc, "reg", &value);
	if (rc) {
		dev_err(&master->dev, "%s has no valid 'reg' property (%d)\n",
			nc->full_name, rc);
		goto err_out;
	}
	spi->chip_select = value;

	/* Mode (clock phase/polarity/etc.) */
	if (of_find_property(nc, "spi-cpha", NULL))
		spi->mode |= SPI_CPHA;
	if (of_find_property(nc, "spi-cpol", NULL))
		spi->mode |= SPI_CPOL;
	if (of_find_property(nc, "spi-cs-high", NULL))
		spi->mode |= SPI_CS_HIGH;
	if (of_find_property(nc, "spi-3wire", NULL))
		spi->mode |= SPI_3WIRE;
	if (of_find_property(nc, "spi-lsb-first", NULL))
		spi->mode |= SPI_LSB_FIRST;

	/* Device DUAL/QUAD mode */
	if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
		switch (value) {
		case 1:
			break;
		case 2:
			spi->mode |= SPI_TX_DUAL;
			break;
		case 4:
			spi->mode |= SPI_TX_QUAD;
			break;
		default:
			dev_warn(&master->dev,
				"spi-tx-bus-width %d not supported\n",
				value);
			break;
		}
	}

	if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
		switch (value) {
		case 1:
			break;
		case 2:
			spi->mode |= SPI_RX_DUAL;
			break;
		case 4:
			spi->mode |= SPI_RX_QUAD;
			break;
		default:
			dev_warn(&master->dev,
				"spi-rx-bus-width %d not supported\n",
				value);
			break;
		}
	}

	/* Device speed */
	rc = of_property_read_u32(nc, "spi-max-frequency", &value);
	if (rc) {
		dev_err(&master->dev, "%s has no valid 'spi-max-frequency' property (%d)\n",
			nc->full_name, rc);
		goto err_out;
	}
	spi->max_speed_hz = value;

	/* Store a pointer to the node in the device structure */
	of_node_get(nc);
	spi->dev.of_node = nc;

	/* Register the new device */
	rc = spi_add_device(spi);
	if (rc) {
		dev_err(&master->dev, "spi_device register error %s\n",
			nc->full_name);
		goto err_out;
	}

	return spi;

err_out:
	spi_dev_put(spi);
	return ERR_PTR(rc);
}

/**
 * of_register_spi_devices() - Register child devices onto the SPI bus
 * @master:	Pointer to spi_master device
 *
 * Registers an spi_device for each child node of master node which has a 'reg'
 * property.
 */
static void of_register_spi_devices(struct spi_master *master)
{
	struct spi_device *spi;
	struct device_node *nc;

	if (!master->dev.of_node)
		return;

	for_each_available_child_of_node(master->dev.of_node, nc) {
		if (of_node_test_and_set_flag(nc, OF_POPULATED))
			continue;
		spi = of_register_spi_device(master, nc);
		if (IS_ERR(spi))
			dev_warn(&master->dev, "Failed to create SPI device for %s\n",
				nc->full_name);
	}
}
#else
static void of_register_spi_devices(struct spi_master *master) { }
#endif

#ifdef CONFIG_ACPI
static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
{
	struct spi_device *spi = data;
	struct spi_master *master = spi->master;

	if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
		struct acpi_resource_spi_serialbus *sb;

		sb = &ares->data.spi_serial_bus;
		if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
			/*
			 * ACPI DeviceSelection numbering is handled by the
			 * host controller driver in Windows and can vary
			 * from driver to driver. In Linux we always expect
			 * 0 .. max - 1 so we need to ask the driver to
			 * translate between the two schemes.
			 */
			if (master->fw_translate_cs) {
				int cs = master->fw_translate_cs(master,
						sb->device_selection);
				if (cs < 0)
					return cs;
				spi->chip_select = cs;
			} else {
				spi->chip_select = sb->device_selection;
			}

			spi->max_speed_hz = sb->connection_speed;

			if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
				spi->mode |= SPI_CPHA;
			if (sb->clock_polarity == ACPI_SPI_START_HIGH)
				spi->mode |= SPI_CPOL;
			if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
				spi->mode |= SPI_CS_HIGH;
		}
	} else if (spi->irq < 0) {
		struct resource r;

		if (acpi_dev_resource_interrupt(ares, 0, &r))
			spi->irq = r.start;
	}

	/* Always tell the ACPI core to skip this resource */
	return 1;
}

static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
				       void *data, void **return_value)
{
	struct spi_master *master = data;
	struct list_head resource_list;
	struct acpi_device *adev;
	struct spi_device *spi;
	int ret;

	if (acpi_bus_get_device(handle, &adev))
		return AE_OK;
	if (acpi_bus_get_status(adev) || !adev->status.present)
		return AE_OK;

	spi = spi_alloc_device(master);
	if (!spi) {
		dev_err(&master->dev, "failed to allocate SPI device for %s\n",
			dev_name(&adev->dev));
		return AE_NO_MEMORY;
	}

	ACPI_COMPANION_SET(&spi->dev, adev);
	spi->irq = -1;

	INIT_LIST_HEAD(&resource_list);
	ret = acpi_dev_get_resources(adev, &resource_list,
				     acpi_spi_add_resource, spi);
	acpi_dev_free_resource_list(&resource_list);

	if (ret < 0 || !spi->max_speed_hz) {
		spi_dev_put(spi);
		return AE_OK;
	}

	if (spi->irq < 0)
		spi->irq = acpi_dev_gpio_irq_get(adev, 0);

	adev->power.flags.ignore_parent = true;
	strlcpy(spi->modalias, acpi_device_hid(adev), sizeof(spi->modalias));
	if (spi_add_device(spi)) {
		adev->power.flags.ignore_parent = false;
		dev_err(&master->dev, "failed to add SPI device %s from ACPI\n",
			dev_name(&adev->dev));
		spi_dev_put(spi);
	}

	return AE_OK;
}

static void acpi_register_spi_devices(struct spi_master *master)
{
	acpi_status status;
	acpi_handle handle;

	handle = ACPI_HANDLE(master->dev.parent);
	if (!handle)
		return;

	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
				     acpi_spi_add_device, NULL,
				     master, NULL);
	if (ACPI_FAILURE(status))
		dev_warn(&master->dev, "failed to enumerate SPI slaves\n");
}
#else
static inline void acpi_register_spi_devices(struct spi_master *master) {}
#endif /* CONFIG_ACPI */

static void spi_master_release(struct device *dev)
{
	struct spi_master *master;

	master = container_of(dev, struct spi_master, dev);
	kfree(master);
}

static struct class spi_master_class = {
	.name		= "spi_master",
	.owner		= THIS_MODULE,
	.dev_release	= spi_master_release,
	.dev_groups	= spi_master_groups,
};

/**
 * spi_alloc_master - allocate SPI master controller
 * @dev: the controller, possibly using the platform_bus
 * @size: how much zeroed driver-private data to allocate; the pointer to this
 *	memory is in the driver_data field of the returned device,
 *	accessible with spi_master_get_devdata().
 * Context: can sleep
 *
 * This call is used only by SPI master controller drivers, which are the
 * only ones directly touching chip registers.  It's how they allocate
 * an spi_master structure, prior to calling spi_register_master().
 *
 * This must be called from context that can sleep.
 *
 * The caller is responsible for assigning the bus number and initializing
 * the master's methods before calling spi_register_master(); and (after errors
 * adding the device) calling spi_master_put() to prevent a memory leak.
 *
 * Return: the SPI master structure on success, else NULL.
 */
struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
{
	struct spi_master	*master;

	if (!dev)
		return NULL;

	master = kzalloc(size + sizeof(*master), GFP_KERNEL);
	if (!master)
		return NULL;

	device_initialize(&master->dev);
	master->bus_num = -1;
	master->num_chipselect = 1;
	master->dev.class = &spi_master_class;
	master->dev.parent = dev;
	spi_master_set_devdata(master, &master[1]);

	return master;
}
EXPORT_SYMBOL_GPL(spi_alloc_master);

#ifdef CONFIG_OF
static int of_spi_register_master(struct spi_master *master)
{
	int nb, i, *cs;
	struct device_node *np = master->dev.of_node;

	if (!np)
		return 0;

	nb = of_gpio_named_count(np, "cs-gpios");
	master->num_chipselect = max_t(int, nb, master->num_chipselect);

	/* Return error only for an incorrectly formed cs-gpios property */
	if (nb == 0 || nb == -ENOENT)
		return 0;
	else if (nb < 0)
		return nb;

	cs = devm_kzalloc(&master->dev,
			  sizeof(int) * master->num_chipselect,
			  GFP_KERNEL);
	master->cs_gpios = cs;

	if (!master->cs_gpios)
		return -ENOMEM;

	for (i = 0; i < master->num_chipselect; i++)
		cs[i] = -ENOENT;

	for (i = 0; i < nb; i++)
		cs[i] = of_get_named_gpio(np, "cs-gpios", i);

	return 0;
}
#else
static int of_spi_register_master(struct spi_master *master)
{
	return 0;
}
#endif

/**
 * spi_register_master - register SPI master controller
 * @master: initialized master, originally from spi_alloc_master()
 * Context: can sleep
 *
 * SPI master controllers connect to their drivers using some non-SPI bus,
 * such as the platform bus.  The final stage of probe() in that code
 * includes calling spi_register_master() to hook up to this SPI bus glue.
 *
 * SPI controllers use board specific (often SOC specific) bus numbers,
 * and board-specific addressing for SPI devices combines those numbers
 * with chip select numbers.  Since SPI does not directly support dynamic
 * device identification, boards need configuration tables telling which
 * chip is at which address.
 *
 * This must be called from context that can sleep.  It returns zero on
 * success, else a negative error code (dropping the master's refcount).
 * After a successful return, the caller is responsible for calling
 * spi_unregister_master().
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_master(struct spi_master *master)
{
	static atomic_t		dyn_bus_id = ATOMIC_INIT((1<<15) - 1);
	struct device		*dev = master->dev.parent;
	struct boardinfo	*bi;
	int			status = -ENODEV;
	int			dynamic = 0;

	if (!dev)
		return -ENODEV;

	status = of_spi_register_master(master);
	if (status)
		return status;

	/* even if it's just one always-selected device, there must
	 * be at least one chipselect
	 */
	if (master->num_chipselect == 0)
		return -EINVAL;

	if ((master->bus_num < 0) && master->dev.of_node)
		master->bus_num = of_alias_get_id(master->dev.of_node, "spi");

	/* convention:  dynamically assigned bus IDs count down from the max */
	if (master->bus_num < 0) {
		/* FIXME switch to an IDR based scheme, something like
		 * I2C now uses, so we can't run out of "dynamic" IDs
		 */
		master->bus_num = atomic_dec_return(&dyn_bus_id);
		dynamic = 1;
	}

	INIT_LIST_HEAD(&master->queue);
	spin_lock_init(&master->queue_lock);
	spin_lock_init(&master->bus_lock_spinlock);
	mutex_init(&master->bus_lock_mutex);
	master->bus_lock_flag = 0;
	init_completion(&master->xfer_completion);
	if (!master->max_dma_len)
		master->max_dma_len = INT_MAX;

	/* register the device, then userspace will see it.
	 * registration fails if the bus ID is in use.
	 */
	dev_set_name(&master->dev, "spi%u", master->bus_num);
	status = device_add(&master->dev);
	if (status < 0)
		goto done;
	dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev),
			dynamic ? " (dynamic)" : "");

	/* If we're using a queued driver, start the queue */
	if (master->transfer)
		dev_info(dev, "master is unqueued, this is deprecated\n");
	else {
		status = spi_master_initialize_queue(master);
		if (status) {
			device_del(&master->dev);
			goto done;
		}
	}
	/* add statistics */
	spin_lock_init(&master->statistics.lock);

	mutex_lock(&board_lock);
	list_add_tail(&master->list, &spi_master_list);
	list_for_each_entry(bi, &board_list, list)
		spi_match_master_to_boardinfo(master, &bi->board_info);
	mutex_unlock(&board_lock);

	/* Register devices from the device tree and ACPI */
	of_register_spi_devices(master);
	acpi_register_spi_devices(master);
done:
	return status;
}
EXPORT_SYMBOL_GPL(spi_register_master);
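
/*
 * Illustrative sketch (not part of the original file): the registration
 * pattern described above, seen from a platform driver's probe().  The
 * "example" names, including struct example_priv and example_transfer_one(),
 * are hypothetical.
 *
 *	static int example_spi_probe(struct platform_device *pdev)
 *	{
 *		struct spi_master *master;
 *		int ret;
 *
 *		master = spi_alloc_master(&pdev->dev,
 *					  sizeof(struct example_priv));
 *		if (!master)
 *			return -ENOMEM;
 *
 *		master->num_chipselect = 4;
 *		master->transfer_one = example_transfer_one;
 *
 *		ret = spi_register_master(master);
 *		if (ret)
 *			spi_master_put(master);	// drop refcount on error
 *		return ret;
 *	}
 */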

static void devm_spi_unregister(struct device *dev, void *res)
{
	spi_unregister_master(*(struct spi_master **)res);
}

/**
 * devm_spi_register_master - register managed SPI master controller
 * @dev:    device managing SPI master
 * @master: initialized master, originally from spi_alloc_master()
 * Context: can sleep
 *
 * Register a SPI device as with spi_register_master() which will
 * automatically be unregistered when @dev is unbound.
 *
 * Return: zero on success, else a negative error code.
 */
int devm_spi_register_master(struct device *dev, struct spi_master *master)
{
	struct spi_master **ptr;
	int ret;

	ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = spi_register_master(master);
	if (!ret) {
		*ptr = master;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(devm_spi_register_master);

static int __unregister(struct device *dev, void *null)
{
	spi_unregister_device(to_spi_device(dev));
	return 0;
}

/**
 * spi_unregister_master - unregister SPI master controller
 * @master: the master being unregistered
 * Context: can sleep
 *
 * This call is used only by SPI master controller drivers, which are the
 * only ones directly touching chip registers.
 *
 * This must be called from context that can sleep.
 */
void spi_unregister_master(struct spi_master *master)
{
	int dummy;

	if (master->queued) {
		if (spi_destroy_queue(master))
			dev_err(&master->dev, "queue remove failed\n");
	}

	mutex_lock(&board_lock);
	list_del(&master->list);
	mutex_unlock(&board_lock);

	dummy = device_for_each_child(&master->dev, NULL, __unregister);
	device_unregister(&master->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_master);

int spi_master_suspend(struct spi_master *master)
{
	int ret;

	/* Basically no-ops for non-queued masters */
	if (!master->queued)
		return 0;

	ret = spi_stop_queue(master);
	if (ret)
		dev_err(&master->dev, "queue stop failed\n");

	return ret;
}
EXPORT_SYMBOL_GPL(spi_master_suspend);

int spi_master_resume(struct spi_master *master)
{
	int ret;

	if (!master->queued)
		return 0;

	ret = spi_start_queue(master);
	if (ret)
		dev_err(&master->dev, "queue restart failed\n");

	return ret;
}
EXPORT_SYMBOL_GPL(spi_master_resume);

static int __spi_master_match(struct device *dev, const void *data)
{
	struct spi_master *m;
	const u16 *bus_num = data;

	m = container_of(dev, struct spi_master, dev);
	return m->bus_num == *bus_num;
}

/**
 * spi_busnum_to_master - look up master associated with bus_num
 * @bus_num: the master's bus number
 * Context: can sleep
 *
 * This call may be used with devices that are registered after
 * arch init time.  It returns a refcounted pointer to the relevant
 * spi_master (which the caller must release), or NULL if there is
 * no such master registered.
 *
 * Return: the SPI master structure on success, else NULL.
 */
struct spi_master *spi_busnum_to_master(u16 bus_num)
{
	struct device		*dev;
	struct spi_master	*master = NULL;

	dev = class_find_device(&spi_master_class, NULL, &bus_num,
				__spi_master_match);
	if (dev)
		master = container_of(dev, struct spi_master, dev);
	/* reference got in class_find_device */
	return master;
}
EXPORT_SYMBOL_GPL(spi_busnum_to_master);
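
/*
 * Illustrative sketch (not part of the original file): looking up a master
 * by bus number and dropping the reference when done, per the kernel-doc
 * above.  The bus number 2 is hypothetical.
 *
 *	struct spi_master *master = spi_busnum_to_master(2);
 *
 *	if (master) {
 *		dev_dbg(&master->dev, "found master for bus 2\n");
 *		spi_master_put(master);	// release class_find_device reference
 *	}
 */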

/*-------------------------------------------------------------------------*/

/* Core methods for SPI master protocol drivers.  Some of the
 * other core methods are currently defined as inline functions.
 */

static int __spi_validate_bits_per_word(struct spi_master *master, u8 bits_per_word)
{
	if (master->bits_per_word_mask) {
		/* Only 32 bits fit in the mask */
		if (bits_per_word > 32)
			return -EINVAL;
		if (!(master->bits_per_word_mask &
				SPI_BPW_MASK(bits_per_word)))
			return -EINVAL;
	}

	return 0;
}

/**
 * spi_setup - setup SPI mode and clock rate
 * @spi: the device whose settings are being modified
 * Context: can sleep, and no requests are queued to the device
 *
 * SPI protocol drivers may need to update the transfer mode if the
 * device doesn't work with its default.  They may likewise need
 * to update clock rates or word sizes from initial values.  This function
 * changes those settings, and must be called from a context that can sleep.
 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
 * effect the next time the device is selected and data is transferred to
 * or from it.  When this function returns, the spi device is deselected.
 *
 * Note that this call will fail if the protocol driver specifies an option
 * that the underlying controller or its driver does not support.  For
 * example, not all hardware supports wire transfers using nine bit words,
 * LSB-first wire encoding, or active-high chipselects.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_setup(struct spi_device *spi)
{
	unsigned	bad_bits, ugly_bits;
	int		status;

	/* check mode to prevent that DUAL and QUAD set at the same time
	 */
	if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
		((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
		dev_err(&spi->dev,
		"setup: can not select dual and quad at the same time\n");
		return -EINVAL;
	}
	/* if it is SPI_3WIRE mode, DUAL and QUAD should be forbidden
	 */
	if ((spi->mode & SPI_3WIRE) && (spi->mode &
		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)))
		return -EINVAL;
	/* help drivers fail *cleanly* when they need options
	 * that aren't supported with their current master
	 */
	bad_bits = spi->mode & ~spi->master->mode_bits;
	ugly_bits = bad_bits &
		    (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD);
	if (ugly_bits) {
		dev_warn(&spi->dev,
			 "setup: ignoring unsupported mode bits %x\n",
			 ugly_bits);
		spi->mode &= ~ugly_bits;
		bad_bits &= ~ugly_bits;
	}
	if (bad_bits) {
		dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
			bad_bits);
		return -EINVAL;
	}

	if (!spi->bits_per_word)
		spi->bits_per_word = 8;

	status = __spi_validate_bits_per_word(spi->master, spi->bits_per_word);
	if (status)
		return status;

	if (!spi->max_speed_hz)
		spi->max_speed_hz = spi->master->max_speed_hz;

	if (spi->master->setup)
		status = spi->master->setup(spi);

	spi_set_cs(spi, false);

	dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
			(int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
			(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
			(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
			(spi->mode & SPI_3WIRE) ? "3wire, " : "",
			(spi->mode & SPI_LOOP) ? "loopback, " : "",
			spi->bits_per_word, spi->max_speed_hz,
			status);

	return status;
}
EXPORT_SYMBOL_GPL(spi_setup);
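
/*
 * Illustrative sketch (not part of the original file): a protocol driver
 * changing its device's word size and clock from probe(), as described in
 * the kernel-doc above.  The specific values are hypothetical.
 *
 *	static int example_configure(struct spi_device *spi)
 *	{
 *		spi->mode = SPI_MODE_3;
 *		spi->bits_per_word = 16;
 *		spi->max_speed_hz = 2000000;
 *		return spi_setup(spi);	// fails if the master lacks support
 *	}
 */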

static int __spi_validate(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	struct spi_transfer *xfer;
	int w_size;

	if (list_empty(&message->transfers))
		return -EINVAL;

	/* Half-duplex links include original MicroWire, and ones with
	 * only one data pin like SPI_3WIRE (switches direction) or where
	 * either MOSI or MISO is missing.  They can also be caused by
	 * software limitations.
	 */
	if ((master->flags & SPI_MASTER_HALF_DUPLEX)
			|| (spi->mode & SPI_3WIRE)) {
		unsigned flags = master->flags;

		list_for_each_entry(xfer, &message->transfers, transfer_list) {
			if (xfer->rx_buf && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf)
				return -EINVAL;
		}
	}

	/*
	 * Set transfer bits_per_word and max speed as spi device default if
	 * it is not set for this transfer.
	 * Set transfer tx_nbits and rx_nbits as single transfer default
	 * (SPI_NBITS_SINGLE) if it is not set for this transfer.
	 */
	message->frame_length = 0;
	list_for_each_entry(xfer, &message->transfers, transfer_list) {
		message->frame_length += xfer->len;
		if (!xfer->bits_per_word)
			xfer->bits_per_word = spi->bits_per_word;

		if (!xfer->speed_hz)
			xfer->speed_hz = spi->max_speed_hz;
		if (!xfer->speed_hz)
			xfer->speed_hz = master->max_speed_hz;

		if (master->max_speed_hz &&
		    xfer->speed_hz > master->max_speed_hz)
			xfer->speed_hz = master->max_speed_hz;

		if (__spi_validate_bits_per_word(master, xfer->bits_per_word))
			return -EINVAL;

		/*
		 * SPI transfer length should be multiple of SPI word size
		 * where SPI word size should be power-of-two multiple
		 */
		if (xfer->bits_per_word <= 8)
			w_size = 1;
		else if (xfer->bits_per_word <= 16)
			w_size = 2;
		else
			w_size = 4;

		/* No partial transfers accepted */
		if (xfer->len % w_size)
			return -EINVAL;

		if (xfer->speed_hz && master->min_speed_hz &&
		    xfer->speed_hz < master->min_speed_hz)
			return -EINVAL;

		if (xfer->tx_buf && !xfer->tx_nbits)
			xfer->tx_nbits = SPI_NBITS_SINGLE;
		if (xfer->rx_buf && !xfer->rx_nbits)
			xfer->rx_nbits = SPI_NBITS_SINGLE;
		/* check transfer tx/rx_nbits:
		 * 1. check the value matches one of single, dual and quad
		 * 2. check tx/rx_nbits match the mode in spi_device
		 */
		if (xfer->tx_buf) {
			if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
				xfer->tx_nbits != SPI_NBITS_DUAL &&
				xfer->tx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
				!(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
				!(spi->mode & SPI_TX_QUAD))
				return -EINVAL;
		}
		/* check transfer rx_nbits */
		if (xfer->rx_buf) {
			if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
				xfer->rx_nbits != SPI_NBITS_DUAL &&
				xfer->rx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
				!(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
				!(spi->mode & SPI_RX_QUAD))
				return -EINVAL;
		}
	}

	message->status = -EINPROGRESS;

	return 0;
}

static int __spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;

	message->spi = spi;

	SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_async);
	SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async);

	trace_spi_message_submit(message);

	return master->transfer(spi, message);
}
/**
 * spi_async - asynchronous SPI transfer
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used in IRQ and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code.  After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 *
 * Return: zero on success, else a negative error code.
 */
int spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	int ret;
	unsigned long flags;

	ret = __spi_validate(spi, message);
	if (ret != 0)
		return ret;

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);

	if (master->bus_lock_flag)
		ret = -EBUSY;
	else
		ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_async);
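/*
 * Illustrative sketch (hypothetical names, not part of this file): a
 * driver-owned request whose spi_message stays allocated until the
 * completion callback has run, which is what the rules above require.
 */
struct example_async_req {
	struct spi_message msg;
	struct spi_transfer xfer;
	struct completion done;
};

/* Runs in a context which can't sleep; msg.status is valid here. */
static void example_async_complete(void *context)
{
	complete(context);
}

static int example_async_read(struct spi_device *spi,
			      struct example_async_req *req,
			      void *rxbuf, size_t len)
{
	spi_message_init(&req->msg);
	memset(&req->xfer, 0, sizeof(req->xfer));
	req->xfer.rx_buf = rxbuf;
	req->xfer.len = len;
	spi_message_add_tail(&req->xfer, &req->msg);

	init_completion(&req->done);
	req->msg.complete = example_async_complete;
	req->msg.context = &req->done;

	/* May be called from atomic context; a sleeping caller would
	 * later pair this with wait_for_completion(&req->done).
	 */
	return spi_async(spi, &req->msg);
}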
/**
 * spi_async_locked - version of spi_async with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used in IRQ and other contexts which can't sleep,
 * as well as from task contexts which can sleep.  Unlike spi_async(),
 * it is not rejected while the bus is locked by spi_bus_lock(), so it
 * is the submission path to use while holding that lock.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code.  After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 *
 * Return: zero on success, else a negative error code.
 */
int spi_async_locked(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	int ret;
	unsigned long flags;

	ret = __spi_validate(spi, message);
	if (ret != 0)
		return ret;

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);

	ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_async_locked);
int spi_flash_read(struct spi_device *spi,
		   struct spi_flash_read_message *msg)
{
	struct spi_master *master = spi->master;
	int ret;

	if ((msg->opcode_nbits == SPI_NBITS_DUAL ||
	     msg->addr_nbits == SPI_NBITS_DUAL) &&
	    !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
		return -EINVAL;
	if ((msg->opcode_nbits == SPI_NBITS_QUAD ||
	     msg->addr_nbits == SPI_NBITS_QUAD) &&
	    !(spi->mode & SPI_TX_QUAD))
		return -EINVAL;
	if (msg->data_nbits == SPI_NBITS_DUAL &&
	    !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
		return -EINVAL;
	if (msg->data_nbits == SPI_NBITS_QUAD &&
	    !(spi->mode & SPI_RX_QUAD))
		return -EINVAL;

	if (master->auto_runtime_pm) {
		ret = pm_runtime_get_sync(master->dev.parent);
		if (ret < 0) {
			dev_err(&master->dev, "Failed to power device: %d\n",
				ret);
			return ret;
		}
	}
	mutex_lock(&master->bus_lock_mutex);
	ret = master->spi_flash_read(spi, msg);
	mutex_unlock(&master->bus_lock_mutex);
	if (master->auto_runtime_pm)
		pm_runtime_put(master->dev.parent);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_flash_read);
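/*
 * Illustrative sketch (hypothetical helper; the spi_flash_read_message
 * field usage is an assumption based on callers such as m25p80): a
 * single-wire fast read, attempted only when the controller provides
 * the accelerated read path.
 */
static int example_flash_fast_read(struct spi_device *spi, loff_t from,
				   size_t len, void *dmabuf, size_t *retlen)
{
	struct spi_flash_read_message msg = {
		.buf		= dmabuf,	/* must be DMA-safe */
		.from		= from,
		.len		= len,
		.read_opcode	= 0x0b,		/* device-specific FAST_READ */
		.addr_width	= 3,
		.dummy_bytes	= 1,
		.opcode_nbits	= SPI_NBITS_SINGLE,
		.addr_nbits	= SPI_NBITS_SINGLE,
		.data_nbits	= SPI_NBITS_SINGLE,
	};
	int ret;

	if (!spi->master->spi_flash_read)
		return -EOPNOTSUPP;	/* no accelerated read path */

	ret = spi_flash_read(spi, &msg);
	if (!ret)
		*retlen = msg.retlen;
	return ret;
}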
/*-------------------------------------------------------------------------*/

/* Utility methods for SPI master protocol drivers, layered on
 * top of the core.  Some other utility methods are defined as
 * inline functions.
 */

static void spi_complete(void *arg)
{
	complete(arg);
}

static int __spi_sync(struct spi_device *spi, struct spi_message *message,
		      int bus_locked)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int status;
	struct spi_master *master = spi->master;
	unsigned long flags;

	status = __spi_validate(spi, message);
	if (status != 0)
		return status;

	message->complete = spi_complete;
	message->context = &done;
	message->spi = spi;

	SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_sync);
	SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync);

	if (!bus_locked)
		mutex_lock(&master->bus_lock_mutex);

	/* If we're not using the legacy transfer method then we will
	 * try to transfer in the calling context, so special-case it.
	 * This code would be less tricky if we could remove the
	 * support for driver implemented message queues.
	 */
	if (master->transfer == spi_queued_transfer) {
		spin_lock_irqsave(&master->bus_lock_spinlock, flags);

		trace_spi_message_submit(message);

		status = __spi_queued_transfer(spi, message, false);

		spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
	} else {
		status = spi_async_locked(spi, message);
	}

	if (!bus_locked)
		mutex_unlock(&master->bus_lock_mutex);

	if (status == 0) {
		/* Push out the messages in the calling context if we
		 * can.
		 */
		if (master->transfer == spi_queued_transfer) {
			SPI_STATISTICS_INCREMENT_FIELD(&master->statistics,
						       spi_sync_immediate);
			SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
						       spi_sync_immediate);
			__spi_pump_messages(master, false, bus_locked);
		}

		wait_for_completion(&done);
		status = message->status;
	}
	message->context = NULL;

	return status;
}
/**
 * spi_sync - blocking/synchronous SPI data transfers
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.  Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * Note that the SPI device's chip select is active during the message,
 * and then is normally disabled between messages.  Drivers for some
 * frequently-used devices may want to minimize costs of selecting a chip,
 * by leaving it selected in anticipation that the next message will go
 * to the same chip.  (That may increase power usage.)
 *
 * Also, the caller is guaranteeing that the memory associated with the
 * message will not be freed before this call returns.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync(struct spi_device *spi, struct spi_message *message)
{
	return __spi_sync(spi, message, 0);
}
EXPORT_SYMBOL_GPL(spi_sync);
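/*
 * Illustrative sketch (hypothetical helper, not part of this file):
 * one message containing a command byte followed by a response byte,
 * so chip select stays asserted across both transfers.  The caller is
 * expected to pass DMA-safe (heap) buffers.
 */
static int example_sync_reg_read(struct spi_device *spi, u8 *cmd, u8 *val)
{
	struct spi_transfer xfers[2] = {
		{ .tx_buf = cmd, .len = 1 },
		{ .rx_buf = val, .len = 1 },
	};
	struct spi_message msg;

	spi_message_init_with_transfers(&msg, xfers, ARRAY_SIZE(xfers));
	return spi_sync(spi, &msg);	/* sleeps until the message completes */
}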
/**
 * spi_sync_locked - version of spi_sync with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.  Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus.  It has to be preceded by a spi_bus_lock call.  The SPI bus must
 * be released by a spi_bus_unlock call when the exclusive access is over.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
{
	return __spi_sync(spi, message, 1);
}
EXPORT_SYMBOL_GPL(spi_sync_locked);
/**
 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
 * @master: SPI bus master that should be locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus.  The SPI bus must be released by a spi_bus_unlock call when the
 * exclusive access is over.  Data transfer must be done by spi_sync_locked
 * and spi_async_locked calls when the SPI bus lock is held.
 *
 * Return: always zero.
 */
int spi_bus_lock(struct spi_master *master)
{
	unsigned long flags;

	mutex_lock(&master->bus_lock_mutex);

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);
	master->bus_lock_flag = 1;
	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	/* mutex remains locked until spi_bus_unlock is called */

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_lock);
/**
 * spi_bus_unlock - release the lock for exclusive SPI bus usage
 * @master: SPI bus master that was locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call releases an SPI bus lock previously obtained by an spi_bus_lock
 * call.
 *
 * Return: always zero.
 */
int spi_bus_unlock(struct spi_master *master)
{
	master->bus_lock_flag = 0;

	mutex_unlock(&master->bus_lock_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_unlock);
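/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * performing two messages atomically with respect to other bus users.
 * Between spi_bus_lock() and spi_bus_unlock(), only the *_locked
 * transfer calls may be used; plain spi_async() would return -EBUSY.
 */
static int example_atomic_pair(struct spi_device *spi,
			       struct spi_message *first,
			       struct spi_message *second)
{
	struct spi_master *master = spi->master;
	int ret;

	spi_bus_lock(master);	/* other spi_sync() callers now block */

	ret = spi_sync_locked(spi, first);
	if (!ret)
		ret = spi_sync_locked(spi, second);

	spi_bus_unlock(master);
	return ret;
}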
/* portable code must never pass more than 32 bytes */
#define	SPI_BUFSIZ	max(32, SMP_CACHE_BYTES)

static u8	*buf;
/**
 * spi_write_then_read - SPI synchronous write followed by read
 * @spi: device with which data will be exchanged
 * @txbuf: data to be written (need not be dma-safe)
 * @n_tx: size of txbuf, in bytes
 * @rxbuf: buffer into which data will be read (need not be dma-safe)
 * @n_rx: size of rxbuf, in bytes
 * Context: can sleep
 *
 * This performs a half duplex MicroWire style transaction with the
 * device, sending txbuf and then reading rxbuf.  The return value
 * is zero for success, else a negative errno status code.
 * This call may only be used from a context that may sleep.
 *
 * Parameters to this routine are always copied using a small buffer;
 * portable code should never use this for more than 32 bytes.
 * Performance-sensitive or bulk transfer code should instead use
 * spi_{async,sync}() calls with dma-safe buffers.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_write_then_read(struct spi_device *spi,
		const void *txbuf, unsigned n_tx,
		void *rxbuf, unsigned n_rx)
{
	static DEFINE_MUTEX(lock);

	int			status;
	struct spi_message	message;
	struct spi_transfer	x[2];
	u8			*local_buf;

	/* Use preallocated DMA-safe buffer if we can.  We can't avoid
	 * copying here, (as a pure convenience thing), but we can
	 * keep heap costs out of the hot path unless someone else is
	 * using the pre-allocated buffer or the transfer is too large.
	 */
	if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
		local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
				    GFP_KERNEL | GFP_DMA);
		if (!local_buf)
			return -ENOMEM;
	} else {
		local_buf = buf;
	}

	spi_message_init(&message);
	memset(x, 0, sizeof(x));
	if (n_tx) {
		x[0].len = n_tx;
		spi_message_add_tail(&x[0], &message);
	}
	if (n_rx) {
		x[1].len = n_rx;
		spi_message_add_tail(&x[1], &message);
	}

	memcpy(local_buf, txbuf, n_tx);
	x[0].tx_buf = local_buf;
	x[1].rx_buf = local_buf + n_tx;

	/* do the i/o */
	status = spi_sync(spi, &message);
	if (status == 0)
		memcpy(rxbuf, x[1].rx_buf, n_rx);

	if (x[0].tx_buf == buf)
		mutex_unlock(&lock);
	else
		kfree(local_buf);

	return status;
}
EXPORT_SYMBOL_GPL(spi_write_then_read);
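/*
 * Illustrative sketch (hypothetical helper and opcode, not part of
 * this file): reading a small ID register through the convenience
 * wrapper.  Stack buffers are fine here because the core copies them
 * through its own DMA-safe bounce buffer.
 */
static int example_read_id(struct spi_device *spi, u16 *id)
{
	u8 cmd = 0x9f;			/* device-specific "read ID" opcode */
	u8 rx[2];
	int status;

	status = spi_write_then_read(spi, &cmd, 1, rx, sizeof(rx));
	if (status)
		return status;

	*id = (rx[0] << 8) | rx[1];	/* assuming a big-endian response */
	return 0;
}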
/*-------------------------------------------------------------------------*/

#if IS_ENABLED(CONFIG_OF_DYNAMIC)
static int __spi_of_device_match(struct device *dev, void *data)
{
	return dev->of_node == data;
}

/* must call put_device() when done with returned spi_device device */
static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
{
	struct device *dev = bus_find_device(&spi_bus_type, NULL, node,
						__spi_of_device_match);
	return dev ? to_spi_device(dev) : NULL;
}

static int __spi_of_master_match(struct device *dev, const void *data)
{
	return dev->of_node == data;
}

/* SPI masters are not registered on the spi_bus, so look them up via the
 * spi_master class instead
 */
static struct spi_master *of_find_spi_master_by_node(struct device_node *node)
{
	struct device *dev;

	dev = class_find_device(&spi_master_class, NULL, node,
				__spi_of_master_match);
	if (!dev)
		return NULL;

	/* reference got in class_find_device */
	return container_of(dev, struct spi_master, dev);
}
static int of_spi_notify(struct notifier_block *nb, unsigned long action,
			 void *arg)
{
	struct of_reconfig_data *rd = arg;
	struct spi_master *master;
	struct spi_device *spi;

	switch (of_reconfig_get_state_change(action, arg)) {
	case OF_RECONFIG_CHANGE_ADD:
		master = of_find_spi_master_by_node(rd->dn->parent);
		if (master == NULL)
			return NOTIFY_OK;	/* not for us */

		if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
			put_device(&master->dev);
			return NOTIFY_OK;
		}

		spi = of_register_spi_device(master, rd->dn);
		put_device(&master->dev);

		if (IS_ERR(spi)) {
			pr_err("%s: failed to create for '%s'\n",
					__func__, rd->dn->full_name);
			return notifier_from_errno(PTR_ERR(spi));
		}
		break;

	case OF_RECONFIG_CHANGE_REMOVE:
		/* already depopulated? */
		if (!of_node_check_flag(rd->dn, OF_POPULATED))
			return NOTIFY_OK;

		/* find our device by node */
		spi = of_find_spi_device_by_node(rd->dn);
		if (spi == NULL)
			return NOTIFY_OK;	/* no? not meant for us */

		/* unregister takes one ref away */
		spi_unregister_device(spi);

		/* and put the reference of the find */
		put_device(&spi->dev);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block spi_of_notifier = {
	.notifier_call = of_spi_notify,
};
#else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
extern struct notifier_block spi_of_notifier;
#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
static int __init spi_init(void)
{
	int	status;

	buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
	if (!buf) {
		status = -ENOMEM;
		goto err0;
	}

	status = bus_register(&spi_bus_type);
	if (status < 0)
		goto err1;

	status = class_register(&spi_master_class);
	if (status < 0)
		goto err2;

	if (IS_ENABLED(CONFIG_OF_DYNAMIC))
		WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));

	return 0;

err2:
	bus_unregister(&spi_bus_type);
err1:
	kfree(buf);
	buf = NULL;
err0:
	return status;
}

/* board_info is normally registered in arch_initcall(),
 * but even essential drivers wait till later.
 *
 * REVISIT only boardinfo really needs static linking.  The rest (device and
 * driver registration) _could_ be dynamically linked (modular) ... costs
 * include needing to have boardinfo data structures be much more public.
 */
postcore_initcall(spi_init);