/*
 * Copyright (C) 2005 David Brownell
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef __LINUX_SPI_H
#define __LINUX_SPI_H

#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/scatterlist.h>

struct dma_chan;
struct spi_master;
struct spi_transfer;

/*
 * INTERFACES between SPI master-side drivers and SPI infrastructure.
 * (There's no SPI slave support for Linux yet...)
 */
extern struct bus_type spi_bus_type;

/**
 * struct spi_statistics - statistics for spi transfers
 * @lock: lock protecting this structure
 *
 * @messages: number of spi-messages handled
 * @transfers: number of spi_transfers handled
 * @errors: number of errors during spi_transfer
 * @timedout: number of timeouts during spi_transfer
 *
 * @spi_sync: number of times spi_sync is used
 * @spi_sync_immediate:
 *	number of times spi_sync is executed immediately
 *	in calling context without queuing and scheduling
 * @spi_async: number of times spi_async is used
 *
 * @bytes: number of bytes transferred to/from device
 * @bytes_tx: number of bytes sent to device
 * @bytes_rx: number of bytes received from device
 *
 */
struct spi_statistics {
	spinlock_t		lock; /* lock for the whole structure */

	unsigned long		messages;
	unsigned long		transfers;
	unsigned long		errors;
	unsigned long		timedout;

	unsigned long		spi_sync;
	unsigned long		spi_sync_immediate;
	unsigned long		spi_async;

	unsigned long long	bytes;
	unsigned long long	bytes_rx;
	unsigned long long	bytes_tx;

};

void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
				       struct spi_transfer *xfer,
				       struct spi_master *master);

#define SPI_STATISTICS_ADD_TO_FIELD(stats, field, count)	\
	do {							\
		unsigned long flags;				\
		spin_lock_irqsave(&(stats)->lock, flags);	\
		(stats)->field += count;			\
		spin_unlock_irqrestore(&(stats)->lock, flags);	\
	} while (0)

#define SPI_STATISTICS_INCREMENT_FIELD(stats, field)	\
	SPI_STATISTICS_ADD_TO_FIELD(stats, field, 1)

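/*
 * Illustrative sketch, not part of this header: the helpers above take a
 * pointer to a struct spi_statistics, so code that notices a failed
 * transfer could bump the error counter with something like the line
 * below.  The "master" pointer and the surrounding error path are
 * assumptions made only for this example.
 *
 *	SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, errors);
 */
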
/**
 * struct spi_device - Master side proxy for an SPI slave device
 * @dev: Driver model representation of the device.
 * @master: SPI controller used with the device.
 * @max_speed_hz: Maximum clock rate to be used with this chip
 *	(on this board); may be changed by the device's driver.
 *	The spi_transfer.speed_hz can override this for each transfer.
 * @chip_select: Chipselect, distinguishing chips handled by @master.
 * @mode: The spi mode defines how data is clocked out and in.
 *	This may be changed by the device's driver.
 *	The "active low" default for chipselect mode can be overridden
 *	(by specifying SPI_CS_HIGH) as can the "MSB first" default for
 *	each word in a transfer (by specifying SPI_LSB_FIRST).
 * @bits_per_word: Data transfers involve one or more words; word sizes
 *	like eight or 12 bits are common.  In-memory wordsizes are
 *	powers of two bytes (e.g. 20 bit samples use 32 bits).
 *	This may be changed by the device's driver, or left at the
 *	default (0) indicating protocol words are eight bit bytes.
 *	The spi_transfer.bits_per_word can override this for each transfer.
 * @irq: Negative, or the number passed to request_irq() to receive
 *	interrupts from this device.
 * @controller_state: Controller's runtime state
 * @controller_data: Board-specific definitions for controller, such as
 *	FIFO initialization parameters; from board_info.controller_data
 * @modalias: Name of the driver to use with this device, or an alias
 *	for that name.  This appears in the sysfs "modalias" attribute
 *	for driver coldplugging, and in uevents used for hotplugging
 * @cs_gpio: gpio number of the chipselect line (optional, -ENOENT when
 *	not using a GPIO line)
 *
 * @statistics: statistics for the spi_device
 *
 * A @spi_device is used to interchange data between an SPI slave
 * (usually a discrete chip) and CPU memory.
 *
 * In @dev, the platform_data is used to hold information about this
 * device that's meaningful to the device's protocol driver, but not
 * to its controller.  One example might be an identifier for a chip
 * variant with slightly different functionality; another might be
 * information about how this particular board wires the chip's pins.
 */
struct spi_device {
	struct device		dev;
	struct spi_master	*master;
	u32			max_speed_hz;
	u8			chip_select;
	u8			bits_per_word;
	u16			mode;
#define	SPI_CPHA	0x01			/* clock phase */
#define	SPI_CPOL	0x02			/* clock polarity */
#define	SPI_MODE_0	(0|0)			/* (original MicroWire) */
#define	SPI_MODE_1	(0|SPI_CPHA)
#define	SPI_MODE_2	(SPI_CPOL|0)
#define	SPI_MODE_3	(SPI_CPOL|SPI_CPHA)
#define	SPI_CS_HIGH	0x04			/* chipselect active high? */
#define	SPI_LSB_FIRST	0x08			/* per-word bits-on-wire */
#define	SPI_3WIRE	0x10			/* SI/SO signals shared */
#define	SPI_LOOP	0x20			/* loopback mode */
#define	SPI_NO_CS	0x40			/* 1 dev/bus, no chipselect */
#define	SPI_READY	0x80			/* slave pulls low to pause */
#define	SPI_TX_DUAL	0x100			/* transmit with 2 wires */
#define	SPI_TX_QUAD	0x200			/* transmit with 4 wires */
#define	SPI_RX_DUAL	0x400			/* receive with 2 wires */
#define	SPI_RX_QUAD	0x800			/* receive with 4 wires */
	int			irq;
	void			*controller_state;
	void			*controller_data;
	char			modalias[SPI_NAME_SIZE];
	int			cs_gpio;	/* chip select gpio */

	/* the statistics */
	struct spi_statistics	statistics;

	/*
	 * likely need more hooks for more protocol options affecting how
	 * the controller talks to each chip, like:
	 *  - memory packing (12 bit samples into low bits, others zeroed)
	 *  - priority
	 *  - drop chipselect after each word
	 *  - chipselect delays
	 *  - ...
	 */
};

static inline struct spi_device *to_spi_device(struct device *dev)
{
	return dev ? container_of(dev, struct spi_device, dev) : NULL;
}

/* most drivers won't need to care about device refcounting */
static inline struct spi_device *spi_dev_get(struct spi_device *spi)
{
	return (spi && get_device(&spi->dev)) ? spi : NULL;
}

static inline void spi_dev_put(struct spi_device *spi)
{
	if (spi)
		put_device(&spi->dev);
}

/* ctldata is for the bus_master driver's runtime state */
static inline void *spi_get_ctldata(struct spi_device *spi)
{
	return spi->controller_state;
}

static inline void spi_set_ctldata(struct spi_device *spi, void *state)
{
	spi->controller_state = state;
}

/* device driver data */

static inline void spi_set_drvdata(struct spi_device *spi, void *data)
{
	dev_set_drvdata(&spi->dev, data);
}

static inline void *spi_get_drvdata(struct spi_device *spi)
{
	return dev_get_drvdata(&spi->dev);
}
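
/*
 * Illustrative sketch, not part of this header: protocol drivers commonly
 * stash per-device state with spi_set_drvdata() in probe() and fetch it
 * back in remove().  The "foo" names below are hypothetical.
 *
 *	static int foo_probe(struct spi_device *spi)
 *	{
 *		struct foo_priv *priv;
 *
 *		priv = devm_kzalloc(&spi->dev, sizeof(*priv), GFP_KERNEL);
 *		if (!priv)
 *			return -ENOMEM;
 *		spi_set_drvdata(spi, priv);
 *		return 0;
 *	}
 *
 *	static int foo_remove(struct spi_device *spi)
 *	{
 *		struct foo_priv *priv = spi_get_drvdata(spi);
 *
 *		foo_power_down(priv);
 *		return 0;
 *	}
 */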

struct spi_message;
struct spi_transfer;

/**
 * struct spi_driver - Host side "protocol" driver
 * @id_table: List of SPI devices supported by this driver
 * @probe: Binds this driver to the spi device.  Drivers can verify
 *	that the device is actually present, and may need to configure
 *	characteristics (such as bits_per_word) which weren't needed for
 *	the initial configuration done during system setup.
 * @remove: Unbinds this driver from the spi device
 * @shutdown: Standard shutdown callback used during system state
 *	transitions such as powerdown/halt and kexec
 * @driver: SPI device drivers should initialize the name and owner
 *	fields of this structure.
 *
 * This represents the kind of device driver that uses SPI messages to
 * interact with the hardware at the other end of a SPI link.  It's called
 * a "protocol" driver because it works through messages rather than talking
 * directly to SPI hardware (which is what the underlying SPI controller
 * driver does to pass those messages).  These protocols are defined in the
 * specification for the device(s) supported by the driver.
 *
 * As a rule, those device protocols represent the lowest level interface
 * supported by a driver, and it will support upper level interfaces too.
 * Examples of such upper levels include frameworks like MTD, networking,
 * MMC, RTC, filesystem character device nodes, and hardware monitoring.
 */
struct spi_driver {
	const struct spi_device_id *id_table;
	int			(*probe)(struct spi_device *spi);
	int			(*remove)(struct spi_device *spi);
	void			(*shutdown)(struct spi_device *spi);
	struct device_driver	driver;
};

static inline struct spi_driver *to_spi_driver(struct device_driver *drv)
{
	return drv ? container_of(drv, struct spi_driver, driver) : NULL;
}

extern int __spi_register_driver(struct module *owner, struct spi_driver *sdrv);

/**
 * spi_unregister_driver - reverse effect of spi_register_driver
 * @sdrv: the driver to unregister
 * Context: can sleep
 */
static inline void spi_unregister_driver(struct spi_driver *sdrv)
{
	if (sdrv)
		driver_unregister(&sdrv->driver);
}

/* use a define to avoid include chaining to get THIS_MODULE */
#define spi_register_driver(driver) \
	__spi_register_driver(THIS_MODULE, driver)

/**
 * module_spi_driver() - Helper macro for registering a SPI driver
 * @__spi_driver: spi_driver struct
 *
 * Helper macro for SPI drivers which do not do anything special in module
 * init/exit.  This eliminates a lot of boilerplate.  Each module may only
 * use this macro once, and calling it replaces module_init() and
 * module_exit().
 */
#define module_spi_driver(__spi_driver) \
	module_driver(__spi_driver, spi_register_driver, \
			spi_unregister_driver)

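/*
 * Illustrative sketch, not part of this header: a minimal protocol driver
 * that needs nothing special at module init/exit can register itself
 * entirely through module_spi_driver().  The "foo" driver, its id table
 * and its probe/remove callbacks are hypothetical.
 *
 *	static const struct spi_device_id foo_ids[] = {
 *		{ "foo", 0 },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(spi, foo_ids);
 *
 *	static struct spi_driver foo_driver = {
 *		.driver = {
 *			.name	= "foo",
 *		},
 *		.id_table	= foo_ids,
 *		.probe		= foo_probe,
 *		.remove		= foo_remove,
 *	};
 *	module_spi_driver(foo_driver);
 */
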
/**
 * struct spi_master - interface to SPI master controller
 * @dev: device interface to this driver
 * @list: link with the global spi_master list
 * @bus_num: board-specific (and often SOC-specific) identifier for a
 *	given SPI controller.
 * @num_chipselect: chipselects are used to distinguish individual
 *	SPI slaves, and are numbered from zero to num_chipselects.
 *	each slave has a chipselect signal, but it's common that not
 *	every chipselect is connected to a slave.
 * @dma_alignment: SPI controller constraint on DMA buffers alignment.
 * @mode_bits: flags understood by this controller driver
 * @bits_per_word_mask: A mask indicating which values of bits_per_word are
 *	supported by the driver. Bit n indicates that a bits_per_word n+1 is
 *	supported. If set, the SPI core will reject any transfer with an
 *	unsupported bits_per_word. If not set, this value is simply ignored,
 *	and it's up to the individual driver to perform any validation.
 * @min_speed_hz: Lowest supported transfer speed
 * @max_speed_hz: Highest supported transfer speed
 * @flags: other constraints relevant to this driver
 * @bus_lock_spinlock: spinlock for SPI bus locking
 * @bus_lock_mutex: mutex for SPI bus locking
 * @bus_lock_flag: indicates that the SPI bus is locked for exclusive use
 * @setup: updates the device mode and clocking records used by a
 *	device's SPI controller; protocol code may call this.  This
 *	must fail if an unrecognized or unsupported mode is requested.
 *	It's always safe to call this unless transfers are pending on
 *	the device whose settings are being modified.
 * @transfer: adds a message to the controller's transfer queue.
 * @cleanup: frees controller-specific state
 * @can_dma: determine whether this master supports DMA
 * @queued: whether this master is providing an internal message queue
 * @kworker: thread struct for message pump
 * @kworker_task: pointer to task for message pump kworker thread
 * @pump_messages: work struct for scheduling work to the message pump
 * @queue_lock: spinlock to synchronise access to message queue
 * @queue: message queue
 * @idling: the device is entering idle state
 * @cur_msg: the currently in-flight message
 * @cur_msg_prepared: spi_prepare_message was called for the currently
 *	in-flight message
 * @cur_msg_mapped: message has been mapped for DMA
 * @xfer_completion: used by core transfer_one_message()
 * @busy: message pump is busy
 * @running: message pump is running
 * @rt: whether this queue is set to run as a realtime task
 * @auto_runtime_pm: the core should ensure a runtime PM reference is held
 *	while the hardware is prepared, using the parent
 *	device for the spidev
 * @max_dma_len: Maximum length of a DMA transfer for the device.
 * @prepare_transfer_hardware: a message will soon arrive from the queue
 *	so the subsystem requests the driver to prepare the transfer hardware
 *	by issuing this call
 * @transfer_one_message: the subsystem calls the driver to transfer a single
 *	message while queuing transfers that arrive in the meantime. When the
 *	driver is finished with this message, it must call
 *	spi_finalize_current_message() so the subsystem can issue the next
 *	message
 * @unprepare_transfer_hardware: there are currently no more messages on the
 *	queue so the subsystem notifies the driver that it may relax the
 *	hardware by issuing this call
 * @set_cs: set the logic level of the chip select line.  May be called
 *	from interrupt context.
 * @prepare_message: set up the controller to transfer a single message,
 *	for example doing DMA mapping.  Called from threaded
 *	context.
 * @transfer_one: transfer a single spi_transfer.
 *	- return 0 if the transfer is finished,
 *	- return 1 if the transfer is still in progress. When
 *	  the driver is finished with this transfer it must
 *	  call spi_finalize_current_transfer() so the subsystem
 *	  can issue the next transfer. Note: transfer_one and
 *	  transfer_one_message are mutually exclusive; when both
 *	  are set, the generic subsystem does not call your
 *	  transfer_one callback.
 * @handle_err: the subsystem calls the driver to handle an error that occurs
 *	in the generic implementation of transfer_one_message().
 * @unprepare_message: undo any work done by prepare_message().
 * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS
 *	number. Any individual value may be -ENOENT for CS lines that
 *	are not GPIOs (driven by the SPI controller itself).
 * @statistics: statistics for the spi_master
 * @dma_tx: DMA transmit channel
 * @dma_rx: DMA receive channel
 * @dummy_rx: dummy receive buffer for full-duplex devices
 * @dummy_tx: dummy transmit buffer for full-duplex devices
 *
 * Each SPI master controller can communicate with one or more @spi_device
 * children.  These make a small bus, sharing MOSI, MISO and SCK signals
 * but not chip select signals.  Each device may be configured to use a
 * different clock rate, since those shared signals are ignored unless
 * the chip is selected.
 *
 * The driver for an SPI controller manages access to those devices through
 * a queue of spi_message transactions, copying data between CPU memory and
 * an SPI slave device.  For each such message it queues, it calls the
 * message's completion function when the transaction completes.
 */
struct spi_master {
	struct device	dev;

	struct list_head list;

	/* other than negative (== assign one dynamically), bus_num is fully
	 * board-specific.  usually that simplifies to being SOC-specific.
	 * example:  one SOC has three SPI controllers, numbered 0..2,
	 * and one board's schematics might show it using SPI-2.  software
	 * would normally use bus_num=2 for that controller.
	 */
	s16			bus_num;

	/* chipselects will be integral to many controllers; some others
	 * might use board-specific GPIOs.
	 */
	u16			num_chipselect;

	/* some SPI controllers pose alignment requirements on DMAable
	 * buffers; let protocol drivers know about these requirements.
	 */
	u16			dma_alignment;

	/* spi_device.mode flags understood by this controller driver */
	u16			mode_bits;

	/* bitmask of supported bits_per_word for transfers */
	u32			bits_per_word_mask;
#define SPI_BPW_MASK(bits) BIT((bits) - 1)
#define SPI_BIT_MASK(bits) (((bits) == 32) ? ~0U : (BIT(bits) - 1))
#define SPI_BPW_RANGE_MASK(min, max) (SPI_BIT_MASK(max) - SPI_BIT_MASK(min - 1))

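	/*
	 * Illustrative sketch, not part of this header: a controller that
	 * handles 8- and 16-bit words, or any width from 4 to 16 bits,
	 * could advertise that when filling in its spi_master, e.g.
	 *
	 *	master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
	 *	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
	 */
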
	/* limits on transfer speed */
	u32			min_speed_hz;
	u32			max_speed_hz;

	/* other constraints relevant to this driver */
	u16			flags;
#define SPI_MASTER_HALF_DUPLEX	BIT(0)		/* can't do full duplex */
#define SPI_MASTER_NO_RX	BIT(1)		/* can't do buffer read */
#define SPI_MASTER_NO_TX	BIT(2)		/* can't do buffer write */
#define SPI_MASTER_MUST_RX	BIT(3)		/* requires rx */
#define SPI_MASTER_MUST_TX	BIT(4)		/* requires tx */

	/* lock and mutex for SPI bus locking */
	spinlock_t		bus_lock_spinlock;
	struct mutex		bus_lock_mutex;

	/* flag indicating that the SPI bus is locked for exclusive use */
	bool			bus_lock_flag;

	/* Setup mode and clock, etc (spi driver may call many times).
	 *
	 * IMPORTANT:  this may be called when transfers to another
	 * device are active.  DO NOT UPDATE SHARED REGISTERS in ways
	 * which could break those transfers.
	 */
	int			(*setup)(struct spi_device *spi);

	/* bidirectional bulk transfers
	 *
	 * + The transfer() method may not sleep; its main role is
	 *   just to add the message to the queue.
	 * + For now there's no remove-from-queue operation, or
	 *   any other request management
	 * + To a given spi_device, message queueing is pure fifo
	 *
	 * + The master's main job is to process its message queue,
	 *   selecting a chip then transferring data
	 * + If there are multiple spi_device children, the i/o queue
	 *   arbitration algorithm is unspecified (round robin, fifo,
	 *   priority, reservations, preemption, etc)
	 *
	 * + Chipselect stays active during the entire message
	 *   (unless modified by spi_transfer.cs_change != 0).
	 * + The message transfers use clock and SPI mode parameters
	 *   previously established by setup() for this device
	 */
	int			(*transfer)(struct spi_device *spi,
						struct spi_message *mesg);

	/* called on release() to free memory provided by spi_master */
	void			(*cleanup)(struct spi_device *spi);

	/*
	 * Used to enable core support for DMA handling.  If can_dma()
	 * exists and returns true, the transfer will be mapped prior to
	 * transfer_one() being called.  The driver should not modify or
	 * store xfer, and dma_tx and dma_rx must be set while the device
	 * is prepared.
	 */
	bool			(*can_dma)(struct spi_master *master,
					   struct spi_device *spi,
					   struct spi_transfer *xfer);

	/*
	 * These hooks are for drivers that want to use the generic
	 * master transfer queueing mechanism.  If these are used, the
	 * transfer() function above must NOT be specified by the driver.
	 * Over time we expect SPI drivers to be phased over to this API.
	 */
	bool				queued;
	struct kthread_worker		kworker;
	struct task_struct		*kworker_task;
	struct kthread_work		pump_messages;
	spinlock_t			queue_lock;
	struct list_head		queue;
	struct spi_message		*cur_msg;
	bool				idling;
	bool				busy;
	bool				running;
	bool				rt;
	bool				auto_runtime_pm;
	bool				cur_msg_prepared;
	bool				cur_msg_mapped;
	struct completion		xfer_completion;
	size_t				max_dma_len;

	int (*prepare_transfer_hardware)(struct spi_master *master);
	int (*transfer_one_message)(struct spi_master *master,
				    struct spi_message *mesg);
	int (*unprepare_transfer_hardware)(struct spi_master *master);
	int (*prepare_message)(struct spi_master *master,
			       struct spi_message *message);
	int (*unprepare_message)(struct spi_master *master,
				 struct spi_message *message);

	/*
	 * These hooks are for drivers that use a generic implementation
	 * of transfer_one_message() provided by the core.
	 */
	void (*set_cs)(struct spi_device *spi, bool enable);
	int (*transfer_one)(struct spi_master *master, struct spi_device *spi,
			    struct spi_transfer *transfer);
	void (*handle_err)(struct spi_master *master,
			   struct spi_message *message);

	/* gpio chip select */
	int			*cs_gpios;

	/* statistics */
	struct spi_statistics	statistics;

	/* DMA channels for use with core dmaengine helpers */
	struct dma_chan		*dma_tx;
	struct dma_chan		*dma_rx;

	/* dummy data for full duplex devices */
	void			*dummy_rx;
	void			*dummy_tx;
};

static inline void *spi_master_get_devdata(struct spi_master *master)
{
	return dev_get_drvdata(&master->dev);
}

static inline void spi_master_set_devdata(struct spi_master *master, void *data)
{
	dev_set_drvdata(&master->dev, data);
}

static inline struct spi_master *spi_master_get(struct spi_master *master)
{
	if (!master || !get_device(&master->dev))
		return NULL;
	return master;
}

static inline void spi_master_put(struct spi_master *master)
{
	if (master)
		put_device(&master->dev);
}

/* PM calls that need to be issued by the driver */
extern int spi_master_suspend(struct spi_master *master);
extern int spi_master_resume(struct spi_master *master);

/* Calls the driver makes to interact with the message queue */
extern struct spi_message *spi_get_next_queued_message(struct spi_master *master);
extern void spi_finalize_current_message(struct spi_master *master);
extern void spi_finalize_current_transfer(struct spi_master *master);

/* the spi driver core manages memory for the spi_master classdev */
extern struct spi_master *
spi_alloc_master(struct device *host, unsigned size);

extern int spi_register_master(struct spi_master *master);
extern int devm_spi_register_master(struct device *dev,
				    struct spi_master *master);
extern void spi_unregister_master(struct spi_master *master);

extern struct spi_master *spi_busnum_to_master(u16 busnum);

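/*
 * Illustrative sketch, not part of this header: a controller driver
 * typically allocates the master, fills in its capabilities and callbacks,
 * and registers it from its probe() routine.  The "foo" names, the
 * platform device and the chosen numbers are hypothetical.
 *
 *	master = spi_alloc_master(&pdev->dev, sizeof(struct foo_priv));
 *	if (!master)
 *		return -ENOMEM;
 *
 *	master->bus_num = pdev->id;
 *	master->num_chipselect = 4;
 *	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
 *	master->bits_per_word_mask = SPI_BPW_MASK(8);
 *	master->set_cs = foo_set_cs;
 *	master->transfer_one = foo_transfer_one;
 *
 *	ret = devm_spi_register_master(&pdev->dev, master);
 */
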
/*---------------------------------------------------------------------------*/

/*
 * I/O INTERFACE between SPI controller and protocol drivers
 *
 * Protocol drivers use a queue of spi_messages, each transferring data
 * between the controller and memory buffers.
 *
 * The spi_messages themselves consist of a series of read+write transfer
 * segments.  Those segments always read the same number of bits as they
 * write; but one or the other is easily ignored by passing a null buffer
 * pointer.  (This is unlike most types of I/O API, because SPI hardware
 * is full duplex.)
 *
 * NOTE:  Allocation of spi_transfer and spi_message memory is entirely
 * up to the protocol driver, which guarantees the integrity of both (as
 * well as the data buffers) for as long as the message is queued.
 */

/**
 * struct spi_transfer - a read/write buffer pair
 * @tx_buf: data to be written (dma-safe memory), or NULL
 * @rx_buf: data to be read (dma-safe memory), or NULL
 * @tx_dma: DMA address of tx_buf, if @spi_message.is_dma_mapped
 * @rx_dma: DMA address of rx_buf, if @spi_message.is_dma_mapped
 * @tx_nbits: number of bits used for writing. If 0 the default
 *	(SPI_NBITS_SINGLE) is used.
 * @rx_nbits: number of bits used for reading. If 0 the default
 *	(SPI_NBITS_SINGLE) is used.
 * @len: size of rx and tx buffers (in bytes)
 * @speed_hz: Select a speed other than the device default for this
 *	transfer. If 0 the default (from @spi_device) is used.
 * @bits_per_word: select a bits_per_word other than the device default
 *	for this transfer. If 0 the default (from @spi_device) is used.
 * @cs_change: affects chipselect after this transfer completes
 * @delay_usecs: microseconds to delay after this transfer before
 *	(optionally) changing the chipselect status, then starting
 *	the next transfer or completing this @spi_message.
 * @transfer_list: transfers are sequenced through @spi_message.transfers
 * @tx_sg: Scatterlist for transmit, currently not for client use
 * @rx_sg: Scatterlist for receive, currently not for client use
 *
 * SPI transfers always write the same number of bytes as they read.
 * Protocol drivers should always provide @rx_buf and/or @tx_buf.
 * In some cases, they may also want to provide DMA addresses for
 * the data being transferred; that may reduce overhead, when the
 * underlying driver uses dma.
 *
 * If the transmit buffer is null, zeroes will be shifted out
 * while filling @rx_buf.  If the receive buffer is null, the data
 * shifted in will be discarded.  Only "len" bytes shift out (or in).
 * It's an error to try to shift out a partial word.  (For example, by
 * shifting out three bytes with word size of sixteen or twenty bits;
 * the former uses two bytes per word, the latter uses four bytes.)
 *
 * In-memory data values are always in native CPU byte order, translated
 * from the wire byte order (big-endian except with SPI_LSB_FIRST).  So
 * for example when bits_per_word is sixteen, buffers are 2N bytes long
 * (@len = 2N) and hold N sixteen bit words in CPU byte order.
 *
 * When the word size of the SPI transfer is not a power-of-two multiple
 * of eight bits, those in-memory words include extra bits.  In-memory
 * words are always seen by protocol drivers as right-justified, so the
 * undefined (rx) or unused (tx) bits are always the most significant bits.
 *
 * All SPI transfers start with the relevant chipselect active.  Normally
 * it stays selected until after the last transfer in a message.  Drivers
 * can affect the chipselect signal using cs_change.
 *
 * (i) If the transfer isn't the last one in the message, this flag is
 * used to make the chipselect briefly go inactive in the middle of the
 * message.  Toggling chipselect in this way may be needed to terminate
 * a chip command, letting a single spi_message perform a whole group of
 * chip transactions together.
 *
 * (ii) When the transfer is the last one in the message, the chip may
 * stay selected until the next transfer.  On multi-device SPI busses
 * with nothing blocking messages going to other devices, this is just
 * a performance hint; starting a message to another device deselects
 * this one.  But in other cases, this can be used to ensure correctness.
 * Some devices need protocol transactions to be built from a series of
 * spi_message submissions, where the content of one message is determined
 * by the results of previous messages and where the whole transaction
 * ends when the chipselect goes inactive.
 *
 * When the controller can transfer data on one, two or four wires, the
 * width used in each direction is taken from @tx_nbits and @rx_nbits;
 * for bidirectional transfers both should be set.  Use SPI_NBITS_SINGLE
 * (1x), SPI_NBITS_DUAL (2x) or SPI_NBITS_QUAD (4x) to select the width.
 *
 * The code that submits an spi_message (and its spi_transfers)
 * to the lower layers is responsible for managing its memory.
 * Zero-initialize every field you don't set up explicitly, to
 * insulate against future API updates.  After you submit a message
 * and its transfers, ignore them until its completion callback.
 */
struct spi_transfer {
	/* it's ok if tx_buf == rx_buf (right?)
	 * for MicroWire, one buffer must be null
	 * buffers must work with dma_*map_single() calls, unless
	 *   spi_message.is_dma_mapped reports a pre-existing mapping
	 */
	const void	*tx_buf;
	void		*rx_buf;
	unsigned	len;

	dma_addr_t	tx_dma;
	dma_addr_t	rx_dma;
	struct sg_table tx_sg;
	struct sg_table rx_sg;

	unsigned	cs_change:1;
	unsigned	tx_nbits:3;
	unsigned	rx_nbits:3;
#define	SPI_NBITS_SINGLE	0x01 /* 1bit transfer */
#define	SPI_NBITS_DUAL		0x02 /* 2bits transfer */
#define	SPI_NBITS_QUAD		0x04 /* 4bits transfer */
	u8		bits_per_word;
	u16		delay_usecs;
	u32		speed_hz;

	struct list_head transfer_list;
};

/**
 * struct spi_message - one multi-segment SPI transaction
 * @transfers: list of transfer segments in this transaction
 * @spi: SPI device to which the transaction is queued
 * @is_dma_mapped: if true, the caller provided both dma and cpu virtual
 *	addresses for each transfer buffer
 * @complete: called to report transaction completions
 * @context: the argument to complete() when it's called
 * @frame_length: the total number of bytes in the message
 * @actual_length: the total number of bytes that were transferred in all
 *	successful segments
 * @status: zero for success, else negative errno
 * @queue: for use by whichever driver currently owns the message
 * @state: for use by whichever driver currently owns the message
 *
 * A @spi_message is used to execute an atomic sequence of data transfers,
 * each represented by a struct spi_transfer.  The sequence is "atomic"
 * in the sense that no other spi_message may use that SPI bus until that
 * sequence completes.  On some systems, many such sequences can execute as
 * a single programmed DMA transfer.  On all systems, these messages are
 * queued, and might complete after transactions to other devices.  Messages
 * sent to a given spi_device are always executed in FIFO order.
 *
 * The code that submits an spi_message (and its spi_transfers)
 * to the lower layers is responsible for managing its memory.
 * Zero-initialize every field you don't set up explicitly, to
 * insulate against future API updates.  After you submit a message
 * and its transfers, ignore them until its completion callback.
 */
struct spi_message {
	struct list_head	transfers;

	struct spi_device	*spi;

	unsigned		is_dma_mapped:1;

	/* REVISIT:  we might want a flag affecting the behavior of the
	 * last transfer ... allowing things like "read 16 bit length L"
	 * immediately followed by "read L bytes".  Basically imposing
	 * a specific message scheduling algorithm.
	 *
	 * Some controller drivers (message-at-a-time queue processing)
	 * could provide that as their default scheduling algorithm.  But
	 * others (with multi-message pipelines) could need a flag to
	 * tell them about such special cases.
	 */

	/* completion is reported through a callback */
	void			(*complete)(void *context);
	void			*context;
	unsigned		frame_length;
	unsigned		actual_length;
	int			status;

	/* for optional use by whatever driver currently owns the
	 * spi_message ...  between calls to spi_async and then later
	 * complete(), that's the spi_master controller driver.
	 */
	struct list_head	queue;
	void			*state;
};

static inline void spi_message_init(struct spi_message *m)
{
	memset(m, 0, sizeof *m);
	INIT_LIST_HEAD(&m->transfers);
}

static inline void
spi_message_add_tail(struct spi_transfer *t, struct spi_message *m)
{
	list_add_tail(&t->transfer_list, &m->transfers);
}

static inline void
spi_transfer_del(struct spi_transfer *t)
{
	list_del(&t->transfer_list);
}

/**
 * spi_message_init_with_transfers - Initialize spi_message and append transfers
 * @m: spi_message to be initialized
 * @xfers: An array of spi transfers
 * @num_xfers: Number of items in the xfer array
 *
 * This function initializes the given spi_message and adds each spi_transfer in
 * the given array to the message.
 */
static inline void
spi_message_init_with_transfers(struct spi_message *m,
				struct spi_transfer *xfers,
				unsigned int num_xfers)
{
	unsigned int i;

	spi_message_init(m);
	for (i = 0; i < num_xfers; ++i)
		spi_message_add_tail(&xfers[i], m);
}

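/*
 * Illustrative sketch, not part of this header: building a two-segment
 * message by hand, e.g. a command write followed by a response read while
 * chipselect stays asserted.  The buffers and the "spi" pointer are
 * hypothetical.
 *
 *	struct spi_transfer t[2] = {
 *		{ .tx_buf = cmd,  .len = sizeof(cmd),  },
 *		{ .rx_buf = resp, .len = sizeof(resp), },
 *	};
 *	struct spi_message m;
 *
 *	spi_message_init(&m);
 *	spi_message_add_tail(&t[0], &m);
 *	spi_message_add_tail(&t[1], &m);
 *	status = spi_sync(spi, &m);
 */
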
/* It's fine to embed message and transaction structures in other data
 * structures so long as you don't free them while they're in use.
 */

static inline struct spi_message *spi_message_alloc(unsigned ntrans, gfp_t flags)
{
	struct spi_message *m;

	m = kzalloc(sizeof(struct spi_message)
			+ ntrans * sizeof(struct spi_transfer),
			flags);
	if (m) {
		unsigned i;
		struct spi_transfer *t = (struct spi_transfer *)(m + 1);

		INIT_LIST_HEAD(&m->transfers);
		for (i = 0; i < ntrans; i++, t++)
			spi_message_add_tail(t, m);
	}
	return m;
}

static inline void spi_message_free(struct spi_message *m)
{
	kfree(m);
}

extern int spi_setup(struct spi_device *spi);
extern int spi_async(struct spi_device *spi, struct spi_message *message);
extern int spi_async_locked(struct spi_device *spi,
			    struct spi_message *message);

/*---------------------------------------------------------------------------*/

/* All these synchronous SPI transfer routines are utilities layered
 * over the core async transfer primitive.  Here, "synchronous" means
 * they will sleep uninterruptibly until the async transfer completes.
 */

extern int spi_sync(struct spi_device *spi, struct spi_message *message);
extern int spi_sync_locked(struct spi_device *spi, struct spi_message *message);
extern int spi_bus_lock(struct spi_master *master);
extern int spi_bus_unlock(struct spi_master *master);

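/*
 * Illustrative sketch, not part of this header: a caller that must keep
 * other devices off the bus across several messages can bracket them with
 * the bus lock and use the _locked variants in between.  The two messages
 * are assumed to have been set up elsewhere.
 *
 *	spi_bus_lock(spi->master);
 *	status = spi_sync_locked(spi, &msg1);
 *	if (status == 0)
 *		status = spi_sync_locked(spi, &msg2);
 *	spi_bus_unlock(spi->master);
 */
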
/**
 * spi_write - SPI synchronous write
 * @spi: device to which data will be written
 * @buf: data buffer
 * @len: data buffer size
 * Context: can sleep
 *
 * This writes the buffer and returns zero or a negative error code.
 * Callable only from contexts that can sleep.
 */
static inline int
spi_write(struct spi_device *spi, const void *buf, size_t len)
{
	struct spi_transfer	t = {
			.tx_buf		= buf,
			.len		= len,
		};
	struct spi_message	m;

	spi_message_init(&m);
	spi_message_add_tail(&t, &m);
	return spi_sync(spi, &m);
}

/**
 * spi_read - SPI synchronous read
 * @spi: device from which data will be read
 * @buf: data buffer
 * @len: data buffer size
 * Context: can sleep
 *
 * This reads the buffer and returns zero or a negative error code.
 * Callable only from contexts that can sleep.
 */
static inline int
spi_read(struct spi_device *spi, void *buf, size_t len)
{
	struct spi_transfer	t = {
			.rx_buf		= buf,
			.len		= len,
		};
	struct spi_message	m;

	spi_message_init(&m);
	spi_message_add_tail(&t, &m);
	return spi_sync(spi, &m);
}

/**
 * spi_sync_transfer - synchronous SPI data transfer
 * @spi: device with which data will be exchanged
 * @xfers: An array of spi_transfers
 * @num_xfers: Number of items in the xfer array
 * Context: can sleep
 *
 * Does a synchronous SPI data transfer of the given spi_transfer array.
 *
 * For more specific semantics see spi_sync().
 *
 * It returns zero on success, else a negative error code.
 */
static inline int
spi_sync_transfer(struct spi_device *spi, struct spi_transfer *xfers,
		  unsigned int num_xfers)
{
	struct spi_message msg;

	spi_message_init_with_transfers(&msg, xfers, num_xfers);

	return spi_sync(spi, &msg);
}

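/*
 * Illustrative sketch, not part of this header: the array form above keeps
 * short register accesses compact, e.g. one command byte followed by a
 * two-byte read in a single message.  The buffers are hypothetical.
 *
 *	struct spi_transfer xfers[] = {
 *		{ .tx_buf = &cmd, .len = 1, },
 *		{ .rx_buf = data, .len = 2, },
 *	};
 *
 *	status = spi_sync_transfer(spi, xfers, ARRAY_SIZE(xfers));
 */
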
/* this copies txbuf and rxbuf data; for small transfers only! */
extern int spi_write_then_read(struct spi_device *spi,
		const void *txbuf, unsigned n_tx,
		void *rxbuf, unsigned n_rx);

/**
 * spi_w8r8 - SPI synchronous 8 bit write followed by 8 bit read
 * @spi: device with which data will be exchanged
 * @cmd: command to be written before data is read back
 * Context: can sleep
 *
 * This returns the (unsigned) eight bit number returned by the
 * device, or else a negative error code.  Callable only from
 * contexts that can sleep.
 */
static inline ssize_t spi_w8r8(struct spi_device *spi, u8 cmd)
{
	ssize_t			status;
	u8			result;

	status = spi_write_then_read(spi, &cmd, 1, &result, 1);

	/* return negative errno or unsigned value */
	return (status < 0) ? status : result;
}

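/*
 * Illustrative sketch, not part of this header: chips with single-byte
 * registers can be read with one call.  FOO_REG_CHIP_ID is a hypothetical
 * register address.
 *
 *	ssize_t id = spi_w8r8(spi, FOO_REG_CHIP_ID);
 *	if (id < 0)
 *		return id;
 */
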
/**
 * spi_w8r16 - SPI synchronous 8 bit write followed by 16 bit read
 * @spi: device with which data will be exchanged
 * @cmd: command to be written before data is read back
 * Context: can sleep
 *
 * This returns the (unsigned) sixteen bit number returned by the
 * device, or else a negative error code.  Callable only from
 * contexts that can sleep.
 *
 * The number is returned in wire-order, which is at least sometimes
 * big-endian.
 */
static inline ssize_t spi_w8r16(struct spi_device *spi, u8 cmd)
{
	ssize_t			status;
	u16			result;

	status = spi_write_then_read(spi, &cmd, 1, &result, 2);

	/* return negative errno or unsigned value */
	return (status < 0) ? status : result;
}

/**
 * spi_w8r16be - SPI synchronous 8 bit write followed by 16 bit big-endian read
 * @spi: device with which data will be exchanged
 * @cmd: command to be written before data is read back
 * Context: can sleep
 *
 * This returns the (unsigned) sixteen bit number returned by the device in cpu
 * endianness, or else a negative error code. Callable only from contexts that
 * can sleep.
 *
 * This function is similar to spi_w8r16, with the exception that it will
 * convert the read 16 bit data word from big-endian to native endianness.
 */
static inline ssize_t spi_w8r16be(struct spi_device *spi, u8 cmd)
{
	ssize_t status;
	__be16 result;

	status = spi_write_then_read(spi, &cmd, 1, &result, 2);
	if (status < 0)
		return status;

	return be16_to_cpu(result);
}

/*---------------------------------------------------------------------------*/

/*
 * INTERFACE between board init code and SPI infrastructure.
 *
 * No SPI driver ever sees these SPI device table segments, but
 * it's how the SPI core (or adapters that get hotplugged) grows
 * the driver model tree.
 *
 * As a rule, SPI devices can't be probed.  Instead, board init code
 * provides a table listing the devices which are present, with enough
 * information to bind and set up the device's driver.  There's basic
 * support for nonstatic configurations too; enough to handle adding
 * parport adapters, or microcontrollers acting as USB-to-SPI bridges.
 */

/**
 * struct spi_board_info - board-specific template for a SPI device
 * @modalias: Initializes spi_device.modalias; identifies the driver.
 * @platform_data: Initializes spi_device.platform_data; the particular
 *	data stored there is driver-specific.
 * @controller_data: Initializes spi_device.controller_data; some
 *	controllers need hints about hardware setup, e.g. for DMA.
 * @irq: Initializes spi_device.irq; depends on how the board is wired.
 * @max_speed_hz: Initializes spi_device.max_speed_hz; based on limits
 *	from the chip datasheet and board-specific signal quality issues.
 * @bus_num: Identifies which spi_master parents the spi_device; unused
 *	by spi_new_device(), and otherwise depends on board wiring.
 * @chip_select: Initializes spi_device.chip_select; depends on how
 *	the board is wired.
 * @mode: Initializes spi_device.mode; based on the chip datasheet, board
 *	wiring (some devices support both 3WIRE and standard modes), and
 *	possibly presence of an inverter in the chipselect path.
 *
 * When adding new SPI devices to the device tree, these structures serve
 * as a partial device template.  They hold information which can't always
 * be determined by drivers.  Information that probe() can establish (such
 * as the default transfer wordsize) is not included here.
 *
 * These structures are used in two places.  Their primary role is to
 * be stored in tables of board-specific device descriptors, which are
 * declared early in board initialization and then used (much later) to
 * populate a controller's device tree after that controller's driver
 * initializes.  A secondary (and atypical) role is as a parameter to a
 * spi_new_device() call, which happens after those controller drivers
 * are active in some dynamic board configuration models.
 */
struct spi_board_info {
	/* the device name and module name are coupled, like platform_bus;
	 * "modalias" is normally the driver name.
	 *
	 * platform_data goes to spi_device.dev.platform_data,
	 * controller_data goes to spi_device.controller_data,
	 * irq is copied too
	 */
	char		modalias[SPI_NAME_SIZE];
	const void	*platform_data;
	void		*controller_data;
	int		irq;

	/* slower signaling on noisy or low voltage boards */
	u32		max_speed_hz;

	/* bus_num is board specific and matches the bus_num of some
	 * spi_master that will probably be registered later.
	 *
	 * chip_select reflects how this chip is wired to that master;
	 * it's less than num_chipselect.
	 */
	u16		bus_num;
	u16		chip_select;

	/* mode becomes spi_device.mode, and is essential for chips
	 * where the default of SPI_CS_HIGH = 0 is wrong.
	 */
	u16		mode;

	/* ... may need additional spi_device chip config data here.
	 * avoid stuff protocol drivers can set; but include stuff
	 * needed to behave without being bound to a driver:
	 *  - quirks like clock rate mattering when not selected
	 */
};

#ifdef	CONFIG_SPI
extern int
spi_register_board_info(struct spi_board_info const *info, unsigned n);
#else
/* board init code may ignore whether SPI is configured or not */
static inline int
spi_register_board_info(struct spi_board_info const *info, unsigned n)
	{ return 0; }
#endif

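/*
 * Illustrative sketch, not part of this header: board setup code usually
 * registers a table like this once, before the matching controller driver
 * probes.  The "foo" device, bus number and clock rate are hypothetical.
 *
 *	static struct spi_board_info board_spi_devices[] __initdata = {
 *		{
 *			.modalias	= "foo",
 *			.max_speed_hz	= 1000000,
 *			.bus_num	= 0,
 *			.chip_select	= 1,
 *			.mode		= SPI_MODE_3,
 *		},
 *	};
 *
 *	spi_register_board_info(board_spi_devices,
 *				ARRAY_SIZE(board_spi_devices));
 */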

/* If you're hotplugging an adapter with devices (parport, usb, etc)
 * use spi_new_device() to describe each device.  You can also call
 * spi_unregister_device() to start making that device vanish, but
 * normally that would be handled by spi_unregister_master().
 *
 * You can also use spi_alloc_device() and spi_add_device() to use a two
 * stage registration sequence for each spi_device.  This gives the caller
 * some more control over the spi_device structure before it is registered,
 * but requires the caller to initialize fields that would otherwise
 * be defined using the board info.
 */
extern struct spi_device *
spi_alloc_device(struct spi_master *master);

extern int
spi_add_device(struct spi_device *spi);

extern struct spi_device *
spi_new_device(struct spi_master *, struct spi_board_info *);

static inline void
spi_unregister_device(struct spi_device *spi)
{
	if (spi)
		device_unregister(&spi->dev);
}

extern const struct spi_device_id *
spi_get_device_id(const struct spi_device *sdev);

static inline bool
spi_transfer_is_last(struct spi_master *master, struct spi_transfer *xfer)
{
	return list_is_last(&xfer->transfer_list, &master->cur_msg->transfers);
}

#endif /* __LINUX_SPI_H */