| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 1 | /* | 
 | 2 |  * Copyright (C) 2005 David Brownell | 
 | 3 |  * | 
 | 4 |  * This program is free software; you can redistribute it and/or modify | 
 | 5 |  * it under the terms of the GNU General Public License as published by | 
 | 6 |  * the Free Software Foundation; either version 2 of the License, or | 
 | 7 |  * (at your option) any later version. | 
 | 8 |  * | 
 | 9 |  * This program is distributed in the hope that it will be useful, | 
 | 10 |  * but WITHOUT ANY WARRANTY; without even the implied warranty of | 
 | 11 |  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | 
 | 12 |  * GNU General Public License for more details. | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 13 |  */ | 
 | 14 |  | 
 | 15 | #ifndef __LINUX_SPI_H | 
 | 16 | #define __LINUX_SPI_H | 
 | 17 |  | 
| Randy Dunlap | 0a30c5c | 2009-01-04 12:00:47 -0800 | [diff] [blame] | 18 | #include <linux/device.h> | 
| Anton Vorontsov | 75368bf | 2009-09-22 16:46:04 -0700 | [diff] [blame] | 19 | #include <linux/mod_devicetable.h> | 
| Tejun Heo | 5a0e3ad | 2010-03-24 17:04:11 +0900 | [diff] [blame] | 20 | #include <linux/slab.h> | 
| Linus Walleij | ffbbdd21 | 2012-02-22 10:05:38 +0100 | [diff] [blame] | 21 | #include <linux/kthread.h> | 
| Mark Brown | b158935 | 2013-10-05 11:50:40 +0100 | [diff] [blame] | 22 | #include <linux/completion.h> | 
| Mark Brown | 6ad45a2 | 2014-02-02 13:47:47 +0000 | [diff] [blame] | 23 | #include <linux/scatterlist.h> | 
| Randy Dunlap | 0a30c5c | 2009-01-04 12:00:47 -0800 | [diff] [blame] | 24 |  | 
| Mark Brown | 99adef3 | 2014-01-16 12:22:43 +0000 | [diff] [blame] | 25 | struct dma_chan; | 
| Martin Sperl | eca2ebc | 2015-06-22 13:00:36 +0000 | [diff] [blame] | 26 | struct spi_master; | 
 | 27 | struct spi_transfer; | 
| David Brownell | b885244 | 2006-01-08 13:34:23 -0800 | [diff] [blame] | 28 |  | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 29 | /* | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 30 |  * INTERFACES between SPI master-side drivers and SPI infrastructure. | 
 | 31 |  * (There's no SPI slave support for Linux yet...) | 
 | 32 |  */ | 
 | 33 | extern struct bus_type spi_bus_type; | 
 | 34 |  | 
/**
 * struct spi_statistics - statistics for spi transfers
 * @lock:          lock protecting this structure
 *
 * @messages:      number of spi-messages handled
 * @transfers:     number of spi_transfers handled
 * @errors:        number of errors during spi_transfer
 * @timedout:      number of timeouts during spi_transfer
 *
 * @spi_sync:      number of times spi_sync is used
 * @spi_sync_immediate:
 *                 number of times spi_sync is executed immediately
 *                 in calling context without queuing and scheduling
 * @spi_async:     number of times spi_async is used
 *
 * @bytes:         number of bytes transferred to/from device
 * @bytes_tx:      number of bytes sent to device
 * @bytes_rx:      number of bytes received from device
 *
 * @transfer_bytes_histo:
 *                 histogram of transfer sizes, bucketed by size
 *                 (bucketing is done by spi_statistics_add_transfer_stats())
 */
struct spi_statistics {
	spinlock_t		lock; /* protects every counter below */

	unsigned long		messages;
	unsigned long		transfers;
	unsigned long		errors;
	unsigned long		timedout;

	unsigned long		spi_sync;
	unsigned long		spi_sync_immediate;
	unsigned long		spi_async;

	unsigned long long	bytes;
	unsigned long long	bytes_rx;
	unsigned long long	bytes_tx;

#define SPI_STATISTICS_HISTO_SIZE 17
	unsigned long transfer_bytes_histo[SPI_STATISTICS_HISTO_SIZE];
};
 | 76 |  | 
 | 77 | void spi_statistics_add_transfer_stats(struct spi_statistics *stats, | 
 | 78 | 				       struct spi_transfer *xfer, | 
 | 79 | 				       struct spi_master *master); | 
 | 80 |  | 
 | 81 | #define SPI_STATISTICS_ADD_TO_FIELD(stats, field, count)	\ | 
 | 82 | 	do {							\ | 
 | 83 | 		unsigned long flags;				\ | 
 | 84 | 		spin_lock_irqsave(&(stats)->lock, flags);	\ | 
 | 85 | 		(stats)->field += count;			\ | 
 | 86 | 		spin_unlock_irqrestore(&(stats)->lock, flags);	\ | 
 | 87 | 	} while (0) | 
 | 88 |  | 
 | 89 | #define SPI_STATISTICS_INCREMENT_FIELD(stats, field)	\ | 
 | 90 | 	SPI_STATISTICS_ADD_TO_FIELD(stats, field, 1) | 
 | 91 |  | 
/**
 * struct spi_device - Master side proxy for an SPI slave device
 * @dev: Driver model representation of the device.
 * @master: SPI controller used with the device.
 * @max_speed_hz: Maximum clock rate to be used with this chip
 *	(on this board); may be changed by the device's driver.
 *	The spi_transfer.speed_hz can override this for each transfer.
 * @chip_select: Chipselect, distinguishing chips handled by @master.
 * @mode: The spi mode defines how data is clocked out and in.
 *	This may be changed by the device's driver.
 *	The "active low" default for chipselect mode can be overridden
 *	(by specifying SPI_CS_HIGH) as can the "MSB first" default for
 *	each word in a transfer (by specifying SPI_LSB_FIRST).
 * @bits_per_word: Data transfers involve one or more words; word sizes
 *	like eight or 12 bits are common.  In-memory wordsizes are
 *	powers of two bytes (e.g. 20 bit samples use 32 bits).
 *	This may be changed by the device's driver, or left at the
 *	default (0) indicating protocol words are eight bit bytes.
 *	The spi_transfer.bits_per_word can override this for each transfer.
 * @irq: Negative, or the number passed to request_irq() to receive
 *	interrupts from this device.
 * @controller_state: Controller's runtime state
 * @controller_data: Board-specific definitions for controller, such as
 *	FIFO initialization parameters; from board_info.controller_data
 * @modalias: Name of the driver to use with this device, or an alias
 *	for that name.  This appears in the sysfs "modalias" attribute
 *	for driver coldplugging, and in uevents used for hotplugging
 * @cs_gpio: gpio number of the chipselect line (optional, -ENOENT
 *	when not using a GPIO line)
 *
 * @statistics: statistics for the spi_device
 *
 * A @spi_device is used to interchange data between an SPI slave
 * (usually a discrete chip) and CPU memory.
 *
 * In @dev, the platform_data is used to hold information about this
 * device that's meaningful to the device's protocol driver, but not
 * to its controller.  One example might be an identifier for a chip
 * variant with slightly different functionality; another might be
 * information about how this particular board wires the chip's pins.
 */
struct spi_device {
	struct device		dev;
	struct spi_master	*master;
	u32			max_speed_hz;
	u8			chip_select;
	u8			bits_per_word;
	u16			mode;
#define	SPI_CPHA	0x01			/* clock phase */
#define	SPI_CPOL	0x02			/* clock polarity */
#define	SPI_MODE_0	(0|0)			/* (original MicroWire) */
#define	SPI_MODE_1	(0|SPI_CPHA)
#define	SPI_MODE_2	(SPI_CPOL|0)
#define	SPI_MODE_3	(SPI_CPOL|SPI_CPHA)
#define	SPI_CS_HIGH	0x04			/* chipselect active high? */
#define	SPI_LSB_FIRST	0x08			/* per-word bits-on-wire */
#define	SPI_3WIRE	0x10			/* SI/SO signals shared */
#define	SPI_LOOP	0x20			/* loopback mode */
#define	SPI_NO_CS	0x40			/* 1 dev/bus, no chipselect */
#define	SPI_READY	0x80			/* slave pulls low to pause */
#define	SPI_TX_DUAL	0x100			/* transmit with 2 wires */
#define	SPI_TX_QUAD	0x200			/* transmit with 4 wires */
#define	SPI_RX_DUAL	0x400			/* receive with 2 wires */
#define	SPI_RX_QUAD	0x800			/* receive with 4 wires */
	int			irq;
	void			*controller_state;
	void			*controller_data;
	char			modalias[SPI_NAME_SIZE];
	int			cs_gpio;	/* chip select gpio */

	/* the statistics */
	struct spi_statistics	statistics;

	/*
	 * likely need more hooks for more protocol options affecting how
	 * the controller talks to each chip, like:
	 *  - memory packing (12 bit samples into low bits, others zeroed)
	 *  - priority
	 *  - drop chipselect after each word
	 *  - chipselect delays
	 *  - ...
	 */
};
 | 175 |  | 
 | 176 | static inline struct spi_device *to_spi_device(struct device *dev) | 
 | 177 | { | 
| David Brownell | b885244 | 2006-01-08 13:34:23 -0800 | [diff] [blame] | 178 | 	return dev ? container_of(dev, struct spi_device, dev) : NULL; | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 179 | } | 
 | 180 |  | 
 | 181 | /* most drivers won't need to care about device refcounting */ | 
 | 182 | static inline struct spi_device *spi_dev_get(struct spi_device *spi) | 
 | 183 | { | 
 | 184 | 	return (spi && get_device(&spi->dev)) ? spi : NULL; | 
 | 185 | } | 
 | 186 |  | 
 | 187 | static inline void spi_dev_put(struct spi_device *spi) | 
 | 188 | { | 
 | 189 | 	if (spi) | 
 | 190 | 		put_device(&spi->dev); | 
 | 191 | } | 
 | 192 |  | 
/* ctldata is for the bus_master driver's runtime state */

/* fetch the controller driver's per-chip runtime state for @spi */
static inline void *spi_get_ctldata(struct spi_device *spi)
{
	return spi->controller_state;
}

/* stash the controller driver's per-chip runtime state on @spi */
static inline void spi_set_ctldata(struct spi_device *spi, void *state)
{
	spi->controller_state = state;
}
 | 203 |  | 
/* device driver data: per-device state owned by the protocol driver */

/* attach protocol-driver private data to @spi (wraps dev_set_drvdata) */
static inline void spi_set_drvdata(struct spi_device *spi, void *data)
{
	dev_set_drvdata(&spi->dev, data);
}

/* retrieve protocol-driver private data from @spi (wraps dev_get_drvdata) */
static inline void *spi_get_drvdata(struct spi_device *spi)
{
	return dev_get_drvdata(&spi->dev);
}
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 215 |  | 
 | 216 | struct spi_message; | 
| Mark Brown | b158935 | 2013-10-05 11:50:40 +0100 | [diff] [blame] | 217 | struct spi_transfer; | 
| David Brownell | b885244 | 2006-01-08 13:34:23 -0800 | [diff] [blame] | 218 |  | 
/**
 * struct spi_driver - Host side "protocol" driver
 * @id_table: List of SPI devices supported by this driver
 * @probe: Binds this driver to the spi device.  Drivers can verify
 *	that the device is actually present, and may need to configure
 *	characteristics (such as bits_per_word) which weren't needed for
 *	the initial configuration done during system setup.
 * @remove: Unbinds this driver from the spi device
 * @shutdown: Standard shutdown callback used during system state
 *	transitions such as powerdown/halt and kexec
 * @driver: SPI device drivers should initialize the name and owner
 *	field of this structure.
 *
 * This represents the kind of device driver that uses SPI messages to
 * interact with the hardware at the other end of a SPI link.  It's called
 * a "protocol" driver because it works through messages rather than talking
 * directly to SPI hardware (which is what the underlying SPI controller
 * driver does to pass those messages).  These protocols are defined in the
 * specification for the device(s) supported by the driver.
 *
 * As a rule, those device protocols represent the lowest level interface
 * supported by a driver, and it will support upper level interfaces too.
 * Examples of such upper levels include frameworks like MTD, networking,
 * MMC, RTC, filesystem character device nodes, and hardware monitoring.
 */
struct spi_driver {
	const struct spi_device_id *id_table;
	int			(*probe)(struct spi_device *spi);
	int			(*remove)(struct spi_device *spi);
	void			(*shutdown)(struct spi_device *spi);
	struct device_driver	driver;
};
 | 251 |  | 
 | 252 | static inline struct spi_driver *to_spi_driver(struct device_driver *drv) | 
 | 253 | { | 
 | 254 | 	return drv ? container_of(drv, struct spi_driver, driver) : NULL; | 
 | 255 | } | 
 | 256 |  | 
| Andrew F. Davis | ca5d248 | 2015-10-23 08:59:10 -0500 | [diff] [blame] | 257 | extern int __spi_register_driver(struct module *owner, struct spi_driver *sdrv); | 
| David Brownell | b885244 | 2006-01-08 13:34:23 -0800 | [diff] [blame] | 258 |  | 
| David Brownell | 33e34dc | 2007-05-08 00:32:21 -0700 | [diff] [blame] | 259 | /** | 
 | 260 |  * spi_unregister_driver - reverse effect of spi_register_driver | 
 | 261 |  * @sdrv: the driver to unregister | 
 | 262 |  * Context: can sleep | 
 | 263 |  */ | 
| David Brownell | b885244 | 2006-01-08 13:34:23 -0800 | [diff] [blame] | 264 | static inline void spi_unregister_driver(struct spi_driver *sdrv) | 
 | 265 | { | 
| Ben Dooks | ddc1e97 | 2007-02-12 00:52:43 -0800 | [diff] [blame] | 266 | 	if (sdrv) | 
 | 267 | 		driver_unregister(&sdrv->driver); | 
| David Brownell | b885244 | 2006-01-08 13:34:23 -0800 | [diff] [blame] | 268 | } | 
 | 269 |  | 
/* use a define to avoid include chaining to get THIS_MODULE;
 * registers @driver with the calling module as its owner
 */
#define spi_register_driver(driver) \
	__spi_register_driver(THIS_MODULE, driver)
 | 273 |  | 
/**
 * module_spi_driver() - Helper macro for registering a SPI driver
 * @__spi_driver: spi_driver struct
 *
 * Helper macro for SPI drivers which do not do anything special in module
 * init/exit. This eliminates a lot of boilerplate. Each module may only
 * use this macro once, and calling it replaces module_init() and module_exit()
 */
#define module_spi_driver(__spi_driver) \
	module_driver(__spi_driver, spi_register_driver, \
			spi_unregister_driver)
| David Brownell | b885244 | 2006-01-08 13:34:23 -0800 | [diff] [blame] | 285 |  | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 286 | /** | 
 | 287 |  * struct spi_master - interface to SPI master controller | 
| Tony Jones | 49dce68 | 2007-10-16 01:27:48 -0700 | [diff] [blame] | 288 |  * @dev: device interface to this driver | 
| Feng Tang | 2b9603a | 2010-08-02 15:52:15 +0800 | [diff] [blame] | 289 |  * @list: link with the global spi_master list | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 290 |  * @bus_num: board-specific (and often SOC-specific) identifier for a | 
| David Brownell | 747d844 | 2006-04-02 10:33:37 -0800 | [diff] [blame] | 291 |  *	given SPI controller. | 
| David Brownell | b885244 | 2006-01-08 13:34:23 -0800 | [diff] [blame] | 292 |  * @num_chipselect: chipselects are used to distinguish individual | 
| David Brownell | 747d844 | 2006-04-02 10:33:37 -0800 | [diff] [blame] | 293 |  *	SPI slaves, and are numbered from zero to num_chipselects. | 
 | 294 |  *	each slave has a chipselect signal, but it's common that not | 
 | 295 |  *	every chipselect is connected to a slave. | 
| Mike Rapoport | fd5e191 | 2009-04-06 19:00:56 -0700 | [diff] [blame] | 296 |  * @dma_alignment: SPI controller constraint on DMA buffers alignment. | 
| Randy Dunlap | b73b255 | 2009-09-22 16:46:00 -0700 | [diff] [blame] | 297 |  * @mode_bits: flags understood by this controller driver | 
| Stephen Warren | 543bb25 | 2013-03-26 20:37:57 -0600 | [diff] [blame] | 298 |  * @bits_per_word_mask: A mask indicating which values of bits_per_word are | 
 | 299 |  *	supported by the driver. Bit n indicates that a bits_per_word n+1 is | 
| Masanari Iida | e227867 | 2014-02-18 22:54:36 +0900 | [diff] [blame] | 300 |  *	supported. If set, the SPI core will reject any transfer with an | 
| Stephen Warren | 543bb25 | 2013-03-26 20:37:57 -0600 | [diff] [blame] | 301 |  *	unsupported bits_per_word. If not set, this value is simply ignored, | 
 | 302 |  *	and it's up to the individual driver to perform any validation. | 
| Mark Brown | a2fd4f9 | 2013-07-10 14:57:26 +0100 | [diff] [blame] | 303 |  * @min_speed_hz: Lowest supported transfer speed | 
 | 304 |  * @max_speed_hz: Highest supported transfer speed | 
| Randy Dunlap | b73b255 | 2009-09-22 16:46:00 -0700 | [diff] [blame] | 305 |  * @flags: other constraints relevant to this driver | 
| Ernst Schwab | 5c79a5a | 2010-08-16 15:10:11 +0200 | [diff] [blame] | 306 |  * @bus_lock_spinlock: spinlock for SPI bus locking | 
 | 307 |  * @bus_lock_mutex: mutex for SPI bus locking | 
 | 308 |  * @bus_lock_flag: indicates that the SPI bus is locked for exclusive use | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 309 |  * @setup: updates the device mode and clocking records used by a | 
| David Brownell | 8022456 | 2007-02-12 00:52:46 -0800 | [diff] [blame] | 310 |  *	device's SPI controller; protocol code may call this.  This | 
 | 311 |  *	must fail if an unrecognized or unsupported mode is requested. | 
| David Brownell | 33e34dc | 2007-05-08 00:32:21 -0700 | [diff] [blame] | 312 |  *	It's always safe to call this unless transfers are pending on | 
 | 313 |  *	the device whose settings are being modified. | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 314 |  * @transfer: adds a message to the controller's transfer queue. | 
 | 315 |  * @cleanup: frees controller-specific state | 
| Thierry Reding | 2c67568 | 2014-08-08 13:02:36 +0200 | [diff] [blame] | 316 |  * @can_dma: determine whether this master supports DMA | 
| Linus Walleij | ffbbdd21 | 2012-02-22 10:05:38 +0100 | [diff] [blame] | 317 |  * @queued: whether this master is providing an internal message queue | 
 | 318 |  * @kworker: thread struct for message pump | 
 | 319 |  * @kworker_task: pointer to task for message pump kworker thread | 
 | 320 |  * @pump_messages: work struct for scheduling work to the message pump | 
 * @queue_lock: spinlock to synchronise access to message queue
 | 322 |  * @queue: message queue | 
| Mark Brown | 0461a41 | 2014-12-09 21:38:05 +0000 | [diff] [blame] | 323 |  * @idling: the device is entering idle state | 
| Linus Walleij | ffbbdd21 | 2012-02-22 10:05:38 +0100 | [diff] [blame] | 324 |  * @cur_msg: the currently in-flight message | 
| Mark Brown | 2841a5f | 2013-10-05 00:23:12 +0100 | [diff] [blame] | 325 |  * @cur_msg_prepared: spi_prepare_message was called for the currently | 
 | 326 |  *                    in-flight message | 
| Thierry Reding | 2c67568 | 2014-08-08 13:02:36 +0200 | [diff] [blame] | 327 |  * @cur_msg_mapped: message has been mapped for DMA | 
| Masanari Iida | e227867 | 2014-02-18 22:54:36 +0900 | [diff] [blame] | 328 |  * @xfer_completion: used by core transfer_one_message() | 
| Linus Walleij | ffbbdd21 | 2012-02-22 10:05:38 +0100 | [diff] [blame] | 329 |  * @busy: message pump is busy | 
 | 330 |  * @running: message pump is running | 
 | 331 |  * @rt: whether this queue is set to run as a realtime task | 
| Mark Brown | 49834de | 2013-07-28 14:47:02 +0100 | [diff] [blame] | 332 |  * @auto_runtime_pm: the core should ensure a runtime PM reference is held | 
 | 333 |  *                   while the hardware is prepared, using the parent | 
 | 334 |  *                   device for the spidev | 
| Mark Brown | 6ad45a2 | 2014-02-02 13:47:47 +0000 | [diff] [blame] | 335 |  * @max_dma_len: Maximum length of a DMA transfer for the device. | 
| Linus Walleij | ffbbdd21 | 2012-02-22 10:05:38 +0100 | [diff] [blame] | 336 |  * @prepare_transfer_hardware: a message will soon arrive from the queue | 
 | 337 |  *	so the subsystem requests the driver to prepare the transfer hardware | 
 | 338 |  *	by issuing this call | 
 | 339 |  * @transfer_one_message: the subsystem calls the driver to transfer a single | 
 | 340 |  *	message while queuing transfers that arrive in the meantime. When the | 
 | 341 |  *	driver is finished with this message, it must call | 
 | 342 |  *	spi_finalize_current_message() so the subsystem can issue the next | 
| Baruch Siach | e930533 | 2014-01-25 22:36:15 +0200 | [diff] [blame] | 343 |  *	message | 
| Randy Dunlap | dbabe0d | 2012-04-17 17:03:50 -0700 | [diff] [blame] | 344 |  * @unprepare_transfer_hardware: there are currently no more messages on the | 
| Linus Walleij | ffbbdd21 | 2012-02-22 10:05:38 +0100 | [diff] [blame] | 345 |  *	queue so the subsystem notifies the driver that it may relax the | 
 | 346 |  *	hardware by issuing this call | 
| Geert Uytterhoeven | bd6857a | 2014-01-21 16:10:07 +0100 | [diff] [blame] | 347 |  * @set_cs: set the logic level of the chip select line.  May be called | 
| Mark Brown | b158935 | 2013-10-05 11:50:40 +0100 | [diff] [blame] | 348 |  *          from interrupt context. | 
| Mark Brown | 2841a5f | 2013-10-05 00:23:12 +0100 | [diff] [blame] | 349 |  * @prepare_message: set up the controller to transfer a single message, | 
 | 350 |  *                   for example doing DMA mapping.  Called from threaded | 
 | 351 |  *                   context. | 
| Geert Uytterhoeven | 0516712 | 2014-01-21 16:10:06 +0100 | [diff] [blame] | 352 |  * @transfer_one: transfer a single spi_transfer. | 
 | 353 |  *                  - return 0 if the transfer is finished, | 
 | 354 |  *                  - return 1 if the transfer is still in progress. When | 
 | 355 |  *                    the driver is finished with this transfer it must | 
 | 356 |  *                    call spi_finalize_current_transfer() so the subsystem | 
| Baruch Siach | 6e5f526 | 2014-01-25 22:36:13 +0200 | [diff] [blame] | 357 |  *                    can issue the next transfer. Note: transfer_one and | 
 | 358 |  *                    transfer_one_message are mutually exclusive; when both | 
 | 359 |  *                    are set, the generic subsystem does not call your | 
 | 360 |  *                    transfer_one callback. | 
| Geert Uytterhoeven | ff61eb4 | 2015-04-07 20:39:19 +0200 | [diff] [blame] | 361 |  * @handle_err: the subsystem calls the driver to handle an error that occurs | 
| Andy Shevchenko | b716c4f | 2015-02-27 17:34:15 +0200 | [diff] [blame] | 362 |  *		in the generic implementation of transfer_one_message(). | 
| Mark Brown | 2841a5f | 2013-10-05 00:23:12 +0100 | [diff] [blame] | 363 |  * @unprepare_message: undo any work done by prepare_message(). | 
| Andreas Larsson | 095c375 | 2013-01-29 15:53:41 +0100 | [diff] [blame] | 364 |  * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS | 
| Andreas Larsson | 446411e | 2013-02-13 14:20:25 +0100 | [diff] [blame] | 365 |  *	number. Any individual value may be -ENOENT for CS lines that | 
| Andreas Larsson | 095c375 | 2013-01-29 15:53:41 +0100 | [diff] [blame] | 366 |  *	are not GPIOs (driven by the SPI controller itself). | 
| Martin Sperl | eca2ebc | 2015-06-22 13:00:36 +0000 | [diff] [blame] | 367 |  * @statistics: statistics for the spi_master | 
| Thierry Reding | 2c67568 | 2014-08-08 13:02:36 +0200 | [diff] [blame] | 368 |  * @dma_tx: DMA transmit channel | 
 | 369 |  * @dma_rx: DMA receive channel | 
 | 370 |  * @dummy_rx: dummy receive buffer for full-duplex devices | 
 | 371 |  * @dummy_tx: dummy transmit buffer for full-duplex devices | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 372 |  * | 
| David Brownell | 33e34dc | 2007-05-08 00:32:21 -0700 | [diff] [blame] | 373 |  * Each SPI master controller can communicate with one or more @spi_device | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 374 |  * children.  These make a small bus, sharing MOSI, MISO and SCK signals | 
 | 375 |  * but not chip select signals.  Each device may be configured to use a | 
 | 376 |  * different clock rate, since those shared signals are ignored unless | 
 | 377 |  * the chip is selected. | 
 | 378 |  * | 
 | 379 |  * The driver for an SPI controller manages access to those devices through | 
| David Brownell | 33e34dc | 2007-05-08 00:32:21 -0700 | [diff] [blame] | 380 |  * a queue of spi_message transactions, copying data between CPU memory and | 
 | 381 |  * an SPI slave device.  For each such message it queues, it calls the | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 382 |  * message's completion function when the transaction completes. | 
 | 383 |  */ | 
 | 384 | struct spi_master { | 
| Tony Jones | 49dce68 | 2007-10-16 01:27:48 -0700 | [diff] [blame] | 385 | 	struct device	dev; | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 386 |  | 
| Feng Tang | 2b9603a | 2010-08-02 15:52:15 +0800 | [diff] [blame] | 387 | 	struct list_head list; | 
 | 388 |  | 
| David Brownell | a020ed7 | 2006-04-03 15:49:04 -0700 | [diff] [blame] | 389 | 	/* other than negative (== assign one dynamically), bus_num is fully | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 390 | 	 * board-specific.  usually that simplifies to being SOC-specific. | 
| David Brownell | a020ed7 | 2006-04-03 15:49:04 -0700 | [diff] [blame] | 391 | 	 * example:  one SOC has three SPI controllers, numbered 0..2, | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 392 | 	 * and one board's schematics might show it using SPI-2.  software | 
 | 393 | 	 * would normally use bus_num=2 for that controller. | 
 | 394 | 	 */ | 
| David Brownell | a020ed7 | 2006-04-03 15:49:04 -0700 | [diff] [blame] | 395 | 	s16			bus_num; | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 396 |  | 
 | 397 | 	/* chipselects will be integral to many controllers; some others | 
 | 398 | 	 * might use board-specific GPIOs. | 
 | 399 | 	 */ | 
 | 400 | 	u16			num_chipselect; | 
 | 401 |  | 
| Mike Rapoport | fd5e191 | 2009-04-06 19:00:56 -0700 | [diff] [blame] | 402 | 	/* some SPI controllers pose alignment requirements on DMAable | 
 | 403 | 	 * buffers; let protocol drivers know about these requirements. | 
 | 404 | 	 */ | 
 | 405 | 	u16			dma_alignment; | 
 | 406 |  | 
| David Brownell | e7db06b | 2009-06-17 16:26:04 -0700 | [diff] [blame] | 407 | 	/* spi_device.mode flags understood by this controller driver */ | 
 | 408 | 	u16			mode_bits; | 
 | 409 |  | 
| Stephen Warren | 543bb25 | 2013-03-26 20:37:57 -0600 | [diff] [blame] | 410 | 	/* bitmask of supported bits_per_word for transfers */ | 
 | 411 | 	u32			bits_per_word_mask; | 
| Stephen Warren | 2922a8d | 2013-05-21 20:36:34 -0600 | [diff] [blame] | 412 | #define SPI_BPW_MASK(bits) BIT((bits) - 1) | 
| Stephen Warren | b6aa23c | 2013-08-01 16:08:57 -0600 | [diff] [blame] | 413 | #define SPI_BIT_MASK(bits) (((bits) == 32) ? ~0U : (BIT(bits) - 1)) | 
| Stephen Warren | eca8960 | 2013-05-30 09:59:40 -0600 | [diff] [blame] | 414 | #define SPI_BPW_RANGE_MASK(min, max) (SPI_BIT_MASK(max) - SPI_BIT_MASK(min - 1)) | 
| Stephen Warren | 543bb25 | 2013-03-26 20:37:57 -0600 | [diff] [blame] | 415 |  | 
| Mark Brown | a2fd4f9 | 2013-07-10 14:57:26 +0100 | [diff] [blame] | 416 | 	/* limits on transfer speed */ | 
 | 417 | 	u32			min_speed_hz; | 
 | 418 | 	u32			max_speed_hz; | 
 | 419 |  | 
| David Brownell | 70d6027 | 2009-06-30 11:41:27 -0700 | [diff] [blame] | 420 | 	/* other constraints relevant to this driver */ | 
 | 421 | 	u16			flags; | 
 | 422 | #define SPI_MASTER_HALF_DUPLEX	BIT(0)		/* can't do full duplex */ | 
| David Brownell | 568d069 | 2009-09-22 16:46:18 -0700 | [diff] [blame] | 423 | #define SPI_MASTER_NO_RX	BIT(1)		/* can't do buffer read */ | 
 | 424 | #define SPI_MASTER_NO_TX	BIT(2)		/* can't do buffer write */ | 
| Mark Brown | 3a2eba9 | 2014-01-28 20:17:03 +0000 | [diff] [blame] | 425 | #define SPI_MASTER_MUST_RX      BIT(3)		/* requires rx */ | 
 | 426 | #define SPI_MASTER_MUST_TX      BIT(4)		/* requires tx */ | 
| David Brownell | 70d6027 | 2009-06-30 11:41:27 -0700 | [diff] [blame] | 427 |  | 
| Michal Suchanek | 4acad4a | 2015-12-02 10:38:21 +0000 | [diff] [blame^] | 428 | 	/* | 
 | 429 | 	 * on some hardware transfer size may be constrained | 
 | 430 | 	 * the limit may depend on device transfer settings | 
 | 431 | 	 */ | 
 | 432 | 	size_t (*max_transfer_size)(struct spi_device *spi); | 
 | 433 |  | 
| Ernst Schwab | cf32b71e | 2010-06-28 17:49:29 -0700 | [diff] [blame] | 434 | 	/* lock and mutex for SPI bus locking */ | 
 | 435 | 	spinlock_t		bus_lock_spinlock; | 
 | 436 | 	struct mutex		bus_lock_mutex; | 
 | 437 |  | 
 | 438 | 	/* flag indicating that the SPI bus is locked for exclusive use */ | 
 | 439 | 	bool			bus_lock_flag; | 
 | 440 |  | 
| David Brownell | 6e538aa | 2009-04-21 12:24:49 -0700 | [diff] [blame] | 441 | 	/* Setup mode and clock, etc (spi driver may call many times). | 
 | 442 | 	 * | 
 | 443 | 	 * IMPORTANT:  this may be called when transfers to another | 
 | 444 | 	 * device are active.  DO NOT UPDATE SHARED REGISTERS in ways | 
 | 445 | 	 * which could break those transfers. | 
 | 446 | 	 */ | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 447 | 	int			(*setup)(struct spi_device *spi); | 
 | 448 |  | 
 | 449 | 	/* bidirectional bulk transfers | 
 | 450 | 	 * | 
 | 451 | 	 * + The transfer() method may not sleep; its main role is | 
 | 452 | 	 *   just to add the message to the queue. | 
 | 453 | 	 * + For now there's no remove-from-queue operation, or | 
 | 454 | 	 *   any other request management | 
 | 455 | 	 * + To a given spi_device, message queueing is pure fifo | 
 | 456 | 	 * | 
 | 457 | 	 * + The master's main job is to process its message queue, | 
 | 458 | 	 *   selecting a chip then transferring data | 
 | 459 | 	 * + If there are multiple spi_device children, the i/o queue | 
 | 460 | 	 *   arbitration algorithm is unspecified (round robin, fifo, | 
 | 461 | 	 *   priority, reservations, preemption, etc) | 
 | 462 | 	 * | 
 | 463 | 	 * + Chipselect stays active during the entire message | 
 | 464 | 	 *   (unless modified by spi_transfer.cs_change != 0). | 
 | 465 | 	 * + The message transfers use clock and SPI mode parameters | 
 | 466 | 	 *   previously established by setup() for this device | 
 | 467 | 	 */ | 
 | 468 | 	int			(*transfer)(struct spi_device *spi, | 
 | 469 | 						struct spi_message *mesg); | 
 | 470 |  | 
 | 471 | 	/* called on release() to free memory provided by spi_master */ | 
| Hans-Peter Nilsson | 0ffa028 | 2007-02-12 00:52:45 -0800 | [diff] [blame] | 472 | 	void			(*cleanup)(struct spi_device *spi); | 
| Linus Walleij | ffbbdd21 | 2012-02-22 10:05:38 +0100 | [diff] [blame] | 473 |  | 
 | 474 | 	/* | 
| Mark Brown | 99adef3 | 2014-01-16 12:22:43 +0000 | [diff] [blame] | 475 | 	 * Used to enable core support for DMA handling, if can_dma() | 
 | 476 | 	 * exists and returns true then the transfer will be mapped | 
 | 477 | 	 * prior to transfer_one() being called.  The driver should | 
 | 478 | 	 * not modify or store xfer and dma_tx and dma_rx must be set | 
 | 479 | 	 * while the device is prepared. | 
 | 480 | 	 */ | 
 | 481 | 	bool			(*can_dma)(struct spi_master *master, | 
 | 482 | 					   struct spi_device *spi, | 
 | 483 | 					   struct spi_transfer *xfer); | 
 | 484 |  | 
 | 485 | 	/* | 
| Linus Walleij | ffbbdd21 | 2012-02-22 10:05:38 +0100 | [diff] [blame] | 486 | 	 * These hooks are for drivers that want to use the generic | 
 | 487 | 	 * master transfer queueing mechanism. If these are used, the | 
 | 488 | 	 * transfer() function above must NOT be specified by the driver. | 
 | 489 | 	 * Over time we expect SPI drivers to be phased over to this API. | 
 | 490 | 	 */ | 
 | 491 | 	bool				queued; | 
 | 492 | 	struct kthread_worker		kworker; | 
 | 493 | 	struct task_struct		*kworker_task; | 
 | 494 | 	struct kthread_work		pump_messages; | 
 | 495 | 	spinlock_t			queue_lock; | 
 | 496 | 	struct list_head		queue; | 
 | 497 | 	struct spi_message		*cur_msg; | 
| Mark Brown | 0461a41 | 2014-12-09 21:38:05 +0000 | [diff] [blame] | 498 | 	bool				idling; | 
| Linus Walleij | ffbbdd21 | 2012-02-22 10:05:38 +0100 | [diff] [blame] | 499 | 	bool				busy; | 
 | 500 | 	bool				running; | 
 | 501 | 	bool				rt; | 
| Mark Brown | 49834de | 2013-07-28 14:47:02 +0100 | [diff] [blame] | 502 | 	bool				auto_runtime_pm; | 
| Mark Brown | 2841a5f | 2013-10-05 00:23:12 +0100 | [diff] [blame] | 503 | 	bool                            cur_msg_prepared; | 
| Mark Brown | 99adef3 | 2014-01-16 12:22:43 +0000 | [diff] [blame] | 504 | 	bool				cur_msg_mapped; | 
| Mark Brown | b158935 | 2013-10-05 11:50:40 +0100 | [diff] [blame] | 505 | 	struct completion               xfer_completion; | 
| Mark Brown | 6ad45a2 | 2014-02-02 13:47:47 +0000 | [diff] [blame] | 506 | 	size_t				max_dma_len; | 
| Linus Walleij | ffbbdd21 | 2012-02-22 10:05:38 +0100 | [diff] [blame] | 507 |  | 
 | 508 | 	int (*prepare_transfer_hardware)(struct spi_master *master); | 
 | 509 | 	int (*transfer_one_message)(struct spi_master *master, | 
 | 510 | 				    struct spi_message *mesg); | 
 | 511 | 	int (*unprepare_transfer_hardware)(struct spi_master *master); | 
| Mark Brown | 2841a5f | 2013-10-05 00:23:12 +0100 | [diff] [blame] | 512 | 	int (*prepare_message)(struct spi_master *master, | 
 | 513 | 			       struct spi_message *message); | 
 | 514 | 	int (*unprepare_message)(struct spi_master *master, | 
 | 515 | 				 struct spi_message *message); | 
| Mark Brown | 49834de | 2013-07-28 14:47:02 +0100 | [diff] [blame] | 516 |  | 
| Mark Brown | b158935 | 2013-10-05 11:50:40 +0100 | [diff] [blame] | 517 | 	/* | 
 | 518 | 	 * These hooks are for drivers that use a generic implementation | 
 | 519 | 	 * of transfer_one_message() provided by the core. | 
 | 520 | 	 */ | 
 | 521 | 	void (*set_cs)(struct spi_device *spi, bool enable); | 
 | 522 | 	int (*transfer_one)(struct spi_master *master, struct spi_device *spi, | 
 | 523 | 			    struct spi_transfer *transfer); | 
| Andy Shevchenko | b716c4f | 2015-02-27 17:34:15 +0200 | [diff] [blame] | 524 | 	void (*handle_err)(struct spi_master *master, | 
 | 525 | 			   struct spi_message *message); | 
| Mark Brown | b158935 | 2013-10-05 11:50:40 +0100 | [diff] [blame] | 526 |  | 
| Jean-Christophe PLAGNIOL-VILLARD | 7431798 | 2012-11-15 20:19:57 +0100 | [diff] [blame] | 527 | 	/* gpio chip select */ | 
 | 528 | 	int			*cs_gpios; | 
| Mark Brown | 99adef3 | 2014-01-16 12:22:43 +0000 | [diff] [blame] | 529 |  | 
| Martin Sperl | eca2ebc | 2015-06-22 13:00:36 +0000 | [diff] [blame] | 530 | 	/* statistics */ | 
 | 531 | 	struct spi_statistics	statistics; | 
 | 532 |  | 
| Mark Brown | 99adef3 | 2014-01-16 12:22:43 +0000 | [diff] [blame] | 533 | 	/* DMA channels for use with core dmaengine helpers */ | 
 | 534 | 	struct dma_chan		*dma_tx; | 
 | 535 | 	struct dma_chan		*dma_rx; | 
| Mark Brown | 3a2eba9 | 2014-01-28 20:17:03 +0000 | [diff] [blame] | 536 |  | 
 | 537 | 	/* dummy data for full duplex devices */ | 
 | 538 | 	void			*dummy_rx; | 
 | 539 | 	void			*dummy_tx; | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 540 | }; | 
 | 541 |  | 
| David Brownell | 0c86846 | 2006-01-08 13:34:25 -0800 | [diff] [blame] | 542 | static inline void *spi_master_get_devdata(struct spi_master *master) | 
 | 543 | { | 
| Tony Jones | 49dce68 | 2007-10-16 01:27:48 -0700 | [diff] [blame] | 544 | 	return dev_get_drvdata(&master->dev); | 
| David Brownell | 0c86846 | 2006-01-08 13:34:25 -0800 | [diff] [blame] | 545 | } | 
 | 546 |  | 
 | 547 | static inline void spi_master_set_devdata(struct spi_master *master, void *data) | 
 | 548 | { | 
| Tony Jones | 49dce68 | 2007-10-16 01:27:48 -0700 | [diff] [blame] | 549 | 	dev_set_drvdata(&master->dev, data); | 
| David Brownell | 0c86846 | 2006-01-08 13:34:25 -0800 | [diff] [blame] | 550 | } | 
 | 551 |  | 
 | 552 | static inline struct spi_master *spi_master_get(struct spi_master *master) | 
 | 553 | { | 
| Tony Jones | 49dce68 | 2007-10-16 01:27:48 -0700 | [diff] [blame] | 554 | 	if (!master || !get_device(&master->dev)) | 
| David Brownell | 0c86846 | 2006-01-08 13:34:25 -0800 | [diff] [blame] | 555 | 		return NULL; | 
 | 556 | 	return master; | 
 | 557 | } | 
 | 558 |  | 
 | 559 | static inline void spi_master_put(struct spi_master *master) | 
 | 560 | { | 
 | 561 | 	if (master) | 
| Tony Jones | 49dce68 | 2007-10-16 01:27:48 -0700 | [diff] [blame] | 562 | 		put_device(&master->dev); | 
| David Brownell | 0c86846 | 2006-01-08 13:34:25 -0800 | [diff] [blame] | 563 | } | 
 | 564 |  | 
| Linus Walleij | ffbbdd21 | 2012-02-22 10:05:38 +0100 | [diff] [blame] | 565 | /* PM calls that need to be issued by the driver */ | 
 | 566 | extern int spi_master_suspend(struct spi_master *master); | 
 | 567 | extern int spi_master_resume(struct spi_master *master); | 
 | 568 |  | 
 | 569 | /* Calls the driver make to interact with the message queue */ | 
 | 570 | extern struct spi_message *spi_get_next_queued_message(struct spi_master *master); | 
 | 571 | extern void spi_finalize_current_message(struct spi_master *master); | 
| Mark Brown | b158935 | 2013-10-05 11:50:40 +0100 | [diff] [blame] | 572 | extern void spi_finalize_current_transfer(struct spi_master *master); | 
| David Brownell | 0c86846 | 2006-01-08 13:34:25 -0800 | [diff] [blame] | 573 |  | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 574 | /* the spi driver core manages memory for the spi_master classdev */ | 
 | 575 | extern struct spi_master * | 
 | 576 | spi_alloc_master(struct device *host, unsigned size); | 
 | 577 |  | 
 | 578 | extern int spi_register_master(struct spi_master *master); | 
| Mark Brown | 666d5b4 | 2013-08-31 18:50:52 +0100 | [diff] [blame] | 579 | extern int devm_spi_register_master(struct device *dev, | 
 | 580 | 				    struct spi_master *master); | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 581 | extern void spi_unregister_master(struct spi_master *master); | 
 | 582 |  | 
 | 583 | extern struct spi_master *spi_busnum_to_master(u16 busnum); | 
 | 584 |  | 
 | 585 | /*---------------------------------------------------------------------------*/ | 
 | 586 |  | 
 | 587 | /* | 
 | 588 |  * I/O INTERFACE between SPI controller and protocol drivers | 
 | 589 |  * | 
 | 590 |  * Protocol drivers use a queue of spi_messages, each transferring data | 
 | 591 |  * between the controller and memory buffers. | 
 | 592 |  * | 
 | 593 |  * The spi_messages themselves consist of a series of read+write transfer | 
 | 594 |  * segments.  Those segments always read the same number of bits as they | 
 | 595 |  * write; but one or the other is easily ignored by passing a null buffer | 
 | 596 |  * pointer.  (This is unlike most types of I/O API, because SPI hardware | 
 | 597 |  * is full duplex.) | 
 | 598 |  * | 
 | 599 |  * NOTE:  Allocation of spi_transfer and spi_message memory is entirely | 
 | 600 |  * up to the protocol driver, which guarantees the integrity of both (as | 
 | 601 |  * well as the data buffers) for as long as the message is queued. | 
 | 602 |  */ | 
 | 603 |  | 
 | 604 | /** | 
 | 605 |  * struct spi_transfer - a read/write buffer pair | 
| Vitaly Wool | 8275c64 | 2006-01-08 13:34:28 -0800 | [diff] [blame] | 606 |  * @tx_buf: data to be written (dma-safe memory), or NULL | 
 | 607 |  * @rx_buf: data to be read (dma-safe memory), or NULL | 
| David Brownell | 33e34dc | 2007-05-08 00:32:21 -0700 | [diff] [blame] | 608 |  * @tx_dma: DMA address of tx_buf, if @spi_message.is_dma_mapped | 
 | 609 |  * @rx_dma: DMA address of rx_buf, if @spi_message.is_dma_mapped | 
| Masanari Iida | e227867 | 2014-02-18 22:54:36 +0900 | [diff] [blame] | 610 |  * @tx_nbits: number of bits used for writing. If 0 the default | 
| wangyuhang | f477b7f | 2013-08-11 18:15:17 +0800 | [diff] [blame] | 611 |  *      (SPI_NBITS_SINGLE) is used. | 
 | 612 |  * @rx_nbits: number of bits used for reading. If 0 the default | 
 | 613 |  *      (SPI_NBITS_SINGLE) is used. | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 614 |  * @len: size of rx and tx buffers (in bytes) | 
| Frederik Schwarzer | 025dfda | 2008-10-16 19:02:37 +0200 | [diff] [blame] | 615 |  * @speed_hz: Select a speed other than the device default for this | 
| David Brownell | 33e34dc | 2007-05-08 00:32:21 -0700 | [diff] [blame] | 616 |  *      transfer. If 0 the default (from @spi_device) is used. | 
| Frederik Schwarzer | 025dfda | 2008-10-16 19:02:37 +0200 | [diff] [blame] | 617 |  * @bits_per_word: select a bits_per_word other than the device default | 
| David Brownell | 33e34dc | 2007-05-08 00:32:21 -0700 | [diff] [blame] | 618 |  *      for this transfer. If 0 the default (from @spi_device) is used. | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 619 |  * @cs_change: affects chipselect after this transfer completes | 
 | 620 |  * @delay_usecs: microseconds to delay after this transfer before | 
| David Brownell | 747d844 | 2006-04-02 10:33:37 -0800 | [diff] [blame] | 621 |  *	(optionally) changing the chipselect status, then starting | 
| David Brownell | 33e34dc | 2007-05-08 00:32:21 -0700 | [diff] [blame] | 622 |  *	the next transfer or completing this @spi_message. | 
 | 623 |  * @transfer_list: transfers are sequenced through @spi_message.transfers | 
| Mark Brown | 6ad45a2 | 2014-02-02 13:47:47 +0000 | [diff] [blame] | 624 |  * @tx_sg: Scatterlist for transmit, currently not for client use | 
 | 625 |  * @rx_sg: Scatterlist for receive, currently not for client use | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 626 |  * | 
 | 627 |  * SPI transfers always write the same number of bytes as they read. | 
| David Brownell | 33e34dc | 2007-05-08 00:32:21 -0700 | [diff] [blame] | 628 |  * Protocol drivers should always provide @rx_buf and/or @tx_buf. | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 629 |  * In some cases, they may also want to provide DMA addresses for | 
 | 630 |  * the data being transferred; that may reduce overhead, when the | 
 | 631 |  * underlying driver uses dma. | 
 | 632 |  * | 
| David Brownell | 4b1badf | 2006-12-29 16:48:39 -0800 | [diff] [blame] | 633 |  * If the transmit buffer is null, zeroes will be shifted out | 
| David Brownell | 33e34dc | 2007-05-08 00:32:21 -0700 | [diff] [blame] | 634 |  * while filling @rx_buf.  If the receive buffer is null, the data | 
| Vitaly Wool | 8275c64 | 2006-01-08 13:34:28 -0800 | [diff] [blame] | 635 |  * shifted in will be discarded.  Only "len" bytes shift out (or in). | 
 | 636 |  * It's an error to try to shift out a partial word.  (For example, by | 
 | 637 |  * shifting out three bytes with word size of sixteen or twenty bits; | 
 | 638 |  * the former uses two bytes per word, the latter uses four bytes.) | 
 | 639 |  * | 
| David Brownell | 8022456 | 2007-02-12 00:52:46 -0800 | [diff] [blame] | 640 |  * In-memory data values are always in native CPU byte order, translated | 
 | 641 |  * from the wire byte order (big-endian except with SPI_LSB_FIRST).  So | 
 | 642 |  * for example when bits_per_word is sixteen, buffers are 2N bytes long | 
| David Brownell | 33e34dc | 2007-05-08 00:32:21 -0700 | [diff] [blame] | 643 |  * (@len = 2N) and hold N sixteen bit words in CPU byte order. | 
| David Brownell | 8022456 | 2007-02-12 00:52:46 -0800 | [diff] [blame] | 644 |  * | 
 | 645 |  * When the word size of the SPI transfer is not a power-of-two multiple | 
 | 646 |  * of eight bits, those in-memory words include extra bits.  In-memory | 
 | 647 |  * words are always seen by protocol drivers as right-justified, so the | 
 | 648 |  * undefined (rx) or unused (tx) bits are always the most significant bits. | 
 | 649 |  * | 
| Vitaly Wool | 8275c64 | 2006-01-08 13:34:28 -0800 | [diff] [blame] | 650 |  * All SPI transfers start with the relevant chipselect active.  Normally | 
 | 651 |  * it stays selected until after the last transfer in a message.  Drivers | 
| David Brownell | 33e34dc | 2007-05-08 00:32:21 -0700 | [diff] [blame] | 652 |  * can affect the chipselect signal using cs_change. | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 653 |  * | 
 | 654 |  * (i) If the transfer isn't the last one in the message, this flag is | 
 | 655 |  * used to make the chipselect briefly go inactive in the middle of the | 
 | 656 |  * message.  Toggling chipselect in this way may be needed to terminate | 
 | 657 |  * a chip command, letting a single spi_message perform all of a group of | 
 | 658 |  * chip transactions together. | 
 | 659 |  * | 
 | 660 |  * (ii) When the transfer is the last one in the message, the chip may | 
| David Brownell | f5a9c77 | 2007-06-16 10:16:08 -0700 | [diff] [blame] | 661 |  * stay selected until the next transfer.  On multi-device SPI busses | 
 | 662 |  * with nothing blocking messages going to other devices, this is just | 
 | 663 |  * a performance hint; starting a message to another device deselects | 
 | 664 |  * this one.  But in other cases, this can be used to ensure correctness. | 
 | 665 |  * Some devices need protocol transactions to be built from a series of | 
 | 666 |  * spi_message submissions, where the content of one message is determined | 
 | 667 |  * by the results of previous messages and where the whole transaction | 
 | 668 |  * ends when the chipselect goes inactive. | 
| David Brownell | 0c86846 | 2006-01-08 13:34:25 -0800 | [diff] [blame] | 669 |  * | 
| Masanari Iida | e227867 | 2014-02-18 22:54:36 +0900 | [diff] [blame] | 670 |  * When SPI can transfer in 1x,2x or 4x. It can get this transfer information | 
| wangyuhang | f477b7f | 2013-08-11 18:15:17 +0800 | [diff] [blame] | 671 |  * from device through @tx_nbits and @rx_nbits. In Bi-direction, these | 
 | 672 |  * two should both be set. User can set transfer mode with SPI_NBITS_SINGLE(1x) | 
 | 673 |  * SPI_NBITS_DUAL(2x) and SPI_NBITS_QUAD(4x) to support these three transfer. | 
 | 674 |  * | 
| David Brownell | 0c86846 | 2006-01-08 13:34:25 -0800 | [diff] [blame] | 675 |  * The code that submits an spi_message (and its spi_transfers) | 
 | 676 |  * to the lower layers is responsible for managing its memory. | 
 | 677 |  * Zero-initialize every field you don't set up explicitly, to | 
| Vitaly Wool | 8275c64 | 2006-01-08 13:34:28 -0800 | [diff] [blame] | 678 |  * insulate against future API updates.  After you submit a message | 
 | 679 |  * and its transfers, ignore them until its completion callback. | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 680 |  */ | 
struct spi_transfer {
	/* it's ok if tx_buf == rx_buf (right?)
	 * for MicroWire, one buffer must be null
	 * buffers must work with dma_*map_single() calls, unless
	 *   spi_message.is_dma_mapped reports a pre-existing mapping
	 */
	const void	*tx_buf;	/* data to shift out, or NULL */
	void		*rx_buf;	/* buffer for shifted-in data, or NULL */
	unsigned	len;		/* size of rx and tx buffers, in bytes */

	/* DMA addresses of the buffers; only meaningful when the caller
	 * set spi_message.is_dma_mapped.
	 */
	dma_addr_t	tx_dma;
	dma_addr_t	rx_dma;
	/* scatterlists built by the SPI core for dmaengine use;
	 * not for client (protocol driver) use.
	 */
	struct sg_table tx_sg;
	struct sg_table rx_sg;

	unsigned	cs_change:1;	/* affects chipselect after this transfer */
	unsigned	tx_nbits:3;	/* bus width for writes; 0 == default (single) */
	unsigned	rx_nbits:3;	/* bus width for reads; 0 == default (single) */
#define	SPI_NBITS_SINGLE	0x01 /* 1bit transfer */
#define	SPI_NBITS_DUAL		0x02 /* 2bits transfer */
#define	SPI_NBITS_QUAD		0x04 /* 4bits transfer */
	u8		bits_per_word;	/* per-transfer override; 0 == device default */
	u16		delay_usecs;	/* delay after this transfer, before cs_change/next */
	u32		speed_hz;	/* per-transfer clock override; 0 == device default */

	struct list_head transfer_list;	/* sequenced via spi_message.transfers */
};
 | 708 |  | 
 | 709 | /** | 
 | 710 |  * struct spi_message - one multi-segment SPI transaction | 
| Vitaly Wool | 8275c64 | 2006-01-08 13:34:28 -0800 | [diff] [blame] | 711 |  * @transfers: list of transfer segments in this transaction | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 712 |  * @spi: SPI device to which the transaction is queued | 
 | 713 |  * @is_dma_mapped: if true, the caller provided both dma and cpu virtual | 
 | 714 |  *	addresses for each transfer buffer | 
 | 715 |  * @complete: called to report transaction completions | 
 | 716 |  * @context: the argument to complete() when it's called | 
| Thierry Reding | 2c67568 | 2014-08-08 13:02:36 +0200 | [diff] [blame] | 717 |  * @frame_length: the total number of bytes in the message | 
| David Brownell | b885244 | 2006-01-08 13:34:23 -0800 | [diff] [blame] | 718 |  * @actual_length: the total number of bytes that were transferred in all | 
 | 719 |  *	successful segments | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 720 |  * @status: zero for success, else negative errno | 
 | 721 |  * @queue: for use by whichever driver currently owns the message | 
 | 722 |  * @state: for use by whichever driver currently owns the message | 
| David Brownell | 0c86846 | 2006-01-08 13:34:25 -0800 | [diff] [blame] | 723 |  * | 
| David Brownell | 33e34dc | 2007-05-08 00:32:21 -0700 | [diff] [blame] | 724 |  * A @spi_message is used to execute an atomic sequence of data transfers, | 
| Vitaly Wool | 8275c64 | 2006-01-08 13:34:28 -0800 | [diff] [blame] | 725 |  * each represented by a struct spi_transfer.  The sequence is "atomic" | 
 | 726 |  * in the sense that no other spi_message may use that SPI bus until that | 
 | 727 |  * sequence completes.  On some systems, many such sequences can execute as | 
 | 728 |  * a single programmed DMA transfer.  On all systems, these messages are | 
 | 729 |  * queued, and might complete after transactions to other devices.  Messages | 
| Marcin Bis | c6331ba | 2015-03-01 13:49:32 +0100 | [diff] [blame] | 730 |  * sent to a given spi_device are always executed in FIFO order. | 
| Vitaly Wool | 8275c64 | 2006-01-08 13:34:28 -0800 | [diff] [blame] | 731 |  * | 
| David Brownell | 0c86846 | 2006-01-08 13:34:25 -0800 | [diff] [blame] | 732 |  * The code that submits an spi_message (and its spi_transfers) | 
 | 733 |  * to the lower layers is responsible for managing its memory. | 
 | 734 |  * Zero-initialize every field you don't set up explicitly, to | 
| Vitaly Wool | 8275c64 | 2006-01-08 13:34:28 -0800 | [diff] [blame] | 735 |  * insulate against future API updates.  After you submit a message | 
 | 736 |  * and its transfers, ignore them until its completion callback. | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 737 |  */ | 
struct spi_message {
	struct list_head	transfers;	/* list of spi_transfer segments */

	struct spi_device	*spi;		/* device this message is queued to */

	unsigned		is_dma_mapped:1; /* caller supplied tx_dma/rx_dma too */

	/* REVISIT:  we might want a flag affecting the behavior of the
	 * last transfer ... allowing things like "read 16 bit length L"
	 * immediately followed by "read L bytes".  Basically imposing
	 * a specific message scheduling algorithm.
	 *
	 * Some controller drivers (message-at-a-time queue processing)
	 * could provide that as their default scheduling algorithm.  But
	 * others (with multi-message pipelines) could need a flag to
	 * tell them about such special cases.
	 */

	/* completion is reported through a callback */
	void			(*complete)(void *context);
	void			*context;	/* passed to complete() */
	unsigned		frame_length;	/* total bytes in the message */
	unsigned		actual_length;	/* bytes moved in successful segments */
	int			status;		/* zero for success, else negative errno */

	/* for optional use by whatever driver currently owns the
	 * spi_message ...  between calls to spi_async and then later
	 * complete(), that's the spi_master controller driver.
	 */
	struct list_head	queue;
	void			*state;
};
 | 770 |  | 
| Vitaly Wool | 8275c64 | 2006-01-08 13:34:28 -0800 | [diff] [blame] | 771 | static inline void spi_message_init(struct spi_message *m) | 
 | 772 | { | 
 | 773 | 	memset(m, 0, sizeof *m); | 
 | 774 | 	INIT_LIST_HEAD(&m->transfers); | 
 | 775 | } | 
 | 776 |  | 
 | 777 | static inline void | 
 | 778 | spi_message_add_tail(struct spi_transfer *t, struct spi_message *m) | 
 | 779 | { | 
 | 780 | 	list_add_tail(&t->transfer_list, &m->transfers); | 
 | 781 | } | 
 | 782 |  | 
 | 783 | static inline void | 
 | 784 | spi_transfer_del(struct spi_transfer *t) | 
 | 785 | { | 
 | 786 | 	list_del(&t->transfer_list); | 
 | 787 | } | 
 | 788 |  | 
| Lars-Peter Clausen | 6d9eecd | 2013-01-09 17:31:00 +0000 | [diff] [blame] | 789 | /** | 
 | 790 |  * spi_message_init_with_transfers - Initialize spi_message and append transfers | 
 | 791 |  * @m: spi_message to be initialized | 
 | 792 |  * @xfers: An array of spi transfers | 
 | 793 |  * @num_xfers: Number of items in the xfer array | 
 | 794 |  * | 
 | 795 |  * This function initializes the given spi_message and adds each spi_transfer in | 
 | 796 |  * the given array to the message. | 
 | 797 |  */ | 
 | 798 | static inline void | 
 | 799 | spi_message_init_with_transfers(struct spi_message *m, | 
 | 800 | struct spi_transfer *xfers, unsigned int num_xfers) | 
 | 801 | { | 
 | 802 | 	unsigned int i; | 
 | 803 |  | 
 | 804 | 	spi_message_init(m); | 
 | 805 | 	for (i = 0; i < num_xfers; ++i) | 
 | 806 | 		spi_message_add_tail(&xfers[i], m); | 
 | 807 | } | 
 | 808 |  | 
| David Brownell | 0c86846 | 2006-01-08 13:34:25 -0800 | [diff] [blame] | 809 | /* It's fine to embed message and transaction structures in other data | 
 | 810 |  * structures so long as you don't free them while they're in use. | 
 | 811 |  */ | 
 | 812 |  | 
 | 813 | static inline struct spi_message *spi_message_alloc(unsigned ntrans, gfp_t flags) | 
 | 814 | { | 
 | 815 | 	struct spi_message *m; | 
 | 816 |  | 
 | 817 | 	m = kzalloc(sizeof(struct spi_message) | 
 | 818 | 			+ ntrans * sizeof(struct spi_transfer), | 
 | 819 | 			flags); | 
 | 820 | 	if (m) { | 
| Shubhrajyoti D | 8f53602 | 2012-02-27 19:29:05 +0530 | [diff] [blame] | 821 | 		unsigned i; | 
| Vitaly Wool | 8275c64 | 2006-01-08 13:34:28 -0800 | [diff] [blame] | 822 | 		struct spi_transfer *t = (struct spi_transfer *)(m + 1); | 
 | 823 |  | 
 | 824 | 		INIT_LIST_HEAD(&m->transfers); | 
 | 825 | 		for (i = 0; i < ntrans; i++, t++) | 
 | 826 | 			spi_message_add_tail(t, m); | 
| David Brownell | 0c86846 | 2006-01-08 13:34:25 -0800 | [diff] [blame] | 827 | 	} | 
 | 828 | 	return m; | 
 | 829 | } | 
 | 830 |  | 
 | 831 | static inline void spi_message_free(struct spi_message *m) | 
 | 832 | { | 
 | 833 | 	kfree(m); | 
 | 834 | } | 
 | 835 |  | 
| David Brownell | 7d07719 | 2009-06-17 16:26:03 -0700 | [diff] [blame] | 836 | extern int spi_setup(struct spi_device *spi); | 
| David Brownell | 568d069 | 2009-09-22 16:46:18 -0700 | [diff] [blame] | 837 | extern int spi_async(struct spi_device *spi, struct spi_message *message); | 
| Ernst Schwab | cf32b71e | 2010-06-28 17:49:29 -0700 | [diff] [blame] | 838 | extern int spi_async_locked(struct spi_device *spi, | 
 | 839 | 			    struct spi_message *message); | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 840 |  | 
| Michal Suchanek | 4acad4a | 2015-12-02 10:38:21 +0000 | [diff] [blame^] | 841 | static inline size_t | 
 | 842 | spi_max_transfer_size(struct spi_device *spi) | 
 | 843 | { | 
 | 844 | 	struct spi_master *master = spi->master; | 
 | 845 | 	if (!master->max_transfer_size) | 
 | 846 | 		return SIZE_MAX; | 
 | 847 | 	return master->max_transfer_size(spi); | 
 | 848 | } | 
 | 849 |  | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 850 | /*---------------------------------------------------------------------------*/ | 
 | 851 |  | 
 | 852 | /* All these synchronous SPI transfer routines are utilities layered | 
 | 853 |  * over the core async transfer primitive.  Here, "synchronous" means | 
 | 854 |  * they will sleep uninterruptibly until the async transfer completes. | 
 | 855 |  */ | 
 | 856 |  | 
 | 857 | extern int spi_sync(struct spi_device *spi, struct spi_message *message); | 
| Ernst Schwab | cf32b71e | 2010-06-28 17:49:29 -0700 | [diff] [blame] | 858 | extern int spi_sync_locked(struct spi_device *spi, struct spi_message *message); | 
 | 859 | extern int spi_bus_lock(struct spi_master *master); | 
 | 860 | extern int spi_bus_unlock(struct spi_master *master); | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 861 |  | 
 | 862 | /** | 
 | 863 |  * spi_write - SPI synchronous write | 
 | 864 |  * @spi: device to which data will be written | 
 | 865 |  * @buf: data buffer | 
 | 866 |  * @len: data buffer size | 
| David Brownell | 33e34dc | 2007-05-08 00:32:21 -0700 | [diff] [blame] | 867 |  * Context: can sleep | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 868 |  * | 
| Javier Martinez Canillas | a1fdeaa | 2015-10-22 18:59:22 +0200 | [diff] [blame] | 869 |  * This function writes the buffer @buf. | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 870 |  * Callable only from contexts that can sleep. | 
| Javier Martinez Canillas | a1fdeaa | 2015-10-22 18:59:22 +0200 | [diff] [blame] | 871 |  * | 
 | 872 |  * Return: zero on success, else a negative error code. | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 873 |  */ | 
 | 874 | static inline int | 
| Mark Brown | 0c4a159 | 2011-05-11 00:09:30 +0200 | [diff] [blame] | 875 | spi_write(struct spi_device *spi, const void *buf, size_t len) | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 876 | { | 
 | 877 | 	struct spi_transfer	t = { | 
 | 878 | 			.tx_buf		= buf, | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 879 | 			.len		= len, | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 880 | 		}; | 
| Vitaly Wool | 8275c64 | 2006-01-08 13:34:28 -0800 | [diff] [blame] | 881 | 	struct spi_message	m; | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 882 |  | 
| Vitaly Wool | 8275c64 | 2006-01-08 13:34:28 -0800 | [diff] [blame] | 883 | 	spi_message_init(&m); | 
 | 884 | 	spi_message_add_tail(&t, &m); | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 885 | 	return spi_sync(spi, &m); | 
 | 886 | } | 
 | 887 |  | 
 | 888 | /** | 
 | 889 |  * spi_read - SPI synchronous read | 
 | 890 |  * @spi: device from which data will be read | 
 | 891 |  * @buf: data buffer | 
 | 892 |  * @len: data buffer size | 
| David Brownell | 33e34dc | 2007-05-08 00:32:21 -0700 | [diff] [blame] | 893 |  * Context: can sleep | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 894 |  * | 
| Javier Martinez Canillas | a1fdeaa | 2015-10-22 18:59:22 +0200 | [diff] [blame] | 895 |  * This function reads the buffer @buf. | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 896 |  * Callable only from contexts that can sleep. | 
| Javier Martinez Canillas | a1fdeaa | 2015-10-22 18:59:22 +0200 | [diff] [blame] | 897 |  * | 
 | 898 |  * Return: zero on success, else a negative error code. | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 899 |  */ | 
 | 900 | static inline int | 
| Mark Brown | 0c4a159 | 2011-05-11 00:09:30 +0200 | [diff] [blame] | 901 | spi_read(struct spi_device *spi, void *buf, size_t len) | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 902 | { | 
 | 903 | 	struct spi_transfer	t = { | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 904 | 			.rx_buf		= buf, | 
 | 905 | 			.len		= len, | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 906 | 		}; | 
| Vitaly Wool | 8275c64 | 2006-01-08 13:34:28 -0800 | [diff] [blame] | 907 | 	struct spi_message	m; | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 908 |  | 
| Vitaly Wool | 8275c64 | 2006-01-08 13:34:28 -0800 | [diff] [blame] | 909 | 	spi_message_init(&m); | 
 | 910 | 	spi_message_add_tail(&t, &m); | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 911 | 	return spi_sync(spi, &m); | 
 | 912 | } | 
 | 913 |  | 
/**
 * spi_sync_transfer - synchronous SPI data transfer
 * @spi: device with which data will be exchanged
 * @xfers: An array of spi_transfers
 * @num_xfers: Number of items in the xfer array
 * Context: can sleep
 *
 * Does a synchronous SPI data transfer of the given spi_transfer array.
 *
 * For more specific semantics see spi_sync().
 *
 * Return: zero on success, else a negative error code.
 */
static inline int
spi_sync_transfer(struct spi_device *spi, struct spi_transfer *xfers,
	unsigned int num_xfers)
{
	struct spi_message msg;

	/* xfers[] entries are linked into a stack-local message; they must
	 * stay valid until spi_sync() returns.
	 */
	spi_message_init_with_transfers(&msg, xfers, num_xfers);

	return spi_sync(spi, &msg);
}
 | 937 |  | 
| David Brownell | 0c86846 | 2006-01-08 13:34:25 -0800 | [diff] [blame] | 938 | /* this copies txbuf and rxbuf data; for small transfers only! */ | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 939 | extern int spi_write_then_read(struct spi_device *spi, | 
| Mark Brown | 0c4a159 | 2011-05-11 00:09:30 +0200 | [diff] [blame] | 940 | 		const void *txbuf, unsigned n_tx, | 
 | 941 | 		void *rxbuf, unsigned n_rx); | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 942 |  | 
 | 943 | /** | 
 | 944 |  * spi_w8r8 - SPI synchronous 8 bit write followed by 8 bit read | 
 | 945 |  * @spi: device with which data will be exchanged | 
 | 946 |  * @cmd: command to be written before data is read back | 
| David Brownell | 33e34dc | 2007-05-08 00:32:21 -0700 | [diff] [blame] | 947 |  * Context: can sleep | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 948 |  * | 
| Javier Martinez Canillas | a1fdeaa | 2015-10-22 18:59:22 +0200 | [diff] [blame] | 949 |  * Callable only from contexts that can sleep. | 
 | 950 |  * | 
 | 951 |  * Return: the (unsigned) eight bit number returned by the | 
 | 952 |  * device, or else a negative error code. | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 953 |  */ | 
 | 954 | static inline ssize_t spi_w8r8(struct spi_device *spi, u8 cmd) | 
 | 955 | { | 
 | 956 | 	ssize_t			status; | 
 | 957 | 	u8			result; | 
 | 958 |  | 
 | 959 | 	status = spi_write_then_read(spi, &cmd, 1, &result, 1); | 
 | 960 |  | 
 | 961 | 	/* return negative errno or unsigned value */ | 
 | 962 | 	return (status < 0) ? status : result; | 
 | 963 | } | 
 | 964 |  | 
 | 965 | /** | 
 | 966 |  * spi_w8r16 - SPI synchronous 8 bit write followed by 16 bit read | 
 | 967 |  * @spi: device with which data will be exchanged | 
 | 968 |  * @cmd: command to be written before data is read back | 
| David Brownell | 33e34dc | 2007-05-08 00:32:21 -0700 | [diff] [blame] | 969 |  * Context: can sleep | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 970 |  * | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 971 |  * The number is returned in wire-order, which is at least sometimes | 
 | 972 |  * big-endian. | 
| Javier Martinez Canillas | a1fdeaa | 2015-10-22 18:59:22 +0200 | [diff] [blame] | 973 |  * | 
 | 974 |  * Callable only from contexts that can sleep. | 
 | 975 |  * | 
 | 976 |  * Return: the (unsigned) sixteen bit number returned by the | 
 | 977 |  * device, or else a negative error code. | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 978 |  */ | 
 | 979 | static inline ssize_t spi_w8r16(struct spi_device *spi, u8 cmd) | 
 | 980 | { | 
 | 981 | 	ssize_t			status; | 
 | 982 | 	u16			result; | 
 | 983 |  | 
| Geert Uytterhoeven | 269ccca | 2014-01-12 13:59:06 +0100 | [diff] [blame] | 984 | 	status = spi_write_then_read(spi, &cmd, 1, &result, 2); | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 985 |  | 
 | 986 | 	/* return negative errno or unsigned value */ | 
 | 987 | 	return (status < 0) ? status : result; | 
 | 988 | } | 
 | 989 |  | 
| Lars-Peter Clausen | 05071aa | 2013-09-27 16:34:27 +0200 | [diff] [blame] | 990 | /** | 
 | 991 |  * spi_w8r16be - SPI synchronous 8 bit write followed by 16 bit big-endian read | 
 | 992 |  * @spi: device with which data will be exchanged | 
 | 993 |  * @cmd: command to be written before data is read back | 
 | 994 |  * Context: can sleep | 
 | 995 |  * | 
| Lars-Peter Clausen | 05071aa | 2013-09-27 16:34:27 +0200 | [diff] [blame] | 996 |  * This function is similar to spi_w8r16, with the exception that it will | 
 | 997 |  * convert the read 16 bit data word from big-endian to native endianness. | 
 | 998 |  * | 
| Javier Martinez Canillas | a1fdeaa | 2015-10-22 18:59:22 +0200 | [diff] [blame] | 999 |  * Callable only from contexts that can sleep. | 
 | 1000 |  * | 
 | 1001 |  * Return: the (unsigned) sixteen bit number returned by the device in cpu | 
 | 1002 |  * endianness, or else a negative error code. | 
| Lars-Peter Clausen | 05071aa | 2013-09-27 16:34:27 +0200 | [diff] [blame] | 1003 |  */ | 
 | 1004 | static inline ssize_t spi_w8r16be(struct spi_device *spi, u8 cmd) | 
 | 1005 |  | 
 | 1006 | { | 
 | 1007 | 	ssize_t status; | 
 | 1008 | 	__be16 result; | 
 | 1009 |  | 
 | 1010 | 	status = spi_write_then_read(spi, &cmd, 1, &result, 2); | 
 | 1011 | 	if (status < 0) | 
 | 1012 | 		return status; | 
 | 1013 |  | 
 | 1014 | 	return be16_to_cpu(result); | 
 | 1015 | } | 
 | 1016 |  | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 1017 | /*---------------------------------------------------------------------------*/ | 
 | 1018 |  | 
 | 1019 | /* | 
 | 1020 |  * INTERFACE between board init code and SPI infrastructure. | 
 | 1021 |  * | 
 | 1022 |  * No SPI driver ever sees these SPI device table segments, but | 
 | 1023 |  * it's how the SPI core (or adapters that get hotplugged) grows | 
 | 1024 |  * the driver model tree. | 
 | 1025 |  * | 
 | 1026 |  * As a rule, SPI devices can't be probed.  Instead, board init code | 
 | 1027 |  * provides a table listing the devices which are present, with enough | 
 | 1028 |  * information to bind and set up the device's driver.  There's basic | 
 | 1029 |  * support for nonstatic configurations too; enough to handle adding | 
 | 1030 |  * parport adapters, or microcontrollers acting as USB-to-SPI bridges. | 
 | 1031 |  */ | 
 | 1032 |  | 
/**
 * struct spi_board_info - board-specific template for a SPI device
 * @modalias: Initializes spi_device.modalias; identifies the driver.
 * @platform_data: Initializes spi_device.platform_data; the particular
 *	data stored there is driver-specific.
 * @controller_data: Initializes spi_device.controller_data; some
 *	controllers need hints about hardware setup, e.g. for DMA.
 * @irq: Initializes spi_device.irq; depends on how the board is wired.
 * @max_speed_hz: Initializes spi_device.max_speed_hz; based on limits
 *	from the chip datasheet and board-specific signal quality issues.
 * @bus_num: Identifies which spi_master parents the spi_device; unused
 *	by spi_new_device(), and otherwise depends on board wiring.
 * @chip_select: Initializes spi_device.chip_select; depends on how
 *	the board is wired.
 * @mode: Initializes spi_device.mode; based on the chip datasheet, board
 *	wiring (some devices support both 3WIRE and standard modes), and
 *	possibly presence of an inverter in the chipselect path.
 *
 * When adding new SPI devices to the device tree, these structures serve
 * as a partial device template.  They hold information which can't always
 * be determined by drivers.  Information that probe() can establish (such
 * as the default transfer wordsize) is not included here.
 *
 * These structures are used in two places.  Their primary role is to
 * be stored in tables of board-specific device descriptors, which are
 * declared early in board initialization and then used (much later) to
 * populate a controller's device tree after that controller's driver
 * initializes.  A secondary (and atypical) role is as a parameter to
 * spi_new_device() call, which happens after those controller drivers
 * are active in some dynamic board configuration models.
 */
struct spi_board_info {
	/* the device name and module name are coupled, like platform_bus;
	 * "modalias" is normally the driver name.
	 *
	 * platform_data goes to spi_device.dev.platform_data,
	 * controller_data goes to spi_device.controller_data,
	 * irq is copied too
	 */
	char		modalias[SPI_NAME_SIZE];
	const void	*platform_data;
	void		*controller_data;
	int		irq;

	/* slower signaling on noisy or low voltage boards */
	u32		max_speed_hz;


	/* bus_num is board specific and matches the bus_num of some
	 * spi_master that will probably be registered later.
	 *
	 * chip_select reflects how this chip is wired to that master;
	 * it's less than num_chipselect.
	 */
	u16		bus_num;
	u16		chip_select;

	/* mode becomes spi_device.mode, and is essential for chips
	 * where the default of SPI_CS_HIGH = 0 is wrong.
	 */
	u16		mode;

	/* ... may need additional spi_device chip config data here.
	 * avoid stuff protocol drivers can set; but include stuff
	 * needed to behave without being bound to a driver:
	 *  - quirks like clock rate mattering when not selected
	 */
};
 | 1101 |  | 
#ifdef	CONFIG_SPI
/* register a table of board-specific spi_device descriptors with the core */
extern int
spi_register_board_info(struct spi_board_info const *info, unsigned n);
#else
/* board init code may ignore whether SPI is configured or not */
static inline int
spi_register_board_info(struct spi_board_info const *info, unsigned n)
	{ return 0; }
#endif
 | 1111 |  | 
 | 1112 |  | 
 | 1113 | /* If you're hotplugging an adapter with devices (parport, usb, etc) | 
| David Brownell | 0c86846 | 2006-01-08 13:34:25 -0800 | [diff] [blame] | 1114 |  * use spi_new_device() to describe each device.  You can also call | 
 | 1115 |  * spi_unregister_device() to start making that device vanish, but | 
 | 1116 |  * normally that would be handled by spi_unregister_master(). | 
| Grant Likely | dc87c98 | 2008-05-15 16:50:22 -0600 | [diff] [blame] | 1117 |  * | 
 | 1118 |  * You can also use spi_alloc_device() and spi_add_device() to use a two | 
 | 1119 |  * stage registration sequence for each spi_device.  This gives the caller | 
 | 1120 |  * some more control over the spi_device structure before it is registered, | 
 * but requires the caller to initialize fields that would otherwise
 | 1122 |  * be defined using the board info. | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 1123 |  */ | 
 | 1124 | extern struct spi_device * | 
| Grant Likely | dc87c98 | 2008-05-15 16:50:22 -0600 | [diff] [blame] | 1125 | spi_alloc_device(struct spi_master *master); | 
 | 1126 |  | 
 | 1127 | extern int | 
 | 1128 | spi_add_device(struct spi_device *spi); | 
 | 1129 |  | 
 | 1130 | extern struct spi_device * | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 1131 | spi_new_device(struct spi_master *, struct spi_board_info *); | 
 | 1132 |  | 
 | 1133 | static inline void | 
 | 1134 | spi_unregister_device(struct spi_device *spi) | 
 | 1135 | { | 
 | 1136 | 	if (spi) | 
 | 1137 | 		device_unregister(&spi->dev); | 
 | 1138 | } | 
 | 1139 |  | 
| Anton Vorontsov | 75368bf | 2009-09-22 16:46:04 -0700 | [diff] [blame] | 1140 | extern const struct spi_device_id * | 
 | 1141 | spi_get_device_id(const struct spi_device *sdev); | 
 | 1142 |  | 
/* Test whether @xfer is the final transfer in the message the master is
 * currently processing.
 *
 * NOTE(review): dereferences master->cur_msg unconditionally, so this is
 * presumably only valid while a message is actually in flight (i.e. from
 * the controller driver's transfer path) -- confirm at call sites.
 */
static inline bool
spi_transfer_is_last(struct spi_master *master, struct spi_transfer *xfer)
{
	return list_is_last(&xfer->transfer_list, &master->cur_msg->transfers);
}
 | 1148 |  | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 1149 | #endif /* __LINUX_SPI_H */ |