| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 1 | /* | 
 | 2 |  * Copyright (C) 2005 David Brownell | 
 | 3 |  * | 
 | 4 |  * This program is free software; you can redistribute it and/or modify | 
 | 5 |  * it under the terms of the GNU General Public License as published by | 
 | 6 |  * the Free Software Foundation; either version 2 of the License, or | 
 | 7 |  * (at your option) any later version. | 
 | 8 |  * | 
 | 9 |  * This program is distributed in the hope that it will be useful, | 
 | 10 |  * but WITHOUT ANY WARRANTY; without even the implied warranty of | 
 | 11 |  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | 
 | 12 |  * GNU General Public License for more details. | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 13 |  */ | 
 | 14 |  | 
 | 15 | #ifndef __LINUX_SPI_H | 
 | 16 | #define __LINUX_SPI_H | 
 | 17 |  | 
| Randy Dunlap | 0a30c5c | 2009-01-04 12:00:47 -0800 | [diff] [blame] | 18 | #include <linux/device.h> | 
| Anton Vorontsov | 75368bf | 2009-09-22 16:46:04 -0700 | [diff] [blame] | 19 | #include <linux/mod_devicetable.h> | 
| Tejun Heo | 5a0e3ad | 2010-03-24 17:04:11 +0900 | [diff] [blame] | 20 | #include <linux/slab.h> | 
| Linus Walleij | ffbbdd21 | 2012-02-22 10:05:38 +0100 | [diff] [blame] | 21 | #include <linux/kthread.h> | 
| Mark Brown | b158935 | 2013-10-05 11:50:40 +0100 | [diff] [blame] | 22 | #include <linux/completion.h> | 
| Mark Brown | 6ad45a2 | 2014-02-02 13:47:47 +0000 | [diff] [blame] | 23 | #include <linux/scatterlist.h> | 
| Randy Dunlap | 0a30c5c | 2009-01-04 12:00:47 -0800 | [diff] [blame] | 24 |  | 
| Mark Brown | 99adef3 | 2014-01-16 12:22:43 +0000 | [diff] [blame] | 25 | struct dma_chan; | 
| David Brownell | b885244 | 2006-01-08 13:34:23 -0800 | [diff] [blame] | 26 |  | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 27 | /* | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 28 |  * INTERFACES between SPI master-side drivers and SPI infrastructure. | 
 | 29 |  * (There's no SPI slave support for Linux yet...) | 
 | 30 |  */ | 
 | 31 | extern struct bus_type spi_bus_type; | 
 | 32 |  | 
/**
 * struct spi_device - Master side proxy for an SPI slave device
 * @dev: Driver model representation of the device.
 * @master: SPI controller used with the device.
 * @max_speed_hz: Maximum clock rate to be used with this chip
 *	(on this board); may be changed by the device's driver.
 *	The spi_transfer.speed_hz can override this for each transfer.
 * @chip_select: Chipselect, distinguishing chips handled by @master.
 * @mode: The spi mode defines how data is clocked out and in.
 *	This may be changed by the device's driver.
 *	The "active low" default for chipselect mode can be overridden
 *	(by specifying SPI_CS_HIGH) as can the "MSB first" default for
 *	each word in a transfer (by specifying SPI_LSB_FIRST).
 * @bits_per_word: Data transfers involve one or more words; word sizes
 *	like eight or 12 bits are common.  In-memory wordsizes are
 *	powers of two bytes (e.g. 20 bit samples use 32 bits).
 *	This may be changed by the device's driver, or left at the
 *	default (0) indicating protocol words are eight bit bytes.
 *	The spi_transfer.bits_per_word can override this for each transfer.
 * @irq: Negative, or the number passed to request_irq() to receive
 *	interrupts from this device.
 * @controller_state: Controller's runtime state
 * @controller_data: Board-specific definitions for controller, such as
 *	FIFO initialization parameters; from board_info.controller_data
 * @modalias: Name of the driver to use with this device, or an alias
 *	for that name.  This appears in the sysfs "modalias" attribute
 *	for driver coldplugging, and in uevents used for hotplugging
 * @cs_gpio: gpio number of the chipselect line (optional, -ENOENT
 *	when not using a GPIO line)
 *
 * A @spi_device is used to interchange data between an SPI slave
 * (usually a discrete chip) and CPU memory.
 *
 * In @dev, the platform_data is used to hold information about this
 * device that's meaningful to the device's protocol driver, but not
 * to its controller.  One example might be an identifier for a chip
 * variant with slightly different functionality; another might be
 * information about how this particular board wires the chip's pins.
 */
struct spi_device {
	struct device		dev;
	struct spi_master	*master;
	u32			max_speed_hz;
	u8			chip_select;
	u8			bits_per_word;
	u16			mode;
/* mode flags: clock phase/polarity, then board wiring options */
#define	SPI_CPHA	0x01			/* clock phase */
#define	SPI_CPOL	0x02			/* clock polarity */
#define	SPI_MODE_0	(0|0)			/* (original MicroWire) */
#define	SPI_MODE_1	(0|SPI_CPHA)
#define	SPI_MODE_2	(SPI_CPOL|0)
#define	SPI_MODE_3	(SPI_CPOL|SPI_CPHA)
#define	SPI_CS_HIGH	0x04			/* chipselect active high? */
#define	SPI_LSB_FIRST	0x08			/* per-word bits-on-wire */
#define	SPI_3WIRE	0x10			/* SI/SO signals shared */
#define	SPI_LOOP	0x20			/* loopback mode */
#define	SPI_NO_CS	0x40			/* 1 dev/bus, no chipselect */
#define	SPI_READY	0x80			/* slave pulls low to pause */
#define	SPI_TX_DUAL	0x100			/* transmit with 2 wires */
#define	SPI_TX_QUAD	0x200			/* transmit with 4 wires */
#define	SPI_RX_DUAL	0x400			/* receive with 2 wires */
#define	SPI_RX_QUAD	0x800			/* receive with 4 wires */
	int			irq;
	void			*controller_state;
	void			*controller_data;
	char			modalias[SPI_NAME_SIZE];
	int			cs_gpio;	/* chip select gpio */

	/*
	 * likely need more hooks for more protocol options affecting how
	 * the controller talks to each chip, like:
	 *  - memory packing (12 bit samples into low bits, others zeroed)
	 *  - priority
	 *  - drop chipselect after each word
	 *  - chipselect delays
	 *  - ...
	 */
};
 | 111 |  | 
 | 112 | static inline struct spi_device *to_spi_device(struct device *dev) | 
 | 113 | { | 
| David Brownell | b885244 | 2006-01-08 13:34:23 -0800 | [diff] [blame] | 114 | 	return dev ? container_of(dev, struct spi_device, dev) : NULL; | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 115 | } | 
 | 116 |  | 
 | 117 | /* most drivers won't need to care about device refcounting */ | 
 | 118 | static inline struct spi_device *spi_dev_get(struct spi_device *spi) | 
 | 119 | { | 
 | 120 | 	return (spi && get_device(&spi->dev)) ? spi : NULL; | 
 | 121 | } | 
 | 122 |  | 
 | 123 | static inline void spi_dev_put(struct spi_device *spi) | 
 | 124 | { | 
 | 125 | 	if (spi) | 
 | 126 | 		put_device(&spi->dev); | 
 | 127 | } | 
 | 128 |  | 
/* ctldata is for the bus_master driver's runtime state */

/* Return the controller's per-device runtime state, set by spi_set_ctldata(). */
static inline void *spi_get_ctldata(struct spi_device *spi)
{
	return spi->controller_state;
}

/* Stash controller-private runtime state on this device. */
static inline void spi_set_ctldata(struct spi_device *spi, void *state)
{
	spi->controller_state = state;
}
 | 139 |  | 
| Ben Dooks | 9b40ff4 | 2007-02-12 00:52:41 -0800 | [diff] [blame] | 140 | /* device driver data */ | 
 | 141 |  | 
 | 142 | static inline void spi_set_drvdata(struct spi_device *spi, void *data) | 
 | 143 | { | 
 | 144 | 	dev_set_drvdata(&spi->dev, data); | 
 | 145 | } | 
 | 146 |  | 
 | 147 | static inline void *spi_get_drvdata(struct spi_device *spi) | 
 | 148 | { | 
 | 149 | 	return dev_get_drvdata(&spi->dev); | 
 | 150 | } | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 151 |  | 
 | 152 | struct spi_message; | 
| Mark Brown | b158935 | 2013-10-05 11:50:40 +0100 | [diff] [blame] | 153 | struct spi_transfer; | 
| David Brownell | b885244 | 2006-01-08 13:34:23 -0800 | [diff] [blame] | 154 |  | 
| David Brownell | 2604288 | 2007-07-31 00:39:44 -0700 | [diff] [blame] | 155 | /** | 
 | 156 |  * struct spi_driver - Host side "protocol" driver | 
| Anton Vorontsov | 75368bf | 2009-09-22 16:46:04 -0700 | [diff] [blame] | 157 |  * @id_table: List of SPI devices supported by this driver | 
| David Brownell | 2604288 | 2007-07-31 00:39:44 -0700 | [diff] [blame] | 158 |  * @probe: Binds this driver to the spi device.  Drivers can verify | 
 | 159 |  *	that the device is actually present, and may need to configure | 
 | 160 |  *	characteristics (such as bits_per_word) which weren't needed for | 
 | 161 |  *	the initial configuration done during system setup. | 
 | 162 |  * @remove: Unbinds this driver from the spi device | 
 | 163 |  * @shutdown: Standard shutdown callback used during system state | 
 | 164 |  *	transitions such as powerdown/halt and kexec | 
 | 165 |  * @suspend: Standard suspend callback used during system state transitions | 
 | 166 |  * @resume: Standard resume callback used during system state transitions | 
 | 167 |  * @driver: SPI device drivers should initialize the name and owner | 
 | 168 |  *	field of this structure. | 
 | 169 |  * | 
 | 170 |  * This represents the kind of device driver that uses SPI messages to | 
 | 171 |  * interact with the hardware at the other end of a SPI link.  It's called | 
 | 172 |  * a "protocol" driver because it works through messages rather than talking | 
 | 173 |  * directly to SPI hardware (which is what the underlying SPI controller | 
 | 174 |  * driver does to pass those messages).  These protocols are defined in the | 
 | 175 |  * specification for the device(s) supported by the driver. | 
 | 176 |  * | 
 | 177 |  * As a rule, those device protocols represent the lowest level interface | 
 | 178 |  * supported by a driver, and it will support upper level interfaces too. | 
 | 179 |  * Examples of such upper levels include frameworks like MTD, networking, | 
 | 180 |  * MMC, RTC, filesystem character device nodes, and hardware monitoring. | 
 | 181 |  */ | 
| David Brownell | b885244 | 2006-01-08 13:34:23 -0800 | [diff] [blame] | 182 | struct spi_driver { | 
| Anton Vorontsov | 75368bf | 2009-09-22 16:46:04 -0700 | [diff] [blame] | 183 | 	const struct spi_device_id *id_table; | 
| David Brownell | b885244 | 2006-01-08 13:34:23 -0800 | [diff] [blame] | 184 | 	int			(*probe)(struct spi_device *spi); | 
 | 185 | 	int			(*remove)(struct spi_device *spi); | 
 | 186 | 	void			(*shutdown)(struct spi_device *spi); | 
 | 187 | 	int			(*suspend)(struct spi_device *spi, pm_message_t mesg); | 
 | 188 | 	int			(*resume)(struct spi_device *spi); | 
 | 189 | 	struct device_driver	driver; | 
 | 190 | }; | 
 | 191 |  | 
 | 192 | static inline struct spi_driver *to_spi_driver(struct device_driver *drv) | 
 | 193 | { | 
 | 194 | 	return drv ? container_of(drv, struct spi_driver, driver) : NULL; | 
 | 195 | } | 
 | 196 |  | 
 | 197 | extern int spi_register_driver(struct spi_driver *sdrv); | 
 | 198 |  | 
| David Brownell | 33e34dc | 2007-05-08 00:32:21 -0700 | [diff] [blame] | 199 | /** | 
 | 200 |  * spi_unregister_driver - reverse effect of spi_register_driver | 
 | 201 |  * @sdrv: the driver to unregister | 
 | 202 |  * Context: can sleep | 
 | 203 |  */ | 
| David Brownell | b885244 | 2006-01-08 13:34:23 -0800 | [diff] [blame] | 204 | static inline void spi_unregister_driver(struct spi_driver *sdrv) | 
 | 205 | { | 
| Ben Dooks | ddc1e97 | 2007-02-12 00:52:43 -0800 | [diff] [blame] | 206 | 	if (sdrv) | 
 | 207 | 		driver_unregister(&sdrv->driver); | 
| David Brownell | b885244 | 2006-01-08 13:34:23 -0800 | [diff] [blame] | 208 | } | 
 | 209 |  | 
| Lars-Peter Clausen | 3acbb01 | 2011-11-16 10:13:37 +0100 | [diff] [blame] | 210 | /** | 
 | 211 |  * module_spi_driver() - Helper macro for registering a SPI driver | 
 | 212 |  * @__spi_driver: spi_driver struct | 
 | 213 |  * | 
 | 214 |  * Helper macro for SPI drivers which do not do anything special in module | 
 | 215 |  * init/exit. This eliminates a lot of boilerplate. Each module may only | 
 | 216 |  * use this macro once, and calling it replaces module_init() and module_exit() | 
 | 217 |  */ | 
 | 218 | #define module_spi_driver(__spi_driver) \ | 
 | 219 | 	module_driver(__spi_driver, spi_register_driver, \ | 
 | 220 | 			spi_unregister_driver) | 
| David Brownell | b885244 | 2006-01-08 13:34:23 -0800 | [diff] [blame] | 221 |  | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 222 | /** | 
 | 223 |  * struct spi_master - interface to SPI master controller | 
| Tony Jones | 49dce68 | 2007-10-16 01:27:48 -0700 | [diff] [blame] | 224 |  * @dev: device interface to this driver | 
| Feng Tang | 2b9603a | 2010-08-02 15:52:15 +0800 | [diff] [blame] | 225 |  * @list: link with the global spi_master list | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 226 |  * @bus_num: board-specific (and often SOC-specific) identifier for a | 
| David Brownell | 747d844 | 2006-04-02 10:33:37 -0800 | [diff] [blame] | 227 |  *	given SPI controller. | 
| David Brownell | b885244 | 2006-01-08 13:34:23 -0800 | [diff] [blame] | 228 |  * @num_chipselect: chipselects are used to distinguish individual | 
| David Brownell | 747d844 | 2006-04-02 10:33:37 -0800 | [diff] [blame] | 229 |  *	SPI slaves, and are numbered from zero to num_chipselects. | 
 | 230 |  *	each slave has a chipselect signal, but it's common that not | 
 | 231 |  *	every chipselect is connected to a slave. | 
| Mike Rapoport | fd5e191 | 2009-04-06 19:00:56 -0700 | [diff] [blame] | 232 |  * @dma_alignment: SPI controller constraint on DMA buffers alignment. | 
| Randy Dunlap | b73b255 | 2009-09-22 16:46:00 -0700 | [diff] [blame] | 233 |  * @mode_bits: flags understood by this controller driver | 
| Stephen Warren | 543bb25 | 2013-03-26 20:37:57 -0600 | [diff] [blame] | 234 |  * @bits_per_word_mask: A mask indicating which values of bits_per_word are | 
 | 235 |  *	supported by the driver. Bit n indicates that a bits_per_word n+1 is | 
| Masanari Iida | e227867 | 2014-02-18 22:54:36 +0900 | [diff] [blame] | 236 |  *	supported. If set, the SPI core will reject any transfer with an | 
| Stephen Warren | 543bb25 | 2013-03-26 20:37:57 -0600 | [diff] [blame] | 237 |  *	unsupported bits_per_word. If not set, this value is simply ignored, | 
 | 238 |  *	and it's up to the individual driver to perform any validation. | 
| Mark Brown | a2fd4f9 | 2013-07-10 14:57:26 +0100 | [diff] [blame] | 239 |  * @min_speed_hz: Lowest supported transfer speed | 
 | 240 |  * @max_speed_hz: Highest supported transfer speed | 
| Randy Dunlap | b73b255 | 2009-09-22 16:46:00 -0700 | [diff] [blame] | 241 |  * @flags: other constraints relevant to this driver | 
| Ernst Schwab | 5c79a5a | 2010-08-16 15:10:11 +0200 | [diff] [blame] | 242 |  * @bus_lock_spinlock: spinlock for SPI bus locking | 
 | 243 |  * @bus_lock_mutex: mutex for SPI bus locking | 
 | 244 |  * @bus_lock_flag: indicates that the SPI bus is locked for exclusive use | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 245 |  * @setup: updates the device mode and clocking records used by a | 
| David Brownell | 8022456 | 2007-02-12 00:52:46 -0800 | [diff] [blame] | 246 |  *	device's SPI controller; protocol code may call this.  This | 
 | 247 |  *	must fail if an unrecognized or unsupported mode is requested. | 
| David Brownell | 33e34dc | 2007-05-08 00:32:21 -0700 | [diff] [blame] | 248 |  *	It's always safe to call this unless transfers are pending on | 
 | 249 |  *	the device whose settings are being modified. | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 250 |  * @transfer: adds a message to the controller's transfer queue. | 
 | 251 |  * @cleanup: frees controller-specific state | 
| Thierry Reding | 2c67568 | 2014-08-08 13:02:36 +0200 | [diff] [blame] | 252 |  * @can_dma: determine whether this master supports DMA | 
| Linus Walleij | ffbbdd21 | 2012-02-22 10:05:38 +0100 | [diff] [blame] | 253 |  * @queued: whether this master is providing an internal message queue | 
 | 254 |  * @kworker: thread struct for message pump | 
 | 255 |  * @kworker_task: pointer to task for message pump kworker thread | 
 | 256 |  * @pump_messages: work struct for scheduling work to the message pump | 
 | 257 |  * @queue_lock: spinlock to synchronise access to message queue | 
 | 258 |  * @queue: message queue | 
| Mark Brown | 0461a41 | 2014-12-09 21:38:05 +0000 | [diff] [blame] | 259 |  * @idling: the device is entering idle state | 
| Linus Walleij | ffbbdd21 | 2012-02-22 10:05:38 +0100 | [diff] [blame] | 260 |  * @cur_msg: the currently in-flight message | 
| Mark Brown | 2841a5f | 2013-10-05 00:23:12 +0100 | [diff] [blame] | 261 |  * @cur_msg_prepared: spi_prepare_message was called for the currently | 
 | 262 |  *                    in-flight message | 
| Thierry Reding | 2c67568 | 2014-08-08 13:02:36 +0200 | [diff] [blame] | 263 |  * @cur_msg_mapped: message has been mapped for DMA | 
| Masanari Iida | e227867 | 2014-02-18 22:54:36 +0900 | [diff] [blame] | 264 |  * @xfer_completion: used by core transfer_one_message() | 
| Linus Walleij | ffbbdd21 | 2012-02-22 10:05:38 +0100 | [diff] [blame] | 265 |  * @busy: message pump is busy | 
 | 266 |  * @running: message pump is running | 
 | 267 |  * @rt: whether this queue is set to run as a realtime task | 
| Mark Brown | 49834de | 2013-07-28 14:47:02 +0100 | [diff] [blame] | 268 |  * @auto_runtime_pm: the core should ensure a runtime PM reference is held | 
 | 269 |  *                   while the hardware is prepared, using the parent | 
 | 270 |  *                   device for the spidev | 
| Mark Brown | 6ad45a2 | 2014-02-02 13:47:47 +0000 | [diff] [blame] | 271 |  * @max_dma_len: Maximum length of a DMA transfer for the device. | 
| Linus Walleij | ffbbdd21 | 2012-02-22 10:05:38 +0100 | [diff] [blame] | 272 |  * @prepare_transfer_hardware: a message will soon arrive from the queue | 
 | 273 |  *	so the subsystem requests the driver to prepare the transfer hardware | 
 | 274 |  *	by issuing this call | 
 | 275 |  * @transfer_one_message: the subsystem calls the driver to transfer a single | 
 | 276 |  *	message while queuing transfers that arrive in the meantime. When the | 
 | 277 |  *	driver is finished with this message, it must call | 
 | 278 |  *	spi_finalize_current_message() so the subsystem can issue the next | 
| Baruch Siach | e930533 | 2014-01-25 22:36:15 +0200 | [diff] [blame] | 279 |  *	message | 
| Randy Dunlap | dbabe0d | 2012-04-17 17:03:50 -0700 | [diff] [blame] | 280 |  * @unprepare_transfer_hardware: there are currently no more messages on the | 
| Linus Walleij | ffbbdd21 | 2012-02-22 10:05:38 +0100 | [diff] [blame] | 281 |  *	queue so the subsystem notifies the driver that it may relax the | 
 | 282 |  *	hardware by issuing this call | 
| Geert Uytterhoeven | bd6857a | 2014-01-21 16:10:07 +0100 | [diff] [blame] | 283 |  * @set_cs: set the logic level of the chip select line.  May be called | 
| Mark Brown | b158935 | 2013-10-05 11:50:40 +0100 | [diff] [blame] | 284 |  *          from interrupt context. | 
| Mark Brown | 2841a5f | 2013-10-05 00:23:12 +0100 | [diff] [blame] | 285 |  * @prepare_message: set up the controller to transfer a single message, | 
 | 286 |  *                   for example doing DMA mapping.  Called from threaded | 
 | 287 |  *                   context. | 
| Geert Uytterhoeven | 0516712 | 2014-01-21 16:10:06 +0100 | [diff] [blame] | 288 |  * @transfer_one: transfer a single spi_transfer. | 
 | 289 |  *                  - return 0 if the transfer is finished, | 
 | 290 |  *                  - return 1 if the transfer is still in progress. When | 
 | 291 |  *                    the driver is finished with this transfer it must | 
 | 292 |  *                    call spi_finalize_current_transfer() so the subsystem | 
| Baruch Siach | 6e5f526 | 2014-01-25 22:36:13 +0200 | [diff] [blame] | 293 |  *                    can issue the next transfer. Note: transfer_one and | 
 | 294 |  *                    transfer_one_message are mutually exclusive; when both | 
 | 295 |  *                    are set, the generic subsystem does not call your | 
 | 296 |  *                    transfer_one callback. | 
| Andy Shevchenko | b716c4f | 2015-02-27 17:34:15 +0200 | [diff] [blame^] | 297 |  * @handle_err: the subsystem calls the driver to handle an error that occurs | 
 | 298 |  *		in the generic implementation of transfer_one_message(). | 
| Mark Brown | 2841a5f | 2013-10-05 00:23:12 +0100 | [diff] [blame] | 299 |  * @unprepare_message: undo any work done by prepare_message(). | 
| Andreas Larsson | 095c375 | 2013-01-29 15:53:41 +0100 | [diff] [blame] | 300 |  * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS | 
| Andreas Larsson | 446411e | 2013-02-13 14:20:25 +0100 | [diff] [blame] | 301 |  *	number. Any individual value may be -ENOENT for CS lines that | 
| Andreas Larsson | 095c375 | 2013-01-29 15:53:41 +0100 | [diff] [blame] | 302 |  *	are not GPIOs (driven by the SPI controller itself). | 
| Thierry Reding | 2c67568 | 2014-08-08 13:02:36 +0200 | [diff] [blame] | 303 |  * @dma_tx: DMA transmit channel | 
 | 304 |  * @dma_rx: DMA receive channel | 
 | 305 |  * @dummy_rx: dummy receive buffer for full-duplex devices | 
 | 306 |  * @dummy_tx: dummy transmit buffer for full-duplex devices | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 307 |  * | 
| David Brownell | 33e34dc | 2007-05-08 00:32:21 -0700 | [diff] [blame] | 308 |  * Each SPI master controller can communicate with one or more @spi_device | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 309 |  * children.  These make a small bus, sharing MOSI, MISO and SCK signals | 
 | 310 |  * but not chip select signals.  Each device may be configured to use a | 
 | 311 |  * different clock rate, since those shared signals are ignored unless | 
 | 312 |  * the chip is selected. | 
 | 313 |  * | 
 | 314 |  * The driver for an SPI controller manages access to those devices through | 
| David Brownell | 33e34dc | 2007-05-08 00:32:21 -0700 | [diff] [blame] | 315 |  * a queue of spi_message transactions, copying data between CPU memory and | 
 | 316 |  * an SPI slave device.  For each such message it queues, it calls the | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 317 |  * message's completion function when the transaction completes. | 
 | 318 |  */ | 
 | 319 | struct spi_master { | 
| Tony Jones | 49dce68 | 2007-10-16 01:27:48 -0700 | [diff] [blame] | 320 | 	struct device	dev; | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 321 |  | 
| Feng Tang | 2b9603a | 2010-08-02 15:52:15 +0800 | [diff] [blame] | 322 | 	struct list_head list; | 
 | 323 |  | 
| David Brownell | a020ed7 | 2006-04-03 15:49:04 -0700 | [diff] [blame] | 324 | 	/* other than negative (== assign one dynamically), bus_num is fully | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 325 | 	 * board-specific.  usually that simplifies to being SOC-specific. | 
| David Brownell | a020ed7 | 2006-04-03 15:49:04 -0700 | [diff] [blame] | 326 | 	 * example:  one SOC has three SPI controllers, numbered 0..2, | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 327 | 	 * and one board's schematics might show it using SPI-2.  software | 
 | 328 | 	 * would normally use bus_num=2 for that controller. | 
 | 329 | 	 */ | 
| David Brownell | a020ed7 | 2006-04-03 15:49:04 -0700 | [diff] [blame] | 330 | 	s16			bus_num; | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 331 |  | 
 | 332 | 	/* chipselects will be integral to many controllers; some others | 
 | 333 | 	 * might use board-specific GPIOs. | 
 | 334 | 	 */ | 
 | 335 | 	u16			num_chipselect; | 
 | 336 |  | 
| Mike Rapoport | fd5e191 | 2009-04-06 19:00:56 -0700 | [diff] [blame] | 337 | 	/* some SPI controllers pose alignment requirements on DMAable | 
 | 338 | 	 * buffers; let protocol drivers know about these requirements. | 
 | 339 | 	 */ | 
 | 340 | 	u16			dma_alignment; | 
 | 341 |  | 
| David Brownell | e7db06b | 2009-06-17 16:26:04 -0700 | [diff] [blame] | 342 | 	/* spi_device.mode flags understood by this controller driver */ | 
 | 343 | 	u16			mode_bits; | 
 | 344 |  | 
| Stephen Warren | 543bb25 | 2013-03-26 20:37:57 -0600 | [diff] [blame] | 345 | 	/* bitmask of supported bits_per_word for transfers */ | 
 | 346 | 	u32			bits_per_word_mask; | 
| Stephen Warren | 2922a8d | 2013-05-21 20:36:34 -0600 | [diff] [blame] | 347 | #define SPI_BPW_MASK(bits) BIT((bits) - 1) | 
| Stephen Warren | b6aa23c | 2013-08-01 16:08:57 -0600 | [diff] [blame] | 348 | #define SPI_BIT_MASK(bits) (((bits) == 32) ? ~0U : (BIT(bits) - 1)) | 
| Stephen Warren | eca8960 | 2013-05-30 09:59:40 -0600 | [diff] [blame] | 349 | #define SPI_BPW_RANGE_MASK(min, max) (SPI_BIT_MASK(max) - SPI_BIT_MASK(min - 1)) | 
| Stephen Warren | 543bb25 | 2013-03-26 20:37:57 -0600 | [diff] [blame] | 350 |  | 
| Mark Brown | a2fd4f9 | 2013-07-10 14:57:26 +0100 | [diff] [blame] | 351 | 	/* limits on transfer speed */ | 
 | 352 | 	u32			min_speed_hz; | 
 | 353 | 	u32			max_speed_hz; | 
 | 354 |  | 
| David Brownell | 70d6027 | 2009-06-30 11:41:27 -0700 | [diff] [blame] | 355 | 	/* other constraints relevant to this driver */ | 
 | 356 | 	u16			flags; | 
 | 357 | #define SPI_MASTER_HALF_DUPLEX	BIT(0)		/* can't do full duplex */ | 
| David Brownell | 568d069 | 2009-09-22 16:46:18 -0700 | [diff] [blame] | 358 | #define SPI_MASTER_NO_RX	BIT(1)		/* can't do buffer read */ | 
 | 359 | #define SPI_MASTER_NO_TX	BIT(2)		/* can't do buffer write */ | 
| Mark Brown | 3a2eba9 | 2014-01-28 20:17:03 +0000 | [diff] [blame] | 360 | #define SPI_MASTER_MUST_RX      BIT(3)		/* requires rx */ | 
 | 361 | #define SPI_MASTER_MUST_TX      BIT(4)		/* requires tx */ | 
| David Brownell | 70d6027 | 2009-06-30 11:41:27 -0700 | [diff] [blame] | 362 |  | 
| Ernst Schwab | cf32b71e | 2010-06-28 17:49:29 -0700 | [diff] [blame] | 363 | 	/* lock and mutex for SPI bus locking */ | 
 | 364 | 	spinlock_t		bus_lock_spinlock; | 
 | 365 | 	struct mutex		bus_lock_mutex; | 
 | 366 |  | 
 | 367 | 	/* flag indicating that the SPI bus is locked for exclusive use */ | 
 | 368 | 	bool			bus_lock_flag; | 
 | 369 |  | 
| David Brownell | 6e538aa | 2009-04-21 12:24:49 -0700 | [diff] [blame] | 370 | 	/* Setup mode and clock, etc (spi driver may call many times). | 
 | 371 | 	 * | 
 | 372 | 	 * IMPORTANT:  this may be called when transfers to another | 
 | 373 | 	 * device are active.  DO NOT UPDATE SHARED REGISTERS in ways | 
 | 374 | 	 * which could break those transfers. | 
 | 375 | 	 */ | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 376 | 	int			(*setup)(struct spi_device *spi); | 
 | 377 |  | 
 | 378 | 	/* bidirectional bulk transfers | 
 | 379 | 	 * | 
 | 380 | 	 * + The transfer() method may not sleep; its main role is | 
 | 381 | 	 *   just to add the message to the queue. | 
 | 382 | 	 * + For now there's no remove-from-queue operation, or | 
 | 383 | 	 *   any other request management | 
 | 384 | 	 * + To a given spi_device, message queueing is pure fifo | 
 | 385 | 	 * | 
 | 386 | 	 * + The master's main job is to process its message queue, | 
 | 387 | 	 *   selecting a chip then transferring data | 
 | 388 | 	 * + If there are multiple spi_device children, the i/o queue | 
 | 389 | 	 *   arbitration algorithm is unspecified (round robin, fifo, | 
 | 390 | 	 *   priority, reservations, preemption, etc) | 
 | 391 | 	 * | 
 | 392 | 	 * + Chipselect stays active during the entire message | 
 | 393 | 	 *   (unless modified by spi_transfer.cs_change != 0). | 
 | 394 | 	 * + The message transfers use clock and SPI mode parameters | 
 | 395 | 	 *   previously established by setup() for this device | 
 | 396 | 	 */ | 
 | 397 | 	int			(*transfer)(struct spi_device *spi, | 
 | 398 | 						struct spi_message *mesg); | 
 | 399 |  | 
 | 400 | 	/* called on release() to free memory provided by spi_master */ | 
| Hans-Peter Nilsson | 0ffa028 | 2007-02-12 00:52:45 -0800 | [diff] [blame] | 401 | 	void			(*cleanup)(struct spi_device *spi); | 
| Linus Walleij | ffbbdd21 | 2012-02-22 10:05:38 +0100 | [diff] [blame] | 402 |  | 
 | 403 | 	/* | 
| Mark Brown | 99adef3 | 2014-01-16 12:22:43 +0000 | [diff] [blame] | 404 | 	 * Used to enable core support for DMA handling, if can_dma() | 
 | 405 | 	 * exists and returns true then the transfer will be mapped | 
 | 406 | 	 * prior to transfer_one() being called.  The driver should | 
 | 407 | 	 * not modify or store xfer and dma_tx and dma_rx must be set | 
 | 408 | 	 * while the device is prepared. | 
 | 409 | 	 */ | 
 | 410 | 	bool			(*can_dma)(struct spi_master *master, | 
 | 411 | 					   struct spi_device *spi, | 
 | 412 | 					   struct spi_transfer *xfer); | 
 | 413 |  | 
 | 414 | 	/* | 
| Linus Walleij | ffbbdd21 | 2012-02-22 10:05:38 +0100 | [diff] [blame] | 415 | 	 * These hooks are for drivers that want to use the generic | 
 | 416 | 	 * master transfer queueing mechanism. If these are used, the | 
 | 417 | 	 * transfer() function above must NOT be specified by the driver. | 
 | 418 | 	 * Over time we expect SPI drivers to be phased over to this API. | 
 | 419 | 	 */ | 
 | 420 | 	bool				queued; | 
 | 421 | 	struct kthread_worker		kworker; | 
 | 422 | 	struct task_struct		*kworker_task; | 
 | 423 | 	struct kthread_work		pump_messages; | 
 | 424 | 	spinlock_t			queue_lock; | 
 | 425 | 	struct list_head		queue; | 
 | 426 | 	struct spi_message		*cur_msg; | 
| Mark Brown | 0461a41 | 2014-12-09 21:38:05 +0000 | [diff] [blame] | 427 | 	bool				idling; | 
| Linus Walleij | ffbbdd21 | 2012-02-22 10:05:38 +0100 | [diff] [blame] | 428 | 	bool				busy; | 
 | 429 | 	bool				running; | 
 | 430 | 	bool				rt; | 
| Mark Brown | 49834de | 2013-07-28 14:47:02 +0100 | [diff] [blame] | 431 | 	bool				auto_runtime_pm; | 
| Mark Brown | 2841a5f | 2013-10-05 00:23:12 +0100 | [diff] [blame] | 432 | 	bool                            cur_msg_prepared; | 
| Mark Brown | 99adef3 | 2014-01-16 12:22:43 +0000 | [diff] [blame] | 433 | 	bool				cur_msg_mapped; | 
| Mark Brown | b158935 | 2013-10-05 11:50:40 +0100 | [diff] [blame] | 434 | 	struct completion               xfer_completion; | 
| Mark Brown | 6ad45a2 | 2014-02-02 13:47:47 +0000 | [diff] [blame] | 435 | 	size_t				max_dma_len; | 
| Linus Walleij | ffbbdd21 | 2012-02-22 10:05:38 +0100 | [diff] [blame] | 436 |  | 
 | 437 | 	int (*prepare_transfer_hardware)(struct spi_master *master); | 
 | 438 | 	int (*transfer_one_message)(struct spi_master *master, | 
 | 439 | 				    struct spi_message *mesg); | 
 | 440 | 	int (*unprepare_transfer_hardware)(struct spi_master *master); | 
| Mark Brown | 2841a5f | 2013-10-05 00:23:12 +0100 | [diff] [blame] | 441 | 	int (*prepare_message)(struct spi_master *master, | 
 | 442 | 			       struct spi_message *message); | 
 | 443 | 	int (*unprepare_message)(struct spi_master *master, | 
 | 444 | 				 struct spi_message *message); | 
| Mark Brown | 49834de | 2013-07-28 14:47:02 +0100 | [diff] [blame] | 445 |  | 
| Mark Brown | b158935 | 2013-10-05 11:50:40 +0100 | [diff] [blame] | 446 | 	/* | 
 | 447 | 	 * These hooks are for drivers that use a generic implementation | 
	 * of transfer_one_message() provided by the core.
 | 449 | 	 */ | 
 | 450 | 	void (*set_cs)(struct spi_device *spi, bool enable); | 
 | 451 | 	int (*transfer_one)(struct spi_master *master, struct spi_device *spi, | 
 | 452 | 			    struct spi_transfer *transfer); | 
| Andy Shevchenko | b716c4f | 2015-02-27 17:34:15 +0200 | [diff] [blame^] | 453 | 	void (*handle_err)(struct spi_master *master, | 
 | 454 | 			   struct spi_message *message); | 
| Mark Brown | b158935 | 2013-10-05 11:50:40 +0100 | [diff] [blame] | 455 |  | 
| Jean-Christophe PLAGNIOL-VILLARD | 7431798 | 2012-11-15 20:19:57 +0100 | [diff] [blame] | 456 | 	/* gpio chip select */ | 
 | 457 | 	int			*cs_gpios; | 
| Mark Brown | 99adef3 | 2014-01-16 12:22:43 +0000 | [diff] [blame] | 458 |  | 
 | 459 | 	/* DMA channels for use with core dmaengine helpers */ | 
 | 460 | 	struct dma_chan		*dma_tx; | 
 | 461 | 	struct dma_chan		*dma_rx; | 
| Mark Brown | 3a2eba9 | 2014-01-28 20:17:03 +0000 | [diff] [blame] | 462 |  | 
 | 463 | 	/* dummy data for full duplex devices */ | 
 | 464 | 	void			*dummy_rx; | 
 | 465 | 	void			*dummy_tx; | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 466 | }; | 
 | 467 |  | 
| David Brownell | 0c86846 | 2006-01-08 13:34:25 -0800 | [diff] [blame] | 468 | static inline void *spi_master_get_devdata(struct spi_master *master) | 
 | 469 | { | 
| Tony Jones | 49dce68 | 2007-10-16 01:27:48 -0700 | [diff] [blame] | 470 | 	return dev_get_drvdata(&master->dev); | 
| David Brownell | 0c86846 | 2006-01-08 13:34:25 -0800 | [diff] [blame] | 471 | } | 
 | 472 |  | 
 | 473 | static inline void spi_master_set_devdata(struct spi_master *master, void *data) | 
 | 474 | { | 
| Tony Jones | 49dce68 | 2007-10-16 01:27:48 -0700 | [diff] [blame] | 475 | 	dev_set_drvdata(&master->dev, data); | 
| David Brownell | 0c86846 | 2006-01-08 13:34:25 -0800 | [diff] [blame] | 476 | } | 
 | 477 |  | 
 | 478 | static inline struct spi_master *spi_master_get(struct spi_master *master) | 
 | 479 | { | 
| Tony Jones | 49dce68 | 2007-10-16 01:27:48 -0700 | [diff] [blame] | 480 | 	if (!master || !get_device(&master->dev)) | 
| David Brownell | 0c86846 | 2006-01-08 13:34:25 -0800 | [diff] [blame] | 481 | 		return NULL; | 
 | 482 | 	return master; | 
 | 483 | } | 
 | 484 |  | 
 | 485 | static inline void spi_master_put(struct spi_master *master) | 
 | 486 | { | 
 | 487 | 	if (master) | 
| Tony Jones | 49dce68 | 2007-10-16 01:27:48 -0700 | [diff] [blame] | 488 | 		put_device(&master->dev); | 
| David Brownell | 0c86846 | 2006-01-08 13:34:25 -0800 | [diff] [blame] | 489 | } | 
 | 490 |  | 
| Linus Walleij | ffbbdd21 | 2012-02-22 10:05:38 +0100 | [diff] [blame] | 491 | /* PM calls that need to be issued by the driver */ | 
 | 492 | extern int spi_master_suspend(struct spi_master *master); | 
 | 493 | extern int spi_master_resume(struct spi_master *master); | 
 | 494 |  | 
/* Calls the driver makes to interact with the message queue */
 | 496 | extern struct spi_message *spi_get_next_queued_message(struct spi_master *master); | 
 | 497 | extern void spi_finalize_current_message(struct spi_master *master); | 
| Mark Brown | b158935 | 2013-10-05 11:50:40 +0100 | [diff] [blame] | 498 | extern void spi_finalize_current_transfer(struct spi_master *master); | 
| David Brownell | 0c86846 | 2006-01-08 13:34:25 -0800 | [diff] [blame] | 499 |  | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 500 | /* the spi driver core manages memory for the spi_master classdev */ | 
 | 501 | extern struct spi_master * | 
 | 502 | spi_alloc_master(struct device *host, unsigned size); | 
 | 503 |  | 
 | 504 | extern int spi_register_master(struct spi_master *master); | 
| Mark Brown | 666d5b4 | 2013-08-31 18:50:52 +0100 | [diff] [blame] | 505 | extern int devm_spi_register_master(struct device *dev, | 
 | 506 | 				    struct spi_master *master); | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 507 | extern void spi_unregister_master(struct spi_master *master); | 
 | 508 |  | 
 | 509 | extern struct spi_master *spi_busnum_to_master(u16 busnum); | 
 | 510 |  | 
 | 511 | /*---------------------------------------------------------------------------*/ | 
 | 512 |  | 
 | 513 | /* | 
 | 514 |  * I/O INTERFACE between SPI controller and protocol drivers | 
 | 515 |  * | 
 | 516 |  * Protocol drivers use a queue of spi_messages, each transferring data | 
 | 517 |  * between the controller and memory buffers. | 
 | 518 |  * | 
 | 519 |  * The spi_messages themselves consist of a series of read+write transfer | 
 | 520 |  * segments.  Those segments always read the same number of bits as they | 
 | 521 |  * write; but one or the other is easily ignored by passing a null buffer | 
 | 522 |  * pointer.  (This is unlike most types of I/O API, because SPI hardware | 
 | 523 |  * is full duplex.) | 
 | 524 |  * | 
 | 525 |  * NOTE:  Allocation of spi_transfer and spi_message memory is entirely | 
 | 526 |  * up to the protocol driver, which guarantees the integrity of both (as | 
 | 527 |  * well as the data buffers) for as long as the message is queued. | 
 | 528 |  */ | 
 | 529 |  | 
 | 530 | /** | 
 | 531 |  * struct spi_transfer - a read/write buffer pair | 
| Vitaly Wool | 8275c64 | 2006-01-08 13:34:28 -0800 | [diff] [blame] | 532 |  * @tx_buf: data to be written (dma-safe memory), or NULL | 
 | 533 |  * @rx_buf: data to be read (dma-safe memory), or NULL | 
| David Brownell | 33e34dc | 2007-05-08 00:32:21 -0700 | [diff] [blame] | 534 |  * @tx_dma: DMA address of tx_buf, if @spi_message.is_dma_mapped | 
 | 535 |  * @rx_dma: DMA address of rx_buf, if @spi_message.is_dma_mapped | 
| Masanari Iida | e227867 | 2014-02-18 22:54:36 +0900 | [diff] [blame] | 536 |  * @tx_nbits: number of bits used for writing. If 0 the default | 
| wangyuhang | f477b7f | 2013-08-11 18:15:17 +0800 | [diff] [blame] | 537 |  *      (SPI_NBITS_SINGLE) is used. | 
 | 538 |  * @rx_nbits: number of bits used for reading. If 0 the default | 
 | 539 |  *      (SPI_NBITS_SINGLE) is used. | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 540 |  * @len: size of rx and tx buffers (in bytes) | 
| Frederik Schwarzer | 025dfda | 2008-10-16 19:02:37 +0200 | [diff] [blame] | 541 |  * @speed_hz: Select a speed other than the device default for this | 
| David Brownell | 33e34dc | 2007-05-08 00:32:21 -0700 | [diff] [blame] | 542 |  *      transfer. If 0 the default (from @spi_device) is used. | 
| Frederik Schwarzer | 025dfda | 2008-10-16 19:02:37 +0200 | [diff] [blame] | 543 |  * @bits_per_word: select a bits_per_word other than the device default | 
| David Brownell | 33e34dc | 2007-05-08 00:32:21 -0700 | [diff] [blame] | 544 |  *      for this transfer. If 0 the default (from @spi_device) is used. | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 545 |  * @cs_change: affects chipselect after this transfer completes | 
 | 546 |  * @delay_usecs: microseconds to delay after this transfer before | 
| David Brownell | 747d844 | 2006-04-02 10:33:37 -0800 | [diff] [blame] | 547 |  *	(optionally) changing the chipselect status, then starting | 
| David Brownell | 33e34dc | 2007-05-08 00:32:21 -0700 | [diff] [blame] | 548 |  *	the next transfer or completing this @spi_message. | 
 | 549 |  * @transfer_list: transfers are sequenced through @spi_message.transfers | 
| Mark Brown | 6ad45a2 | 2014-02-02 13:47:47 +0000 | [diff] [blame] | 550 |  * @tx_sg: Scatterlist for transmit, currently not for client use | 
 | 551 |  * @rx_sg: Scatterlist for receive, currently not for client use | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 552 |  * | 
 | 553 |  * SPI transfers always write the same number of bytes as they read. | 
| David Brownell | 33e34dc | 2007-05-08 00:32:21 -0700 | [diff] [blame] | 554 |  * Protocol drivers should always provide @rx_buf and/or @tx_buf. | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 555 |  * In some cases, they may also want to provide DMA addresses for | 
 | 556 |  * the data being transferred; that may reduce overhead, when the | 
 | 557 |  * underlying driver uses dma. | 
 | 558 |  * | 
| David Brownell | 4b1badf | 2006-12-29 16:48:39 -0800 | [diff] [blame] | 559 |  * If the transmit buffer is null, zeroes will be shifted out | 
| David Brownell | 33e34dc | 2007-05-08 00:32:21 -0700 | [diff] [blame] | 560 |  * while filling @rx_buf.  If the receive buffer is null, the data | 
| Vitaly Wool | 8275c64 | 2006-01-08 13:34:28 -0800 | [diff] [blame] | 561 |  * shifted in will be discarded.  Only "len" bytes shift out (or in). | 
 | 562 |  * It's an error to try to shift out a partial word.  (For example, by | 
 | 563 |  * shifting out three bytes with word size of sixteen or twenty bits; | 
 | 564 |  * the former uses two bytes per word, the latter uses four bytes.) | 
 | 565 |  * | 
| David Brownell | 8022456 | 2007-02-12 00:52:46 -0800 | [diff] [blame] | 566 |  * In-memory data values are always in native CPU byte order, translated | 
 | 567 |  * from the wire byte order (big-endian except with SPI_LSB_FIRST).  So | 
 | 568 |  * for example when bits_per_word is sixteen, buffers are 2N bytes long | 
| David Brownell | 33e34dc | 2007-05-08 00:32:21 -0700 | [diff] [blame] | 569 |  * (@len = 2N) and hold N sixteen bit words in CPU byte order. | 
| David Brownell | 8022456 | 2007-02-12 00:52:46 -0800 | [diff] [blame] | 570 |  * | 
 | 571 |  * When the word size of the SPI transfer is not a power-of-two multiple | 
 | 572 |  * of eight bits, those in-memory words include extra bits.  In-memory | 
 | 573 |  * words are always seen by protocol drivers as right-justified, so the | 
 | 574 |  * undefined (rx) or unused (tx) bits are always the most significant bits. | 
 | 575 |  * | 
| Vitaly Wool | 8275c64 | 2006-01-08 13:34:28 -0800 | [diff] [blame] | 576 |  * All SPI transfers start with the relevant chipselect active.  Normally | 
 | 577 |  * it stays selected until after the last transfer in a message.  Drivers | 
| David Brownell | 33e34dc | 2007-05-08 00:32:21 -0700 | [diff] [blame] | 578 |  * can affect the chipselect signal using cs_change. | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 579 |  * | 
 | 580 |  * (i) If the transfer isn't the last one in the message, this flag is | 
 | 581 |  * used to make the chipselect briefly go inactive in the middle of the | 
 | 582 |  * message.  Toggling chipselect in this way may be needed to terminate | 
 * a chip command, letting a single spi_message perform all of a group of
 | 584 |  * chip transactions together. | 
 | 585 |  * | 
 | 586 |  * (ii) When the transfer is the last one in the message, the chip may | 
| David Brownell | f5a9c77 | 2007-06-16 10:16:08 -0700 | [diff] [blame] | 587 |  * stay selected until the next transfer.  On multi-device SPI busses | 
 | 588 |  * with nothing blocking messages going to other devices, this is just | 
 | 589 |  * a performance hint; starting a message to another device deselects | 
 | 590 |  * this one.  But in other cases, this can be used to ensure correctness. | 
 | 591 |  * Some devices need protocol transactions to be built from a series of | 
 | 592 |  * spi_message submissions, where the content of one message is determined | 
 | 593 |  * by the results of previous messages and where the whole transaction | 
 * ends when the chipselect goes inactive.
| David Brownell | 0c86846 | 2006-01-08 13:34:25 -0800 | [diff] [blame] | 595 |  * | 
| Masanari Iida | e227867 | 2014-02-18 22:54:36 +0900 | [diff] [blame] | 596 |  * When SPI can transfer in 1x,2x or 4x. It can get this transfer information | 
| wangyuhang | f477b7f | 2013-08-11 18:15:17 +0800 | [diff] [blame] | 597 |  * from device through @tx_nbits and @rx_nbits. In Bi-direction, these | 
 | 598 |  * two should both be set. User can set transfer mode with SPI_NBITS_SINGLE(1x) | 
 | 599 |  * SPI_NBITS_DUAL(2x) and SPI_NBITS_QUAD(4x) to support these three transfer. | 
 | 600 |  * | 
| David Brownell | 0c86846 | 2006-01-08 13:34:25 -0800 | [diff] [blame] | 601 |  * The code that submits an spi_message (and its spi_transfers) | 
 | 602 |  * to the lower layers is responsible for managing its memory. | 
 | 603 |  * Zero-initialize every field you don't set up explicitly, to | 
| Vitaly Wool | 8275c64 | 2006-01-08 13:34:28 -0800 | [diff] [blame] | 604 |  * insulate against future API updates.  After you submit a message | 
 | 605 |  * and its transfers, ignore them until its completion callback. | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 606 |  */ | 
struct spi_transfer {
	/* it's ok if tx_buf == rx_buf (right?)
	 * for MicroWire, one buffer must be null
	 * buffers must work with dma_*map_single() calls, unless
	 *   spi_message.is_dma_mapped reports a pre-existing mapping
	 */
	const void	*tx_buf;
	void		*rx_buf;
	unsigned	len;

	/* DMA addresses of the buffers above; meaningful only when
	 * spi_message.is_dma_mapped is set by the caller.
	 */
	dma_addr_t	tx_dma;
	dma_addr_t	rx_dma;
	/* scatterlists filled in by the core for DMA; not for client use */
	struct sg_table tx_sg;
	struct sg_table rx_sg;

	/* cs_change toggles chipselect after this transfer completes */
	unsigned	cs_change:1;
	/* number of data lines used for tx/rx; 0 means SPI_NBITS_SINGLE */
	unsigned	tx_nbits:3;
	unsigned	rx_nbits:3;
#define	SPI_NBITS_SINGLE	0x01 /* 1bit transfer */
#define	SPI_NBITS_DUAL		0x02 /* 2bits transfer */
#define	SPI_NBITS_QUAD		0x04 /* 4bits transfer */
	/* per-transfer overrides; 0 selects the spi_device defaults */
	u8		bits_per_word;
	u16		delay_usecs;
	u32		speed_hz;

	/* chains this transfer on spi_message.transfers */
	struct list_head transfer_list;
};
 | 634 |  | 
 | 635 | /** | 
 | 636 |  * struct spi_message - one multi-segment SPI transaction | 
| Vitaly Wool | 8275c64 | 2006-01-08 13:34:28 -0800 | [diff] [blame] | 637 |  * @transfers: list of transfer segments in this transaction | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 638 |  * @spi: SPI device to which the transaction is queued | 
 | 639 |  * @is_dma_mapped: if true, the caller provided both dma and cpu virtual | 
 | 640 |  *	addresses for each transfer buffer | 
 | 641 |  * @complete: called to report transaction completions | 
 | 642 |  * @context: the argument to complete() when it's called | 
| Thierry Reding | 2c67568 | 2014-08-08 13:02:36 +0200 | [diff] [blame] | 643 |  * @frame_length: the total number of bytes in the message | 
| David Brownell | b885244 | 2006-01-08 13:34:23 -0800 | [diff] [blame] | 644 |  * @actual_length: the total number of bytes that were transferred in all | 
 | 645 |  *	successful segments | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 646 |  * @status: zero for success, else negative errno | 
 | 647 |  * @queue: for use by whichever driver currently owns the message | 
 | 648 |  * @state: for use by whichever driver currently owns the message | 
| David Brownell | 0c86846 | 2006-01-08 13:34:25 -0800 | [diff] [blame] | 649 |  * | 
| David Brownell | 33e34dc | 2007-05-08 00:32:21 -0700 | [diff] [blame] | 650 |  * A @spi_message is used to execute an atomic sequence of data transfers, | 
| Vitaly Wool | 8275c64 | 2006-01-08 13:34:28 -0800 | [diff] [blame] | 651 |  * each represented by a struct spi_transfer.  The sequence is "atomic" | 
 | 652 |  * in the sense that no other spi_message may use that SPI bus until that | 
 | 653 |  * sequence completes.  On some systems, many such sequences can execute as | 
 * a single programmed DMA transfer.  On all systems, these messages are
 | 655 |  * queued, and might complete after transactions to other devices.  Messages | 
 * sent to a given spi_device are always executed in FIFO order.
 | 657 |  * | 
| David Brownell | 0c86846 | 2006-01-08 13:34:25 -0800 | [diff] [blame] | 658 |  * The code that submits an spi_message (and its spi_transfers) | 
 | 659 |  * to the lower layers is responsible for managing its memory. | 
 | 660 |  * Zero-initialize every field you don't set up explicitly, to | 
| Vitaly Wool | 8275c64 | 2006-01-08 13:34:28 -0800 | [diff] [blame] | 661 |  * insulate against future API updates.  After you submit a message | 
 | 662 |  * and its transfers, ignore them until its completion callback. | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 663 |  */ | 
struct spi_message {
	/* list of spi_transfer segments in this transaction */
	struct list_head	transfers;

	/* device to which the transaction is queued */
	struct spi_device	*spi;

	/* true if the caller provided DMA addresses for each buffer */
	unsigned		is_dma_mapped:1;

	/* REVISIT:  we might want a flag affecting the behavior of the
	 * last transfer ... allowing things like "read 16 bit length L"
	 * immediately followed by "read L bytes".  Basically imposing
	 * a specific message scheduling algorithm.
	 *
	 * Some controller drivers (message-at-a-time queue processing)
	 * could provide that as their default scheduling algorithm.  But
	 * others (with multi-message pipelines) could need a flag to
	 * tell them about such special cases.
	 */

	/* completion is reported through a callback */
	void			(*complete)(void *context);
	void			*context;
	/* total bytes in the message / bytes moved in successful segments */
	unsigned		frame_length;
	unsigned		actual_length;
	/* zero for success, else negative errno */
	int			status;

	/* for optional use by whatever driver currently owns the
	 * spi_message ...  between calls to spi_async and then later
	 * complete(), that's the spi_master controller driver.
	 */
	struct list_head	queue;
	void			*state;
};
 | 696 |  | 
| Vitaly Wool | 8275c64 | 2006-01-08 13:34:28 -0800 | [diff] [blame] | 697 | static inline void spi_message_init(struct spi_message *m) | 
 | 698 | { | 
 | 699 | 	memset(m, 0, sizeof *m); | 
 | 700 | 	INIT_LIST_HEAD(&m->transfers); | 
 | 701 | } | 
 | 702 |  | 
 | 703 | static inline void | 
 | 704 | spi_message_add_tail(struct spi_transfer *t, struct spi_message *m) | 
 | 705 | { | 
 | 706 | 	list_add_tail(&t->transfer_list, &m->transfers); | 
 | 707 | } | 
 | 708 |  | 
 | 709 | static inline void | 
 | 710 | spi_transfer_del(struct spi_transfer *t) | 
 | 711 | { | 
 | 712 | 	list_del(&t->transfer_list); | 
 | 713 | } | 
 | 714 |  | 
| Lars-Peter Clausen | 6d9eecd | 2013-01-09 17:31:00 +0000 | [diff] [blame] | 715 | /** | 
 | 716 |  * spi_message_init_with_transfers - Initialize spi_message and append transfers | 
 | 717 |  * @m: spi_message to be initialized | 
 | 718 |  * @xfers: An array of spi transfers | 
 | 719 |  * @num_xfers: Number of items in the xfer array | 
 | 720 |  * | 
 | 721 |  * This function initializes the given spi_message and adds each spi_transfer in | 
 | 722 |  * the given array to the message. | 
 | 723 |  */ | 
 | 724 | static inline void | 
 | 725 | spi_message_init_with_transfers(struct spi_message *m, | 
 | 726 | struct spi_transfer *xfers, unsigned int num_xfers) | 
 | 727 | { | 
 | 728 | 	unsigned int i; | 
 | 729 |  | 
 | 730 | 	spi_message_init(m); | 
 | 731 | 	for (i = 0; i < num_xfers; ++i) | 
 | 732 | 		spi_message_add_tail(&xfers[i], m); | 
 | 733 | } | 
 | 734 |  | 
| David Brownell | 0c86846 | 2006-01-08 13:34:25 -0800 | [diff] [blame] | 735 | /* It's fine to embed message and transaction structures in other data | 
 | 736 |  * structures so long as you don't free them while they're in use. | 
 | 737 |  */ | 
 | 738 |  | 
 | 739 | static inline struct spi_message *spi_message_alloc(unsigned ntrans, gfp_t flags) | 
 | 740 | { | 
 | 741 | 	struct spi_message *m; | 
 | 742 |  | 
 | 743 | 	m = kzalloc(sizeof(struct spi_message) | 
 | 744 | 			+ ntrans * sizeof(struct spi_transfer), | 
 | 745 | 			flags); | 
 | 746 | 	if (m) { | 
| Shubhrajyoti D | 8f53602 | 2012-02-27 19:29:05 +0530 | [diff] [blame] | 747 | 		unsigned i; | 
| Vitaly Wool | 8275c64 | 2006-01-08 13:34:28 -0800 | [diff] [blame] | 748 | 		struct spi_transfer *t = (struct spi_transfer *)(m + 1); | 
 | 749 |  | 
 | 750 | 		INIT_LIST_HEAD(&m->transfers); | 
 | 751 | 		for (i = 0; i < ntrans; i++, t++) | 
 | 752 | 			spi_message_add_tail(t, m); | 
| David Brownell | 0c86846 | 2006-01-08 13:34:25 -0800 | [diff] [blame] | 753 | 	} | 
 | 754 | 	return m; | 
 | 755 | } | 
 | 756 |  | 
/* Free a message obtained from spi_message_alloc(). */
static inline void spi_message_free(struct spi_message *m)
{
	kfree(m);
}
 | 761 |  | 
| David Brownell | 7d07719 | 2009-06-17 16:26:03 -0700 | [diff] [blame] | 762 | extern int spi_setup(struct spi_device *spi); | 
| David Brownell | 568d069 | 2009-09-22 16:46:18 -0700 | [diff] [blame] | 763 | extern int spi_async(struct spi_device *spi, struct spi_message *message); | 
| Ernst Schwab | cf32b71e | 2010-06-28 17:49:29 -0700 | [diff] [blame] | 764 | extern int spi_async_locked(struct spi_device *spi, | 
 | 765 | 			    struct spi_message *message); | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 766 |  | 
 | 767 | /*---------------------------------------------------------------------------*/ | 
 | 768 |  | 
 | 769 | /* All these synchronous SPI transfer routines are utilities layered | 
 | 770 |  * over the core async transfer primitive.  Here, "synchronous" means | 
 | 771 |  * they will sleep uninterruptibly until the async transfer completes. | 
 | 772 |  */ | 
 | 773 |  | 
 | 774 | extern int spi_sync(struct spi_device *spi, struct spi_message *message); | 
| Ernst Schwab | cf32b71e | 2010-06-28 17:49:29 -0700 | [diff] [blame] | 775 | extern int spi_sync_locked(struct spi_device *spi, struct spi_message *message); | 
 | 776 | extern int spi_bus_lock(struct spi_master *master); | 
 | 777 | extern int spi_bus_unlock(struct spi_master *master); | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 778 |  | 
 | 779 | /** | 
 | 780 |  * spi_write - SPI synchronous write | 
 | 781 |  * @spi: device to which data will be written | 
 | 782 |  * @buf: data buffer | 
 | 783 |  * @len: data buffer size | 
| David Brownell | 33e34dc | 2007-05-08 00:32:21 -0700 | [diff] [blame] | 784 |  * Context: can sleep | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 785 |  * | 
 | 786 |  * This writes the buffer and returns zero or a negative error code. | 
 | 787 |  * Callable only from contexts that can sleep. | 
 | 788 |  */ | 
 | 789 | static inline int | 
| Mark Brown | 0c4a159 | 2011-05-11 00:09:30 +0200 | [diff] [blame] | 790 | spi_write(struct spi_device *spi, const void *buf, size_t len) | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 791 | { | 
 | 792 | 	struct spi_transfer	t = { | 
 | 793 | 			.tx_buf		= buf, | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 794 | 			.len		= len, | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 795 | 		}; | 
| Vitaly Wool | 8275c64 | 2006-01-08 13:34:28 -0800 | [diff] [blame] | 796 | 	struct spi_message	m; | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 797 |  | 
| Vitaly Wool | 8275c64 | 2006-01-08 13:34:28 -0800 | [diff] [blame] | 798 | 	spi_message_init(&m); | 
 | 799 | 	spi_message_add_tail(&t, &m); | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 800 | 	return spi_sync(spi, &m); | 
 | 801 | } | 
 | 802 |  | 
 | 803 | /** | 
 | 804 |  * spi_read - SPI synchronous read | 
 | 805 |  * @spi: device from which data will be read | 
 | 806 |  * @buf: data buffer | 
 | 807 |  * @len: data buffer size | 
| David Brownell | 33e34dc | 2007-05-08 00:32:21 -0700 | [diff] [blame] | 808 |  * Context: can sleep | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 809 |  * | 
| David Brownell | 33e34dc | 2007-05-08 00:32:21 -0700 | [diff] [blame] | 810 |  * This reads the buffer and returns zero or a negative error code. | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 811 |  * Callable only from contexts that can sleep. | 
 | 812 |  */ | 
 | 813 | static inline int | 
| Mark Brown | 0c4a159 | 2011-05-11 00:09:30 +0200 | [diff] [blame] | 814 | spi_read(struct spi_device *spi, void *buf, size_t len) | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 815 | { | 
 | 816 | 	struct spi_transfer	t = { | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 817 | 			.rx_buf		= buf, | 
 | 818 | 			.len		= len, | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 819 | 		}; | 
| Vitaly Wool | 8275c64 | 2006-01-08 13:34:28 -0800 | [diff] [blame] | 820 | 	struct spi_message	m; | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 821 |  | 
| Vitaly Wool | 8275c64 | 2006-01-08 13:34:28 -0800 | [diff] [blame] | 822 | 	spi_message_init(&m); | 
 | 823 | 	spi_message_add_tail(&t, &m); | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 824 | 	return spi_sync(spi, &m); | 
 | 825 | } | 
 | 826 |  | 
| Lars-Peter Clausen | 6d9eecd | 2013-01-09 17:31:00 +0000 | [diff] [blame] | 827 | /** | 
 | 828 |  * spi_sync_transfer - synchronous SPI data transfer | 
 | 829 |  * @spi: device with which data will be exchanged | 
 | 830 |  * @xfers: An array of spi_transfers | 
 | 831 |  * @num_xfers: Number of items in the xfer array | 
 | 832 |  * Context: can sleep | 
 | 833 |  * | 
 | 834 |  * Does a synchronous SPI data transfer of the given spi_transfer array. | 
 | 835 |  * | 
 | 836 |  * For more specific semantics see spi_sync(). | 
 | 837 |  * | 
 | 838 |  * It returns zero on success, else a negative error code. | 
 | 839 |  */ | 
 | 840 | static inline int | 
 | 841 | spi_sync_transfer(struct spi_device *spi, struct spi_transfer *xfers, | 
 | 842 | 	unsigned int num_xfers) | 
 | 843 | { | 
 | 844 | 	struct spi_message msg; | 
 | 845 |  | 
 | 846 | 	spi_message_init_with_transfers(&msg, xfers, num_xfers); | 
 | 847 |  | 
 | 848 | 	return spi_sync(spi, &msg); | 
 | 849 | } | 
 | 850 |  | 
| David Brownell | 0c86846 | 2006-01-08 13:34:25 -0800 | [diff] [blame] | 851 | /* this copies txbuf and rxbuf data; for small transfers only! */ | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 852 | extern int spi_write_then_read(struct spi_device *spi, | 
| Mark Brown | 0c4a159 | 2011-05-11 00:09:30 +0200 | [diff] [blame] | 853 | 		const void *txbuf, unsigned n_tx, | 
 | 854 | 		void *rxbuf, unsigned n_rx); | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 855 |  | 
 | 856 | /** | 
 | 857 |  * spi_w8r8 - SPI synchronous 8 bit write followed by 8 bit read | 
 | 858 |  * @spi: device with which data will be exchanged | 
 | 859 |  * @cmd: command to be written before data is read back | 
| David Brownell | 33e34dc | 2007-05-08 00:32:21 -0700 | [diff] [blame] | 860 |  * Context: can sleep | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 861 |  * | 
 | 862 |  * This returns the (unsigned) eight bit number returned by the | 
 | 863 |  * device, or else a negative error code.  Callable only from | 
 | 864 |  * contexts that can sleep. | 
 | 865 |  */ | 
 | 866 | static inline ssize_t spi_w8r8(struct spi_device *spi, u8 cmd) | 
 | 867 | { | 
 | 868 | 	ssize_t			status; | 
 | 869 | 	u8			result; | 
 | 870 |  | 
 | 871 | 	status = spi_write_then_read(spi, &cmd, 1, &result, 1); | 
 | 872 |  | 
 | 873 | 	/* return negative errno or unsigned value */ | 
 | 874 | 	return (status < 0) ? status : result; | 
 | 875 | } | 
 | 876 |  | 
 | 877 | /** | 
 | 878 |  * spi_w8r16 - SPI synchronous 8 bit write followed by 16 bit read | 
 | 879 |  * @spi: device with which data will be exchanged | 
 | 880 |  * @cmd: command to be written before data is read back | 
| David Brownell | 33e34dc | 2007-05-08 00:32:21 -0700 | [diff] [blame] | 881 |  * Context: can sleep | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 882 |  * | 
 | 883 |  * This returns the (unsigned) sixteen bit number returned by the | 
 | 884 |  * device, or else a negative error code.  Callable only from | 
 | 885 |  * contexts that can sleep. | 
 | 886 |  * | 
 | 887 |  * The number is returned in wire-order, which is at least sometimes | 
 | 888 |  * big-endian. | 
 | 889 |  */ | 
 | 890 | static inline ssize_t spi_w8r16(struct spi_device *spi, u8 cmd) | 
 | 891 | { | 
 | 892 | 	ssize_t			status; | 
 | 893 | 	u16			result; | 
 | 894 |  | 
| Geert Uytterhoeven | 269ccca | 2014-01-12 13:59:06 +0100 | [diff] [blame] | 895 | 	status = spi_write_then_read(spi, &cmd, 1, &result, 2); | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 896 |  | 
 | 897 | 	/* return negative errno or unsigned value */ | 
 | 898 | 	return (status < 0) ? status : result; | 
 | 899 | } | 
 | 900 |  | 
| Lars-Peter Clausen | 05071aa | 2013-09-27 16:34:27 +0200 | [diff] [blame] | 901 | /** | 
 | 902 |  * spi_w8r16be - SPI synchronous 8 bit write followed by 16 bit big-endian read | 
 | 903 |  * @spi: device with which data will be exchanged | 
 | 904 |  * @cmd: command to be written before data is read back | 
 | 905 |  * Context: can sleep | 
 | 906 |  * | 
 | 907 |  * This returns the (unsigned) sixteen bit number returned by the device in cpu | 
 | 908 |  * endianness, or else a negative error code. Callable only from contexts that | 
 | 909 |  * can sleep. | 
 | 910 |  * | 
 | 911 |  * This function is similar to spi_w8r16, with the exception that it will | 
 | 912 |  * convert the read 16 bit data word from big-endian to native endianness. | 
 | 913 |  * | 
 | 914 |  */ | 
 | 915 | static inline ssize_t spi_w8r16be(struct spi_device *spi, u8 cmd) | 
 | 916 |  | 
 | 917 | { | 
 | 918 | 	ssize_t status; | 
 | 919 | 	__be16 result; | 
 | 920 |  | 
 | 921 | 	status = spi_write_then_read(spi, &cmd, 1, &result, 2); | 
 | 922 | 	if (status < 0) | 
 | 923 | 		return status; | 
 | 924 |  | 
 | 925 | 	return be16_to_cpu(result); | 
 | 926 | } | 
 | 927 |  | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 928 | /*---------------------------------------------------------------------------*/ | 
 | 929 |  | 
 | 930 | /* | 
 | 931 |  * INTERFACE between board init code and SPI infrastructure. | 
 | 932 |  * | 
 | 933 |  * No SPI driver ever sees these SPI device table segments, but | 
 | 934 |  * it's how the SPI core (or adapters that get hotplugged) grows | 
 | 935 |  * the driver model tree. | 
 | 936 |  * | 
 | 937 |  * As a rule, SPI devices can't be probed.  Instead, board init code | 
 | 938 |  * provides a table listing the devices which are present, with enough | 
 | 939 |  * information to bind and set up the device's driver.  There's basic | 
 | 940 |  * support for nonstatic configurations too; enough to handle adding | 
 | 941 |  * parport adapters, or microcontrollers acting as USB-to-SPI bridges. | 
 | 942 |  */ | 
 | 943 |  | 
| David Brownell | 2604288 | 2007-07-31 00:39:44 -0700 | [diff] [blame] | 944 | /** | 
 | 945 |  * struct spi_board_info - board-specific template for a SPI device | 
 | 946 |  * @modalias: Initializes spi_device.modalias; identifies the driver. | 
 | 947 |  * @platform_data: Initializes spi_device.platform_data; the particular | 
 | 948 |  *	data stored there is driver-specific. | 
 | 949 |  * @controller_data: Initializes spi_device.controller_data; some | 
 | 950 |  *	controllers need hints about hardware setup, e.g. for DMA. | 
 | 951 |  * @irq: Initializes spi_device.irq; depends on how the board is wired. | 
 | 952 |  * @max_speed_hz: Initializes spi_device.max_speed_hz; based on limits | 
 | 953 |  *	from the chip datasheet and board-specific signal quality issues. | 
 | 954 |  * @bus_num: Identifies which spi_master parents the spi_device; unused | 
 | 955 |  *	by spi_new_device(), and otherwise depends on board wiring. | 
 | 956 |  * @chip_select: Initializes spi_device.chip_select; depends on how | 
 | 957 |  *	the board is wired. | 
 | 958 |  * @mode: Initializes spi_device.mode; based on the chip datasheet, board | 
 | 959 |  *	wiring (some devices support both 3WIRE and standard modes), and | 
 | 960 |  *	possibly presence of an inverter in the chipselect path. | 
 | 961 |  * | 
 | 962 |  * When adding new SPI devices to the device tree, these structures serve | 
 | 963 |  * as a partial device template.  They hold information which can't always | 
 | 964 |  * be determined by drivers.  Information that probe() can establish (such | 
 | 965 |  * as the default transfer wordsize) is not included here. | 
 | 966 |  * | 
 | 967 |  * These structures are used in two places.  Their primary role is to | 
 | 968 |  * be stored in tables of board-specific device descriptors, which are | 
 | 969 |  * declared early in board initialization and then used (much later) to | 
 | 970 |  * populate a controller's device tree after the that controller's driver | 
 | 971 |  * initializes.  A secondary (and atypical) role is as a parameter to | 
 | 972 |  * spi_new_device() call, which happens after those controller drivers | 
 | 973 |  * are active in some dynamic board configuration models. | 
 | 974 |  */ | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 975 | struct spi_board_info { | 
 | 976 | 	/* the device name and module name are coupled, like platform_bus; | 
 | 977 | 	 * "modalias" is normally the driver name. | 
 | 978 | 	 * | 
 | 979 | 	 * platform_data goes to spi_device.dev.platform_data, | 
| David Brownell | b885244 | 2006-01-08 13:34:23 -0800 | [diff] [blame] | 980 | 	 * controller_data goes to spi_device.controller_data, | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 981 | 	 * irq is copied too | 
 | 982 | 	 */ | 
| Anton Vorontsov | 75368bf | 2009-09-22 16:46:04 -0700 | [diff] [blame] | 983 | 	char		modalias[SPI_NAME_SIZE]; | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 984 | 	const void	*platform_data; | 
| David Brownell | b885244 | 2006-01-08 13:34:23 -0800 | [diff] [blame] | 985 | 	void		*controller_data; | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 986 | 	int		irq; | 
 | 987 |  | 
 | 988 | 	/* slower signaling on noisy or low voltage boards */ | 
 | 989 | 	u32		max_speed_hz; | 
 | 990 |  | 
 | 991 |  | 
 | 992 | 	/* bus_num is board specific and matches the bus_num of some | 
 | 993 | 	 * spi_master that will probably be registered later. | 
 | 994 | 	 * | 
 | 995 | 	 * chip_select reflects how this chip is wired to that master; | 
 | 996 | 	 * it's less than num_chipselect. | 
 | 997 | 	 */ | 
 | 998 | 	u16		bus_num; | 
 | 999 | 	u16		chip_select; | 
 | 1000 |  | 
| David Brownell | 980a01c | 2006-06-28 07:47:15 -0700 | [diff] [blame] | 1001 | 	/* mode becomes spi_device.mode, and is essential for chips | 
 | 1002 | 	 * where the default of SPI_CS_HIGH = 0 is wrong. | 
 | 1003 | 	 */ | 
| wangyuhang | f477b7f | 2013-08-11 18:15:17 +0800 | [diff] [blame] | 1004 | 	u16		mode; | 
| David Brownell | 980a01c | 2006-06-28 07:47:15 -0700 | [diff] [blame] | 1005 |  | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 1006 | 	/* ... may need additional spi_device chip config data here. | 
 | 1007 | 	 * avoid stuff protocol drivers can set; but include stuff | 
 | 1008 | 	 * needed to behave without being bound to a driver: | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 1009 | 	 *  - quirks like clock rate mattering when not selected | 
 | 1010 | 	 */ | 
 | 1011 | }; | 
 | 1012 |  | 
#ifdef	CONFIG_SPI
/* declare a table of @n board_info entries to the SPI core, for use
 * when the matching controller is set up (see the comments above)
 */
extern int
spi_register_board_info(struct spi_board_info const *info, unsigned n);
#else
/* board init code may ignore whether SPI is configured or not */
static inline int
spi_register_board_info(struct spi_board_info const *info, unsigned n)
	{ return 0; }
#endif
 | 1022 |  | 
 | 1023 |  | 
 | 1024 | /* If you're hotplugging an adapter with devices (parport, usb, etc) | 
| David Brownell | 0c86846 | 2006-01-08 13:34:25 -0800 | [diff] [blame] | 1025 |  * use spi_new_device() to describe each device.  You can also call | 
 | 1026 |  * spi_unregister_device() to start making that device vanish, but | 
 | 1027 |  * normally that would be handled by spi_unregister_master(). | 
| Grant Likely | dc87c98 | 2008-05-15 16:50:22 -0600 | [diff] [blame] | 1028 |  * | 
 | 1029 |  * You can also use spi_alloc_device() and spi_add_device() to use a two | 
 | 1030 |  * stage registration sequence for each spi_device.  This gives the caller | 
 | 1031 |  * some more control over the spi_device structure before it is registered, | 
 * but requires the caller to initialize fields that would otherwise
 | 1033 |  * be defined using the board info. | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 1034 |  */ | 
/* first stage: allocate a child spi_device of @master; the caller fills
 * in the fields board info would otherwise provide, then registers it
 */
extern struct spi_device *
spi_alloc_device(struct spi_master *master);

/* second stage: register a device obtained from spi_alloc_device() */
extern int
spi_add_device(struct spi_device *spi);

/* one-step alternative: allocate, describe (from board info), register */
extern struct spi_device *
spi_new_device(struct spi_master *, struct spi_board_info *);
 | 1043 |  | 
 | 1044 | static inline void | 
 | 1045 | spi_unregister_device(struct spi_device *spi) | 
 | 1046 | { | 
 | 1047 | 	if (spi) | 
 | 1048 | 		device_unregister(&spi->dev); | 
 | 1049 | } | 
 | 1050 |  | 
| Anton Vorontsov | 75368bf | 2009-09-22 16:46:04 -0700 | [diff] [blame] | 1051 | extern const struct spi_device_id * | 
 | 1052 | spi_get_device_id(const struct spi_device *sdev); | 
 | 1053 |  | 
| Beniamino Galvani | b671358 | 2014-11-22 16:21:39 +0100 | [diff] [blame] | 1054 | static inline bool | 
 | 1055 | spi_transfer_is_last(struct spi_master *master, struct spi_transfer *xfer) | 
 | 1056 | { | 
 | 1057 | 	return list_is_last(&xfer->transfer_list, &master->cur_msg->transfers); | 
 | 1058 | } | 
 | 1059 |  | 
| David Brownell | 8ae12a0 | 2006-01-08 13:34:19 -0800 | [diff] [blame] | 1060 | #endif /* __LINUX_SPI_H */ |