/*
 * Copyright (C) 2005 David Brownell
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#ifndef __LINUX_SPI_H
#define __LINUX_SPI_H

#include <linux/device.h>	/* struct device and driver model declarations */
#include <linux/slab.h>		/* kzalloc()/kfree() used by inline helpers */

/*
 * INTERFACES between SPI master-side drivers and SPI infrastructure.
 * (There's no SPI slave support for Linux yet...)
 */
extern struct bus_type spi_bus_type;

/**
 * struct spi_device - Master side proxy for an SPI slave device
 * @dev: Driver model representation of the device.
 * @master: SPI controller used with the device.
 * @max_speed_hz: Maximum clock rate to be used with this chip
 *	(on this board); may be changed by the device's driver.
 *	The spi_transfer.speed_hz can override this for each transfer.
 * @chip_select: Chipselect, distinguishing chips handled by "master".
 * @mode: The spi mode defines how data is clocked out and in.
 *	This may be changed by the device's driver.
 *	The "active low" default for chipselect mode can be overridden,
 *	as can the "MSB first" default for each word in a transfer.
 * @bits_per_word: Data transfers involve one or more words; word sizes
 *	like eight or 12 bits are common.  In-memory wordsizes are
 *	powers of two bytes (e.g. 20 bit samples use 32 bits).
 *	This may be changed by the device's driver, or left at the
 *	default (0) indicating protocol words are eight bit bytes.
 *	The spi_transfer.bits_per_word can override this for each transfer.
 * @irq: Negative, or the number passed to request_irq() to receive
 *	interrupts from this device.
 * @controller_state: Controller's runtime state
 * @controller_data: Board-specific definitions for controller, such as
 *	FIFO initialization parameters; from board_info.controller_data
 * @modalias: Name of the protocol driver expected to handle this device;
 *	normally the driver name, copied from board_info.modalias and
 *	used when matching drivers on the SPI bus.
 *
 * An spi_device is used to interchange data between an SPI slave
 * (usually a discrete chip) and CPU memory.
 *
 * In "dev", the platform_data is used to hold information about this
 * device that's meaningful to the device's protocol driver, but not
 * to its controller.  One example might be an identifier for a chip
 * variant with slightly different functionality.
 */
struct spi_device {
	struct device		dev;
	struct spi_master	*master;
	u32			max_speed_hz;
	u8			chip_select;
	u8			mode;
#define	SPI_CPHA	0x01			/* clock phase */
#define	SPI_CPOL	0x02			/* clock polarity */
#define	SPI_MODE_0	(0|0)			/* (original MicroWire) */
#define	SPI_MODE_1	(0|SPI_CPHA)
#define	SPI_MODE_2	(SPI_CPOL|0)
#define	SPI_MODE_3	(SPI_CPOL|SPI_CPHA)
#define	SPI_CS_HIGH	0x04			/* chipselect active high? */
#define	SPI_LSB_FIRST	0x08			/* per-word bits-on-wire */
	u8			bits_per_word;
	int			irq;
	void			*controller_state;
	void			*controller_data;
	const char		*modalias;

	/* likely need more hooks for more protocol options affecting how
	 * the controller talks to each chip, like:
	 *  - memory packing (12 bit samples into low bits, others zeroed)
	 *  - priority
	 *  - drop chipselect after each word
	 *  - chipselect delays
	 *  - ...
	 */
};

static inline struct spi_device *to_spi_device(struct device *dev)
{
	return dev ? container_of(dev, struct spi_device, dev) : NULL;
}

/* most drivers won't need to care about device refcounting */
static inline struct spi_device *spi_dev_get(struct spi_device *spi)
{
	return (spi && get_device(&spi->dev)) ? spi : NULL;
}

static inline void spi_dev_put(struct spi_device *spi)
{
	if (spi)
		put_device(&spi->dev);
}

/* ctldata is for the bus_master driver's runtime state */
static inline void *spi_get_ctldata(struct spi_device *spi)
{
	return spi->controller_state;
}

static inline void spi_set_ctldata(struct spi_device *spi, void *state)
{
	spi->controller_state = state;
}
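
/*
 * Example (illustrative sketch only, not part of this interface):  a
 * controller driver might stash per-chip state from its setup() method
 * using the ctldata helpers above, and kfree() it from cleanup().  The
 * "my_chip_cfg" structure and its field are hypothetical.
 *
 *	struct my_chip_cfg {
 *		u32	fifo_threshold;
 *	};
 *
 *	static int my_controller_setup(struct spi_device *spi)
 *	{
 *		struct my_chip_cfg *cfg = spi_get_ctldata(spi);
 *
 *		if (!cfg) {
 *			cfg = kzalloc(sizeof *cfg, GFP_KERNEL);
 *			if (!cfg)
 *				return -ENOMEM;
 *			spi_set_ctldata(spi, cfg);
 *		}
 *		cfg->fifo_threshold = 8;
 *		return 0;
 *	}
 */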

struct spi_message;

struct spi_driver {
	int			(*probe)(struct spi_device *spi);
	int			(*remove)(struct spi_device *spi);
	void			(*shutdown)(struct spi_device *spi);
	int			(*suspend)(struct spi_device *spi, pm_message_t mesg);
	int			(*resume)(struct spi_device *spi);
	struct device_driver	driver;
};

static inline struct spi_driver *to_spi_driver(struct device_driver *drv)
{
	return drv ? container_of(drv, struct spi_driver, driver) : NULL;
}

extern int spi_register_driver(struct spi_driver *sdrv);

static inline void spi_unregister_driver(struct spi_driver *sdrv)
{
	if (!sdrv)
		return;
	driver_unregister(&sdrv->driver);
}
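
/*
 * Example (illustrative sketch only):  a protocol driver fills in a struct
 * spi_driver and registers it from its module init code.  The "mychip"
 * name and its probe/remove methods are hypothetical.
 *
 *	static struct spi_driver mychip_driver = {
 *		.driver = {
 *			.name	= "mychip",
 *			.owner	= THIS_MODULE,
 *		},
 *		.probe	= mychip_probe,
 *		.remove	= mychip_remove,
 *	};
 *
 * Module init code then calls spi_register_driver(&mychip_driver), and
 * module exit code calls spi_unregister_driver(&mychip_driver).  The
 * probe() method runs for each matching spi_device created from board
 * info (see the board interface near the end of this header).
 */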


/**
 * struct spi_master - interface to SPI master controller
 * @cdev: class interface to this driver
 * @bus_num: board-specific (and often SOC-specific) identifier for a
 *	given SPI controller.
 * @num_chipselect: chipselects are used to distinguish individual
 *	SPI slaves, and are numbered from zero to num_chipselect - 1.
 *	Each slave has a chipselect signal, but it's common that not
 *	every chipselect is connected to a slave.
 * @setup: updates the device mode and clocking records used by a
 *	device's SPI controller; protocol code may call this.
 * @transfer: adds a message to the controller's transfer queue.
 * @cleanup: frees controller-specific state
 *
 * Each SPI master controller can communicate with one or more spi_device
 * children.  These make a small bus, sharing MOSI, MISO and SCK signals
 * but not chip select signals.  Each device may be configured to use a
 * different clock rate, since those shared signals are ignored unless
 * the chip is selected.
 *
 * The driver for an SPI controller manages access to those devices through
 * a queue of spi_message transactions, copying data between CPU memory and
 * an SPI slave device.  For each such message it queues, it calls the
 * message's completion function when the transaction completes.
 */
struct spi_master {
	struct class_device	cdev;

	/* other than negative (== assign one dynamically), bus_num is fully
	 * board-specific.  usually that simplifies to being SOC-specific.
	 * example:  one SOC has three SPI controllers, numbered 0..2,
	 * and one board's schematics might show it using SPI-2.  software
	 * would normally use bus_num=2 for that controller.
	 */
	s16			bus_num;

	/* chipselects will be integral to many controllers; some others
	 * might use board-specific GPIOs.
	 */
	u16			num_chipselect;

	/* setup mode and clock, etc (spi driver may call many times) */
	int			(*setup)(struct spi_device *spi);

	/* bidirectional bulk transfers
	 *
	 * + The transfer() method may not sleep; its main role is
	 *   just to add the message to the queue.
	 * + For now there's no remove-from-queue operation, or
	 *   any other request management
	 * + To a given spi_device, message queueing is pure fifo
	 *
	 * + The master's main job is to process its message queue,
	 *   selecting a chip then transferring data
	 * + If there are multiple spi_device children, the i/o queue
	 *   arbitration algorithm is unspecified (round robin, fifo,
	 *   priority, reservations, preemption, etc)
	 *
	 * + Chipselect stays active during the entire message
	 *   (unless modified by spi_transfer.cs_change != 0).
	 * + The message transfers use clock and SPI mode parameters
	 *   previously established by setup() for this device
	 */
	int			(*transfer)(struct spi_device *spi,
						struct spi_message *mesg);

	/* called on release() to free memory provided by spi_master */
	void			(*cleanup)(const struct spi_device *spi);
};

static inline void *spi_master_get_devdata(struct spi_master *master)
{
	return class_get_devdata(&master->cdev);
}

static inline void spi_master_set_devdata(struct spi_master *master, void *data)
{
	class_set_devdata(&master->cdev, data);
}

static inline struct spi_master *spi_master_get(struct spi_master *master)
{
	if (!master || !class_device_get(&master->cdev))
		return NULL;
	return master;
}

static inline void spi_master_put(struct spi_master *master)
{
	if (master)
		class_device_put(&master->cdev);
}


/* the spi driver core manages memory for the spi_master classdev */
extern struct spi_master *
spi_alloc_master(struct device *host, unsigned size);

extern int spi_register_master(struct spi_master *master);
extern void spi_unregister_master(struct spi_master *master);

extern struct spi_master *spi_busnum_to_master(u16 busnum);
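
/*
 * Example (illustrative sketch only):  a controller driver typically
 * allocates its spi_master together with driver-private state, fills in
 * the bus parameters and methods, then registers it.  The "my_ctlr"
 * structure and the my_* method names here are hypothetical.
 *
 *	struct my_ctlr {
 *		void __iomem	*regs;
 *	};
 *
 *	static int my_controller_probe(struct platform_device *pdev)
 *	{
 *		struct spi_master	*master;
 *		struct my_ctlr		*ctlr;
 *		int			status;
 *
 *		master = spi_alloc_master(&pdev->dev, sizeof *ctlr);
 *		if (!master)
 *			return -ENOMEM;
 *		ctlr = spi_master_get_devdata(master);
 *
 *		master->bus_num = pdev->id;
 *		master->num_chipselect = 4;
 *		master->setup = my_setup;
 *		master->transfer = my_transfer;
 *		master->cleanup = my_cleanup;
 *
 *		status = spi_register_master(master);
 *		if (status < 0)
 *			spi_master_put(master);
 *		return status;
 *	}
 */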

/*---------------------------------------------------------------------------*/

/*
 * I/O INTERFACE between SPI controller and protocol drivers
 *
 * Protocol drivers use a queue of spi_messages, each transferring data
 * between the controller and memory buffers.
 *
 * The spi_messages themselves consist of a series of read+write transfer
 * segments.  Those segments always read the same number of bits as they
 * write; but one or the other is easily ignored by passing a null buffer
 * pointer.  (This is unlike most types of I/O API, because SPI hardware
 * is full duplex.)
 *
 * NOTE:  Allocation of spi_transfer and spi_message memory is entirely
 * up to the protocol driver, which guarantees the integrity of both (as
 * well as the data buffers) for as long as the message is queued.
 */

/**
 * struct spi_transfer - a read/write buffer pair
 * @tx_buf: data to be written (dma-safe memory), or NULL
 * @rx_buf: data to be read (dma-safe memory), or NULL
 * @tx_dma: DMA address of tx_buf, if spi_message.is_dma_mapped
 * @rx_dma: DMA address of rx_buf, if spi_message.is_dma_mapped
 * @len: size of rx and tx buffers (in bytes)
 * @speed_hz: Select a speed other than the device default for this
 *	transfer.  If 0 the default (from spi_device) is used.
 * @bits_per_word: select a bits_per_word other than the device default
 *	for this transfer.  If 0 the default (from spi_device) is used.
 * @cs_change: affects chipselect after this transfer completes
 * @delay_usecs: microseconds to delay after this transfer before
 *	(optionally) changing the chipselect status, then starting
 *	the next transfer or completing this spi_message.
 * @transfer_list: transfers are sequenced through spi_message.transfers
 *
 * SPI transfers always write the same number of bytes as they read.
 * Protocol drivers should always provide rx_buf and/or tx_buf.
 * In some cases, they may also want to provide DMA addresses for
 * the data being transferred; that may reduce overhead, when the
 * underlying driver uses dma.
 *
 * If the transmit buffer is null, undefined data will be shifted out
 * while filling rx_buf.  If the receive buffer is null, the data
 * shifted in will be discarded.  Only "len" bytes shift out (or in).
 * It's an error to try to shift out a partial word.  (For example, by
 * shifting out three bytes with word size of sixteen or twenty bits;
 * the former uses two bytes per word, the latter uses four bytes.)
 *
 * All SPI transfers start with the relevant chipselect active.  Normally
 * it stays selected until after the last transfer in a message.  Drivers
 * can affect the chipselect signal using cs_change:
 *
 * (i) If the transfer isn't the last one in the message, this flag is
 * used to make the chipselect briefly go inactive in the middle of the
 * message.  Toggling chipselect in this way may be needed to terminate
 * a chip command, letting a single spi_message perform all of a group of
 * chip transactions together.
 *
 * (ii) When the transfer is the last one in the message, the chip may
 * stay selected until the next transfer.  This is purely a performance
 * hint; the controller driver may need to select a different device
 * for the next message.
 *
 * The code that submits an spi_message (and its spi_transfers)
 * to the lower layers is responsible for managing its memory.
 * Zero-initialize every field you don't set up explicitly, to
 * insulate against future API updates.  After you submit a message
 * and its transfers, ignore them until its completion callback.
 */
struct spi_transfer {
	/* it's ok if tx_buf == rx_buf (right?)
	 * for MicroWire, one buffer must be null
	 * buffers must work with dma_*map_single() calls, unless
	 *   spi_message.is_dma_mapped reports a pre-existing mapping
	 */
	const void	*tx_buf;
	void		*rx_buf;
	unsigned	len;

	dma_addr_t	tx_dma;
	dma_addr_t	rx_dma;

	unsigned	cs_change:1;
	u8		bits_per_word;
	u16		delay_usecs;
	u32		speed_hz;

	struct list_head transfer_list;
};
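
/*
 * Example (illustrative sketch only):  two transfers queued as one message,
 * with cs_change on the first one pulsing chipselect between them so the
 * chip sees two separate commands.  The command buffers and lengths here
 * are hypothetical and assumed to be dma-safe.
 *
 *	struct spi_transfer	t[2];
 *
 *	memset(t, 0, sizeof t);
 *	t[0].tx_buf = cmd1;
 *	t[0].len = cmd1_len;
 *	t[0].cs_change = 1;
 *	t[1].tx_buf = cmd2;
 *	t[1].len = cmd2_len;
 */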

/**
 * struct spi_message - one multi-segment SPI transaction
 * @transfers: list of transfer segments in this transaction
 * @spi: SPI device to which the transaction is queued
 * @is_dma_mapped: if true, the caller provided both dma and cpu virtual
 *	addresses for each transfer buffer
 * @complete: called to report transaction completions
 * @context: the argument to complete() when it's called
 * @actual_length: the total number of bytes that were transferred in all
 *	successful segments
 * @status: zero for success, else negative errno
 * @queue: for use by whichever driver currently owns the message
 * @state: for use by whichever driver currently owns the message
 *
 * An spi_message is used to execute an atomic sequence of data transfers,
 * each represented by a struct spi_transfer.  The sequence is "atomic"
 * in the sense that no other spi_message may use that SPI bus until that
 * sequence completes.  On some systems, many such sequences can execute as
 * a single programmed DMA transfer.  On all systems, these messages are
 * queued, and might complete after transactions to other devices.  Messages
 * sent to a given spi_device are always executed in FIFO order.
 *
 * The code that submits an spi_message (and its spi_transfers)
 * to the lower layers is responsible for managing its memory.
 * Zero-initialize every field you don't set up explicitly, to
 * insulate against future API updates.  After you submit a message
 * and its transfers, ignore them until its completion callback.
 */
struct spi_message {
	struct list_head	transfers;

	struct spi_device	*spi;

	unsigned		is_dma_mapped:1;

	/* REVISIT:  we might want a flag affecting the behavior of the
	 * last transfer ... allowing things like "read 16 bit length L"
	 * immediately followed by "read L bytes".  Basically imposing
	 * a specific message scheduling algorithm.
	 *
	 * Some controller drivers (message-at-a-time queue processing)
	 * could provide that as their default scheduling algorithm.  But
	 * others (with multi-message pipelines) could need a flag to
	 * tell them about such special cases.
	 */

	/* completion is reported through a callback */
	void			(*complete)(void *context);
	void			*context;
	unsigned		actual_length;
	int			status;

	/* for optional use by whatever driver currently owns the
	 * spi_message ...  between calls to spi_async and then later
	 * complete(), that's the spi_master controller driver.
	 */
	struct list_head	queue;
	void			*state;
};

static inline void spi_message_init(struct spi_message *m)
{
	memset(m, 0, sizeof *m);
	INIT_LIST_HEAD(&m->transfers);
}

static inline void
spi_message_add_tail(struct spi_transfer *t, struct spi_message *m)
{
	list_add_tail(&t->transfer_list, &m->transfers);
}

static inline void
spi_transfer_del(struct spi_transfer *t)
{
	list_del(&t->transfer_list);
}
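
/*
 * Example (illustrative sketch only):  building a command/response
 * exchange as one message.  The "cmd" and "response" buffers and their
 * lengths are hypothetical, and assumed to be dma-safe (e.g. kmalloc'd,
 * not on the stack).
 *
 *	struct spi_transfer	t[2];
 *	struct spi_message	m;
 *	int			status;
 *
 *	spi_message_init(&m);
 *	memset(t, 0, sizeof t);
 *
 *	t[0].tx_buf = cmd;
 *	t[0].len = cmd_len;
 *	spi_message_add_tail(&t[0], &m);
 *
 *	t[1].rx_buf = response;
 *	t[1].len = response_len;
 *	spi_message_add_tail(&t[1], &m);
 *
 *	status = spi_sync(spi, &m);
 *	if (status == 0)
 *		pr_debug("moved %u bytes\n", m.actual_length);
 */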

/* It's fine to embed message and transaction structures in other data
 * structures so long as you don't free them while they're in use.
 */

static inline struct spi_message *spi_message_alloc(unsigned ntrans, gfp_t flags)
{
	struct spi_message *m;

	m = kzalloc(sizeof(struct spi_message)
			+ ntrans * sizeof(struct spi_transfer),
			flags);
	if (m) {
		int i;
		struct spi_transfer *t = (struct spi_transfer *)(m + 1);

		INIT_LIST_HEAD(&m->transfers);
		for (i = 0; i < ntrans; i++, t++)
			spi_message_add_tail(t, m);
	}
	return m;
}

static inline void spi_message_free(struct spi_message *m)
{
	kfree(m);
}

/**
 * spi_setup -- setup SPI mode and clock rate
 * @spi: the device whose settings are being modified
 *
 * SPI protocol drivers may need to update the transfer mode if the
 * device doesn't work with the mode 0 default.  They may likewise need
 * to update clock rates or word sizes from initial values.  This function
 * changes those settings, and must be called from a context that can sleep.
 * The changes take effect the next time the device is selected and data
 * is transferred to or from it.
 */
static inline int
spi_setup(struct spi_device *spi)
{
	return spi->master->setup(spi);
}
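
/*
 * Example (illustrative sketch only):  a protocol driver's probe() method
 * might need SPI mode 3 and sixteen bit words (assuming the controller
 * supports them), overriding whatever the board code set up:
 *
 *	spi->mode = SPI_MODE_3;
 *	spi->bits_per_word = 16;
 *	status = spi_setup(spi);
 *	if (status < 0)
 *		return status;
 */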

/**
 * spi_async -- asynchronous SPI transfer
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 *
 * This call may be used in_irq and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code.  After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 */
static inline int
spi_async(struct spi_device *spi, struct spi_message *message)
{
	message->spi = spi;
	return spi->master->transfer(spi, message);
}
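
/*
 * Example (illustrative sketch only):  submitting a message without
 * blocking, and learning about its fate in the completion callback.
 * The "my_request" wrapper and its fields are hypothetical; the message
 * and its buffers must stay intact until complete() has run.
 *
 *	struct my_request {
 *		struct spi_message	msg;
 *		struct spi_transfer	xfer;
 *	};
 *
 *	static void my_complete(void *context)
 *	{
 *		struct my_request	*req = context;
 *
 *		if (req->msg.status < 0)
 *			...handle the fault...
 *		else
 *			...req->msg.actual_length bytes were transferred...
 *	}
 *
 * After spi_message_init() and spi_message_add_tail(), the submitter sets
 * req->msg.complete = my_complete and req->msg.context = req, then calls
 * spi_async(spi, &req->msg); this may happen even from irq context.
 */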

/*---------------------------------------------------------------------------*/

/* All these synchronous SPI transfer routines are utilities layered
 * over the core async transfer primitive.  Here, "synchronous" means
 * they will sleep uninterruptibly until the async transfer completes.
 */

extern int spi_sync(struct spi_device *spi, struct spi_message *message);

/**
 * spi_write - SPI synchronous write
 * @spi: device to which data will be written
 * @buf: data buffer
 * @len: data buffer size
 *
 * This writes the buffer and returns zero or a negative error code.
 * Callable only from contexts that can sleep.
 */
static inline int
spi_write(struct spi_device *spi, const u8 *buf, size_t len)
{
	struct spi_transfer	t = {
			.tx_buf		= buf,
			.len		= len,
		};
	struct spi_message	m;

	spi_message_init(&m);
	spi_message_add_tail(&t, &m);
	return spi_sync(spi, &m);
}

/**
 * spi_read - SPI synchronous read
 * @spi: device from which data will be read
 * @buf: data buffer
 * @len: data buffer size
 *
 * This reads into the buffer and returns zero or a negative error code.
 * Callable only from contexts that can sleep.
 */
static inline int
spi_read(struct spi_device *spi, u8 *buf, size_t len)
{
	struct spi_transfer	t = {
			.rx_buf		= buf,
			.len		= len,
		};
	struct spi_message	m;

	spi_message_init(&m);
	spi_message_add_tail(&t, &m);
	return spi_sync(spi, &m);
}

/* this copies txbuf and rxbuf data; for small transfers only! */
extern int spi_write_then_read(struct spi_device *spi,
		const u8 *txbuf, unsigned n_tx,
		u8 *rxbuf, unsigned n_rx);

/**
 * spi_w8r8 - SPI synchronous 8 bit write followed by 8 bit read
 * @spi: device with which data will be exchanged
 * @cmd: command to be written before data is read back
 *
 * This returns the (unsigned) eight bit number returned by the
 * device, or else a negative error code.  Callable only from
 * contexts that can sleep.
 */
static inline ssize_t spi_w8r8(struct spi_device *spi, u8 cmd)
{
	ssize_t			status;
	u8			result;

	status = spi_write_then_read(spi, &cmd, 1, &result, 1);

	/* return negative errno or unsigned value */
	return (status < 0) ? status : result;
}

/**
 * spi_w8r16 - SPI synchronous 8 bit write followed by 16 bit read
 * @spi: device with which data will be exchanged
 * @cmd: command to be written before data is read back
 *
 * This returns the (unsigned) sixteen bit number returned by the
 * device, or else a negative error code.  Callable only from
 * contexts that can sleep.
 *
 * The number is returned in wire-order, which is at least sometimes
 * big-endian.
 */
static inline ssize_t spi_w8r16(struct spi_device *spi, u8 cmd)
{
	ssize_t			status;
	u16			result;

	status = spi_write_then_read(spi, &cmd, 1, (u8 *) &result, 2);

	/* return negative errno or unsigned value */
	return (status < 0) ? status : result;
}
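
/*
 * Example (illustrative sketch only):  because spi_w8r16() returns the two
 * bytes in wire order, a chip that transmits registers MSB-first would be
 * handled like this ("MYCHIP_READ_STATUS" is a hypothetical command byte):
 *
 *	ssize_t	status = spi_w8r16(spi, MYCHIP_READ_STATUS);
 *	u16	value;
 *
 *	if (status < 0)
 *		return status;
 *	value = be16_to_cpu((__be16) status);
 */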

/*---------------------------------------------------------------------------*/

/*
 * INTERFACE between board init code and SPI infrastructure.
 *
 * No SPI driver ever sees these SPI device table segments, but
 * it's how the SPI core (or adapters that get hotplugged) grows
 * the driver model tree.
 *
 * As a rule, SPI devices can't be probed.  Instead, board init code
 * provides a table listing the devices which are present, with enough
 * information to bind and set up the device's driver.  There's basic
 * support for nonstatic configurations too; enough to handle adding
 * parport adapters, or microcontrollers acting as USB-to-SPI bridges.
 */

/* board-specific information about each SPI device */
struct spi_board_info {
	/* the device name and module name are coupled, like platform_bus;
	 * "modalias" is normally the driver name.
	 *
	 * platform_data goes to spi_device.dev.platform_data,
	 * controller_data goes to spi_device.controller_data,
	 * irq is copied too
	 */
	char		modalias[KOBJ_NAME_LEN];
	const void	*platform_data;
	void		*controller_data;
	int		irq;

	/* slower signaling on noisy or low voltage boards */
	u32		max_speed_hz;


	/* bus_num is board specific and matches the bus_num of some
	 * spi_master that will probably be registered later.
	 *
	 * chip_select reflects how this chip is wired to that master;
	 * it's less than num_chipselect.
	 */
	u16		bus_num;
	u16		chip_select;

	/* ... may need additional spi_device chip config data here.
	 * avoid stuff protocol drivers can set; but include stuff
	 * needed to behave without being bound to a driver:
	 *  - chipselect polarity
	 *  - quirks like clock rate mattering when not selected
	 */
};

#ifdef	CONFIG_SPI
extern int
spi_register_board_info(struct spi_board_info const *info, unsigned n);
#else
/* board init code may ignore whether SPI is configured or not */
static inline int
spi_register_board_info(struct spi_board_info const *info, unsigned n)
	{ return 0; }
#endif
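
/*
 * Example (illustrative sketch only):  a board file declares the SPI
 * devices wired onto that board and registers them during arch init.
 * The chip name, platform data, irq, and numbering shown here are
 * hypothetical for this board.
 *
 *	static struct spi_board_info my_board_spi_devices[] __initdata = {
 *		{
 *			.modalias	= "mytouchscreen",
 *			.platform_data	= &my_touchscreen_info,
 *			.irq		= MY_TS_IRQ,
 *			.max_speed_hz	= 120000,
 *			.bus_num	= 1,
 *			.chip_select	= 0,
 *		},
 *	};
 *
 * Board setup code then calls
 *	spi_register_board_info(my_board_spi_devices,
 *			ARRAY_SIZE(my_board_spi_devices));
 * before the matching spi_master registers and creates the devices.
 */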


/* If you're hotplugging an adapter with devices (parport, usb, etc)
 * use spi_new_device() to describe each device.  You can also call
 * spi_unregister_device() to start making that device vanish, but
 * normally that would be handled by spi_unregister_master().
 */
extern struct spi_device *
spi_new_device(struct spi_master *, struct spi_board_info *);

static inline void
spi_unregister_device(struct spi_device *spi)
{
	if (spi)
		device_unregister(&spi->dev);
}

#endif /* __LINUX_SPI_H */