/*
 * SPI bridge driver for the Greybus "generic" SPI module.
 *
 * Copyright 2014-2015 Google Inc.
 * Copyright 2014-2015 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>

#include "greybus.h"

struct gb_spi {
        struct gb_connection *connection;
        u16 mode;
        u16 flags;
        u32 bits_per_word_mask;
        u8 num_chipselect;
        u32 min_speed_hz;
        u32 max_speed_hz;
};

static struct spi_master *get_master_from_spi(struct gb_spi *spi)
{
        return spi->connection->private;
}

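/*
 * Check whether one more transfer descriptor still fits in the request
 * payload: returns 1 if the tx data accumulated so far plus the headers
 * for count + 1 transfers fit within data_max, 0 otherwise.
 */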
static int tx_header_fit_operation(u32 tx_size, u32 count, size_t data_max)
{
        size_t headers_size;

        data_max -= sizeof(struct gb_spi_transfer_request);
        headers_size = (count + 1) * sizeof(struct gb_spi_transfer);

        return tx_size + headers_size > data_max ? 0 : 1;
}

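/*
 * Work out how much of this transfer's rx data fits in the response
 * payload.  The length is clamped to the space left after rx_size bytes,
 * and for a write_read transfer the rx and tx lengths are kept equal
 * (whichever is smaller wins) so that request and response stay symmetric.
 */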
static size_t calc_rx_xfer_size(u32 rx_size, u32 *tx_xfer_size, u32 len,
                                size_t data_max)
{
        size_t rx_xfer_size;

        data_max -= sizeof(struct gb_spi_transfer_response);

        if (rx_size + len > data_max)
                rx_xfer_size = data_max - rx_size;
        else
                rx_xfer_size = len;

        /* if this is a write_read, for symmetry read the same as write */
        if (*tx_xfer_size && rx_xfer_size > *tx_xfer_size)
                rx_xfer_size = *tx_xfer_size;
        if (*tx_xfer_size && rx_xfer_size < *tx_xfer_size)
                *tx_xfer_size = rx_xfer_size;

        return rx_xfer_size;
}

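/*
 * Work out how much of this transfer's tx data can go into the request
 * payload once the request header and the descriptors for count + 1
 * transfers are accounted for; the full length is returned when it fits,
 * otherwise a truncated length.
 */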
static size_t calc_tx_xfer_size(u32 tx_size, u32 count, size_t len,
                                size_t data_max)
{
        size_t headers_size;

        data_max -= sizeof(struct gb_spi_transfer_request);
        headers_size = (count + 1) * sizeof(struct gb_spi_transfer);

        if (tx_size + headers_size + len > data_max)
                return data_max - (tx_size + sizeof(struct gb_spi_transfer));

        return len;
}

/* Routines to transfer data */
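/*
 * Build one Greybus SPI transfer operation from @msg.  The first pass walks
 * the queued transfers and packs as many as fit in a single operation
 * payload: one gb_spi_transfer descriptor per transfer plus the
 * concatenated tx data in the request, and room for the concatenated rx
 * data in the response.  A transfer that does not fit is truncated and the
 * walk stops there.  On return *total_len holds the number of data bytes
 * actually included.  Returns NULL on a bufferless transfer or if the
 * operation cannot be allocated.
 */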
static struct gb_operation *
gb_spi_operation_create(struct gb_connection *connection,
                        struct spi_message *msg, u32 *total_len)
{
        struct gb_spi_transfer_request *request;
        struct spi_device *dev = msg->spi;
        struct spi_transfer *xfer;
        struct gb_spi_transfer *gb_xfer;
        struct gb_operation *operation;
        struct spi_transfer *last_xfer = NULL;
        u32 tx_size = 0, rx_size = 0, count = 0, xfer_len = 0, request_size;
        u32 tx_xfer_size = 0, rx_xfer_size = 0, last_xfer_size = 0;
        size_t data_max;
        void *tx_data;

        data_max = gb_operation_get_payload_size_max(connection);

        /* Find number of transfers queued and tx/rx length in the message */
        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                if (!xfer->tx_buf && !xfer->rx_buf) {
                        dev_err(&connection->bundle->dev,
                                "bufferless transfer, length %u\n", xfer->len);
                        return NULL;
                }
                last_xfer = xfer;

                tx_xfer_size = 0;
                rx_xfer_size = 0;

                if (xfer->tx_buf) {
                        if (!tx_header_fit_operation(tx_size, count, data_max))
                                break;
                        tx_xfer_size = calc_tx_xfer_size(tx_size, count,
                                                         xfer->len, data_max);
                        last_xfer_size = tx_xfer_size;
                }

                if (xfer->rx_buf) {
                        rx_xfer_size = calc_rx_xfer_size(rx_size, &tx_xfer_size,
                                                         xfer->len, data_max);
                        last_xfer_size = rx_xfer_size;
                }

                tx_size += tx_xfer_size;
                rx_size += rx_xfer_size;

                *total_len += last_xfer_size;
                count++;

                if (xfer->len != last_xfer_size)
                        break;
        }

        /*
         * In addition to space for all message descriptors we need
         * to have enough to hold all tx data.
         */
        request_size = sizeof(*request);
        request_size += count * sizeof(*gb_xfer);
        request_size += tx_size;

        /* Response consists only of incoming data */
        operation = gb_operation_create(connection, GB_SPI_TYPE_TRANSFER,
                                        request_size, rx_size, GFP_KERNEL);
        if (!operation)
                return NULL;

        request = operation->request->payload;
        request->count = cpu_to_le16(count);
        request->mode = dev->mode;
        request->chip_select = dev->chip_select;

        gb_xfer = &request->transfers[0];
        tx_data = gb_xfer + count;      /* place tx data after last gb_xfer */

        /* Fill in the transfers array */
        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                if (last_xfer && xfer == last_xfer)
                        xfer_len = last_xfer_size;
                else
                        xfer_len = xfer->len;

                gb_xfer->speed_hz = cpu_to_le32(xfer->speed_hz);
                gb_xfer->len = cpu_to_le32(xfer_len);
                gb_xfer->delay_usecs = cpu_to_le16(xfer->delay_usecs);
                gb_xfer->cs_change = xfer->cs_change;
                gb_xfer->bits_per_word = xfer->bits_per_word;

                /* Copy tx data */
                if (xfer->tx_buf) {
                        gb_xfer->rdwr |= GB_SPI_XFER_WRITE;
                        memcpy(tx_data, xfer->tx_buf, xfer_len);
                        tx_data += xfer_len;
                }

                if (xfer->rx_buf)
                        gb_xfer->rdwr |= GB_SPI_XFER_READ;

                if (last_xfer && xfer == last_xfer)
                        break;

                gb_xfer++;
        }

        return operation;
}

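/*
 * Copy the received data from the response payload back into the rx
 * buffers of the message, one transfer at a time in queue order.
 */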
static void gb_spi_decode_response(struct spi_message *msg,
                                   struct gb_spi_transfer_response *response)
{
        struct spi_transfer *xfer;
        void *rx_data = response->data;

        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                /* Copy rx data */
                if (xfer->rx_buf) {
                        memcpy(xfer->rx_buf, rx_data, xfer->len);
                        rx_data += xfer->len;
                }
        }
}

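/*
 * spi_master transfer_one_message() callback: pack the message into a
 * Greybus transfer operation, send it synchronously, copy any rx data
 * back on success, then finalize the message.
 */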
static int gb_spi_transfer_one_message(struct spi_master *master,
                                       struct spi_message *msg)
{
        struct gb_spi *spi = spi_master_get_devdata(master);
        struct gb_connection *connection = spi->connection;
        struct gb_spi_transfer_response *response;
        struct gb_operation *operation;
        u32 len = 0;
        int ret;

        operation = gb_spi_operation_create(connection, msg, &len);
        if (!operation)
                return -ENOMEM;

        ret = gb_operation_request_send_sync(operation);
        if (!ret) {
                response = operation->response->payload;
                if (response)
                        gb_spi_decode_response(msg, response);
        } else {
                dev_err(&connection->bundle->dev,
                        "transfer operation failed: %d\n", ret);
        }

        gb_operation_put(operation);

        msg->actual_length = len;
        msg->status = 0;
        spi_finalize_current_message(master);

        return ret;
}

static int gb_spi_setup(struct spi_device *spi)
{
        /* Nothing to do for now */
        return 0;
}

static void gb_spi_cleanup(struct spi_device *spi)
{
        /* Nothing to do for now */
}

/* Routines to get controller information */

/*
 * Map Greybus SPI mode bits/flags/bpw into Linux ones.
 * All bits are the same for now, so these macros return the same values.
 */
#define gb_spi_mode_map(mode) mode
#define gb_spi_flags_map(flags) flags

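/*
 * Fetch the controller configuration from the module: supported modes,
 * flags, bits-per-word mask, number of chip selects and the speed range.
 */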
static int gb_spi_get_master_config(struct gb_spi *spi)
{
        struct gb_spi_master_config_response response;
        u16 mode, flags;
        int ret;

        ret = gb_operation_sync(spi->connection, GB_SPI_TYPE_MASTER_CONFIG,
                                NULL, 0, &response, sizeof(response));
        if (ret < 0)
                return ret;

        mode = le16_to_cpu(response.mode);
        spi->mode = gb_spi_mode_map(mode);

        flags = le16_to_cpu(response.flags);
        spi->flags = gb_spi_flags_map(flags);

        spi->bits_per_word_mask = le32_to_cpu(response.bits_per_word_mask);
        spi->num_chipselect = response.num_chipselect;

        spi->min_speed_hz = le32_to_cpu(response.min_speed_hz);
        spi->max_speed_hz = le32_to_cpu(response.max_speed_hz);

        return 0;
}

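/*
 * Query the device behind chip select @cs and register a matching
 * spi_device: the modalias is chosen from the reported device type (SPI
 * dev, SPI NOR, or a modalias string supplied by the module).
 */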
static int gb_spi_setup_device(struct gb_spi *spi, u8 cs)
{
        struct spi_master *master = get_master_from_spi(spi);
        struct gb_spi_device_config_request request;
        struct gb_spi_device_config_response response;
        struct spi_board_info spi_board = { {0} };
        struct spi_device *spidev;
        int ret;
        u8 dev_type;

        request.chip_select = cs;

        ret = gb_operation_sync(spi->connection, GB_SPI_TYPE_DEVICE_CONFIG,
                                &request, sizeof(request),
                                &response, sizeof(response));
        if (ret < 0)
                return ret;

        dev_type = response.device_type;

        if (dev_type == GB_SPI_SPI_DEV)
                strlcpy(spi_board.modalias, SPI_DEV_MODALIAS,
                        sizeof(spi_board.modalias));
        else if (dev_type == GB_SPI_SPI_NOR)
                strlcpy(spi_board.modalias, SPI_NOR_MODALIAS,
                        sizeof(spi_board.modalias));
        else if (dev_type == GB_SPI_SPI_MODALIAS)
                memcpy(spi_board.modalias, response.name,
                       sizeof(spi_board.modalias));
        else
                return -EINVAL;

        spi_board.mode = le16_to_cpu(response.mode);
        spi_board.bus_num = master->bus_num;
        spi_board.chip_select = cs;
        spi_board.max_speed_hz = le32_to_cpu(response.max_speed_hz);

        spidev = spi_new_device(master, &spi_board);
        if (!spidev)
                return -EINVAL;

        return 0;
}

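/*
 * Bring up the bridge when the SPI connection is initialized: allocate an
 * SPI master, fetch the controller configuration over Greybus, register
 * the master with the SPI core and then register one spi_device per chip
 * select reported by the module.
 */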
static int gb_spi_connection_init(struct gb_connection *connection)
{
        struct gb_spi *spi;
        struct spi_master *master;
        int ret;
        u8 i;

        /* Allocate master with space for data */
        master = spi_alloc_master(&connection->bundle->dev, sizeof(*spi));
        if (!master) {
                dev_err(&connection->bundle->dev, "cannot alloc SPI master\n");
                return -ENOMEM;
        }

        spi = spi_master_get_devdata(master);
        spi->connection = connection;
        connection->private = master;

        /* get master configuration */
        ret = gb_spi_get_master_config(spi);
        if (ret)
                goto out_put_master;

        master->bus_num = -1; /* Allow spi-core to allocate it dynamically */
        master->num_chipselect = spi->num_chipselect;
        master->mode_bits = spi->mode;
        master->flags = spi->flags;
        master->bits_per_word_mask = spi->bits_per_word_mask;

        /* Attach methods */
        master->cleanup = gb_spi_cleanup;
        master->setup = gb_spi_setup;
        master->transfer_one_message = gb_spi_transfer_one_message;

        ret = spi_register_master(master);
        if (ret < 0)
                goto out_put_master;

        /* Now fetch the configuration of each device */
        for (i = 0; i < spi->num_chipselect; i++) {
                ret = gb_spi_setup_device(spi, i);
                if (ret < 0) {
                        dev_err(&connection->bundle->dev,
                                "failed to allocate spi device: %d\n", ret);
                        spi_unregister_master(master);
                        break;
                }
        }

        return ret;

out_put_master:
        spi_master_put(master);

        return ret;
}

static void gb_spi_connection_exit(struct gb_connection *connection)
{
        struct spi_master *master = connection->private;

        spi_unregister_master(master);
}

static struct gb_protocol spi_protocol = {
        .name			= "spi",
        .id			= GREYBUS_PROTOCOL_SPI,
        .major			= GB_SPI_VERSION_MAJOR,
        .minor			= GB_SPI_VERSION_MINOR,
        .connection_init	= gb_spi_connection_init,
        .connection_exit	= gb_spi_connection_exit,
        .request_recv		= NULL,
};

gb_builtin_protocol_driver(spi_protocol);