/*
 * SH SPI bus driver
 *
 * Copyright (C) 2011 Renesas Solutions Corp.
 *
 * Based on pxa2xx_spi.c:
 * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/spi/spi.h>

#define SPI_SH_TBR	0x00
#define SPI_SH_RBR	0x00
#define SPI_SH_CR1	0x08
#define SPI_SH_CR2	0x10
#define SPI_SH_CR3	0x18
#define SPI_SH_CR4	0x20
#define SPI_SH_CR5	0x28

/* CR1 */
#define SPI_SH_TBE	0x80
#define SPI_SH_TBF	0x40
#define SPI_SH_RBE	0x20
#define SPI_SH_RBF	0x10
#define SPI_SH_PFONRD	0x08
#define SPI_SH_SSDB	0x04
#define SPI_SH_SSD	0x02
#define SPI_SH_SSA	0x01

/* CR2 */
#define SPI_SH_RSTF	0x80
#define SPI_SH_LOOPBK	0x40
#define SPI_SH_CPOL	0x20
#define SPI_SH_CPHA	0x10
#define SPI_SH_L1M0	0x08

/* CR3 */
#define SPI_SH_MAX_BYTE	0xFF

/* CR4 */
#define SPI_SH_TBEI	0x80
#define SPI_SH_TBFI	0x40
#define SPI_SH_RBEI	0x20
#define SPI_SH_RBFI	0x10
#define SPI_SH_WPABRT	0x04
#define SPI_SH_SSS	0x01

/* CR8 */
#define SPI_SH_P1L0	0x80
#define SPI_SH_PP1L0	0x40
#define SPI_SH_MUXI	0x20
#define SPI_SH_MUXIRQ	0x10

#define SPI_SH_FIFO_SIZE	32
#define SPI_SH_SEND_TIMEOUT	(3 * HZ)
#define SPI_SH_RECEIVE_TIMEOUT	(HZ >> 3)

#undef DEBUG

struct spi_sh_data {
	void __iomem *addr;
	int irq;
	struct spi_master *master;
	struct list_head queue;
	struct workqueue_struct *workqueue;
	struct work_struct ws;
	unsigned long cr1;
	wait_queue_head_t wait;
	spinlock_t lock;
	int width;
};

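/*
 * Register accessors.  The controller can sit behind either an 8-bit or a
 * 32-bit bus (ss->width, taken from the platform memory resource in probe).
 * The offsets defined above are for the 32-bit mapping; for the 8-bit
 * mapping the same registers are packed at byte stride, so the offset is
 * divided by four (offset >> 2).
 */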
static void spi_sh_write(struct spi_sh_data *ss, unsigned long data,
			 unsigned long offset)
{
	if (ss->width == 8)
		iowrite8(data, ss->addr + (offset >> 2));
	else if (ss->width == 32)
		iowrite32(data, ss->addr + offset);
}

static unsigned long spi_sh_read(struct spi_sh_data *ss, unsigned long offset)
{
	if (ss->width == 8)
		return ioread8(ss->addr + (offset >> 2));
	else if (ss->width == 32)
		return ioread32(ss->addr + offset);
	else
		return 0;
}

static void spi_sh_set_bit(struct spi_sh_data *ss, unsigned long val,
			   unsigned long offset)
{
	unsigned long tmp;

	tmp = spi_sh_read(ss, offset);
	tmp |= val;
	spi_sh_write(ss, tmp, offset);
}

static void spi_sh_clear_bit(struct spi_sh_data *ss, unsigned long val,
			     unsigned long offset)
{
	unsigned long tmp;

	tmp = spi_sh_read(ss, offset);
	tmp &= ~val;
	spi_sh_write(ss, tmp, offset);
}

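/* Reset the transmit/receive FIFOs by pulsing the RSTF bit in CR2. */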
static void clear_fifo(struct spi_sh_data *ss)
{
	spi_sh_set_bit(ss, SPI_SH_RSTF, SPI_SH_CR2);
	spi_sh_clear_bit(ss, SPI_SH_RSTF, SPI_SH_CR2);
}

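/*
 * Busy-wait helpers: poll the CR1 status bits in 10us steps for roughly
 * one second before giving up with -ETIMEDOUT.
 */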
static int spi_sh_wait_receive_buffer(struct spi_sh_data *ss)
{
	int timeout = 100000;

	while (spi_sh_read(ss, SPI_SH_CR1) & SPI_SH_RBE) {
		udelay(10);
		if (timeout-- < 0)
			return -ETIMEDOUT;
	}
	return 0;
}

static int spi_sh_wait_write_buffer_empty(struct spi_sh_data *ss)
{
	int timeout = 100000;

	while (!(spi_sh_read(ss, SPI_SH_CR1) & SPI_SH_TBE)) {
		udelay(10);
		if (timeout-- < 0)
			return -ETIMEDOUT;
	}
	return 0;
}

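/*
 * Transmit path: feed the TX FIFO up to SPI_SH_FIFO_SIZE bytes at a time,
 * then sleep until the TBE (transmit buffer empty) interrupt fires before
 * refilling.  A WPABRT status aborts the transfer with -EIO.  On the last
 * transfer of a message, also wait for the final bytes to drain out of
 * the FIFO.
 */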
static int spi_sh_send(struct spi_sh_data *ss, struct spi_message *mesg,
		       struct spi_transfer *t)
{
	int i, retval = 0;
	int remain = t->len;
	int cur_len;
	unsigned char *data;
	long ret;

	if (t->len)
		spi_sh_set_bit(ss, SPI_SH_SSA, SPI_SH_CR1);

	data = (unsigned char *)t->tx_buf;
	while (remain > 0) {
		cur_len = min(SPI_SH_FIFO_SIZE, remain);
		for (i = 0; i < cur_len &&
			    !(spi_sh_read(ss, SPI_SH_CR4) & SPI_SH_WPABRT) &&
			    !(spi_sh_read(ss, SPI_SH_CR1) & SPI_SH_TBF);
		     i++)
			spi_sh_write(ss, (unsigned long)data[i], SPI_SH_TBR);

		if (spi_sh_read(ss, SPI_SH_CR4) & SPI_SH_WPABRT) {
			/* Abort SPI operation */
			spi_sh_set_bit(ss, SPI_SH_WPABRT, SPI_SH_CR4);
			retval = -EIO;
			break;
		}

		cur_len = i;

		remain -= cur_len;
		data += cur_len;

		if (remain > 0) {
			ss->cr1 &= ~SPI_SH_TBE;
			spi_sh_set_bit(ss, SPI_SH_TBE, SPI_SH_CR4);
			ret = wait_event_interruptible_timeout(ss->wait,
						 ss->cr1 & SPI_SH_TBE,
						 SPI_SH_SEND_TIMEOUT);
			if (ret == 0 && !(ss->cr1 & SPI_SH_TBE)) {
				printk(KERN_ERR "%s: timeout\n", __func__);
				return -ETIMEDOUT;
			}
		}
	}

	if (list_is_last(&t->transfer_list, &mesg->transfers)) {
		spi_sh_clear_bit(ss, SPI_SH_SSD | SPI_SH_SSDB, SPI_SH_CR1);
		spi_sh_set_bit(ss, SPI_SH_SSA, SPI_SH_CR1);

		ss->cr1 &= ~SPI_SH_TBE;
		spi_sh_set_bit(ss, SPI_SH_TBE, SPI_SH_CR4);
		ret = wait_event_interruptible_timeout(ss->wait,
					 ss->cr1 & SPI_SH_TBE,
					 SPI_SH_SEND_TIMEOUT);
		if (ret == 0 && !(ss->cr1 & SPI_SH_TBE)) {
			printk(KERN_ERR "%s: timeout\n", __func__);
			return -ETIMEDOUT;
		}
	}

	return retval;
}

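/*
 * Receive path: program the expected byte count into CR3 (capped at
 * SPI_SH_MAX_BYTE), then drain the RX FIFO in chunks of at most
 * SPI_SH_FIFO_SIZE bytes, sleeping on the RBF interrupt while a full
 * chunk is still outstanding.
 */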
static int spi_sh_receive(struct spi_sh_data *ss, struct spi_message *mesg,
			  struct spi_transfer *t)
{
	int i;
	int remain = t->len;
	int cur_len;
	unsigned char *data;
	long ret;

	if (t->len > SPI_SH_MAX_BYTE)
		spi_sh_write(ss, SPI_SH_MAX_BYTE, SPI_SH_CR3);
	else
		spi_sh_write(ss, t->len, SPI_SH_CR3);

	spi_sh_clear_bit(ss, SPI_SH_SSD | SPI_SH_SSDB, SPI_SH_CR1);
	spi_sh_set_bit(ss, SPI_SH_SSA, SPI_SH_CR1);

	spi_sh_wait_write_buffer_empty(ss);

	data = (unsigned char *)t->rx_buf;
	while (remain > 0) {
		if (remain >= SPI_SH_FIFO_SIZE) {
			ss->cr1 &= ~SPI_SH_RBF;
			spi_sh_set_bit(ss, SPI_SH_RBF, SPI_SH_CR4);
			ret = wait_event_interruptible_timeout(ss->wait,
						 ss->cr1 & SPI_SH_RBF,
						 SPI_SH_RECEIVE_TIMEOUT);
			if (ret == 0 &&
			    spi_sh_read(ss, SPI_SH_CR1) & SPI_SH_RBE) {
				printk(KERN_ERR "%s: timeout\n", __func__);
				return -ETIMEDOUT;
			}
		}

		cur_len = min(SPI_SH_FIFO_SIZE, remain);
		for (i = 0; i < cur_len; i++) {
			if (spi_sh_wait_receive_buffer(ss))
				break;
			data[i] = (unsigned char)spi_sh_read(ss, SPI_SH_RBR);
		}

		remain -= cur_len;
		data += cur_len;
	}

	/* deassert CS when SPI is receiving. */
	if (t->len > SPI_SH_MAX_BYTE) {
		clear_fifo(ss);
		spi_sh_write(ss, 1, SPI_SH_CR3);
	} else {
		spi_sh_write(ss, 0, SPI_SH_CR3);
	}

	return 0;
}

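/*
 * Workqueue handler: drain the message queue, running each transfer's
 * send and/or receive phase, complete the message, and finally clear the
 * SSA/SSDB/SSD bits in CR1 and reset the FIFOs.  The queue lock is
 * dropped while the transfers themselves are in progress.
 */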
static void spi_sh_work(struct work_struct *work)
{
	struct spi_sh_data *ss = container_of(work, struct spi_sh_data, ws);
	struct spi_message *mesg;
	struct spi_transfer *t;
	unsigned long flags;
	int ret;

	pr_debug("%s: enter\n", __func__);

	spin_lock_irqsave(&ss->lock, flags);
	while (!list_empty(&ss->queue)) {
		mesg = list_entry(ss->queue.next, struct spi_message, queue);
		list_del_init(&mesg->queue);

		spin_unlock_irqrestore(&ss->lock, flags);
		list_for_each_entry(t, &mesg->transfers, transfer_list) {
			pr_debug("tx_buf = %p, rx_buf = %p\n",
				 t->tx_buf, t->rx_buf);
			pr_debug("len = %d, delay_usecs = %d\n",
				 t->len, t->delay_usecs);

			if (t->tx_buf) {
				ret = spi_sh_send(ss, mesg, t);
				if (ret < 0)
					goto error;
			}
			if (t->rx_buf) {
				ret = spi_sh_receive(ss, mesg, t);
				if (ret < 0)
					goto error;
			}
			mesg->actual_length += t->len;
		}
		spin_lock_irqsave(&ss->lock, flags);

		mesg->status = 0;
		if (mesg->complete)
			mesg->complete(mesg->context);
	}

	clear_fifo(ss);
	spi_sh_set_bit(ss, SPI_SH_SSD, SPI_SH_CR1);
	udelay(100);

	spi_sh_clear_bit(ss, SPI_SH_SSA | SPI_SH_SSDB | SPI_SH_SSD,
			 SPI_SH_CR1);

	clear_fifo(ss);

	spin_unlock_irqrestore(&ss->lock, flags);

	return;

 error:
	mesg->status = ret;
	if (mesg->complete)
		mesg->complete(mesg->context);

	spi_sh_clear_bit(ss, SPI_SH_SSA | SPI_SH_SSDB | SPI_SH_SSD,
			 SPI_SH_CR1);
	clear_fifo(ss);
}

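/*
 * Per-device setup: stop any SPI cycle in progress, reinitialise CR1 and
 * CR3, clear the FIFOs and set the 1/8 clock bits in CR2.
 */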
static int spi_sh_setup(struct spi_device *spi)
{
	struct spi_sh_data *ss = spi_master_get_devdata(spi->master);

	pr_debug("%s: enter\n", __func__);

	spi_sh_write(ss, 0xfe, SPI_SH_CR1);	/* SPI cycle stop */
	spi_sh_write(ss, 0x00, SPI_SH_CR1);	/* CR1 init */
	spi_sh_write(ss, 0x00, SPI_SH_CR3);	/* CR3 init */

	clear_fifo(ss);

	/* 1/8 clock */
	spi_sh_write(ss, spi_sh_read(ss, SPI_SH_CR2) | 0x07, SPI_SH_CR2);
	udelay(10);

	return 0;
}

static int spi_sh_transfer(struct spi_device *spi, struct spi_message *mesg)
{
	struct spi_sh_data *ss = spi_master_get_devdata(spi->master);
	unsigned long flags;

	pr_debug("%s: enter\n", __func__);
	pr_debug("\tmode = %02x\n", spi->mode);

	spin_lock_irqsave(&ss->lock, flags);

	mesg->actual_length = 0;
	mesg->status = -EINPROGRESS;

	spi_sh_clear_bit(ss, SPI_SH_SSA, SPI_SH_CR1);

	list_add_tail(&mesg->queue, &ss->queue);
	queue_work(ss->workqueue, &ss->ws);

	spin_unlock_irqrestore(&ss->lock, flags);

	return 0;
}

static void spi_sh_cleanup(struct spi_device *spi)
{
	struct spi_sh_data *ss = spi_master_get_devdata(spi->master);

	pr_debug("%s: enter\n", __func__);

	spi_sh_clear_bit(ss, SPI_SH_SSA | SPI_SH_SSDB | SPI_SH_SSD,
			 SPI_SH_CR1);
}

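/*
 * Interrupt handler: mirror the CR1 status bits into ss->cr1 for the
 * waiters in spi_sh_send()/spi_sh_receive(), clear the corresponding
 * bits in CR4 and wake anyone sleeping on ss->wait.
 */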
static irqreturn_t spi_sh_irq(int irq, void *_ss)
{
	struct spi_sh_data *ss = (struct spi_sh_data *)_ss;
	unsigned long cr1;

	cr1 = spi_sh_read(ss, SPI_SH_CR1);
	if (cr1 & SPI_SH_TBE)
		ss->cr1 |= SPI_SH_TBE;
	if (cr1 & SPI_SH_TBF)
		ss->cr1 |= SPI_SH_TBF;
	if (cr1 & SPI_SH_RBE)
		ss->cr1 |= SPI_SH_RBE;
	if (cr1 & SPI_SH_RBF)
		ss->cr1 |= SPI_SH_RBF;

	if (ss->cr1) {
		spi_sh_clear_bit(ss, ss->cr1, SPI_SH_CR4);
		wake_up(&ss->wait);
	}

	return IRQ_HANDLED;
}

static int spi_sh_remove(struct platform_device *pdev)
{
	struct spi_sh_data *ss = platform_get_drvdata(pdev);

	spi_unregister_master(ss->master);
	destroy_workqueue(ss->workqueue);
	free_irq(ss->irq, ss);
	iounmap(ss->addr);

	return 0;
}

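/*
 * Probe: the register access width (8 or 32 bit) is taken from the type of
 * the memory resource provided by the platform device; the IRQ, workqueue
 * and spi_master registration set up here are torn down in spi_sh_remove().
 */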
static int spi_sh_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct spi_master *master;
	struct spi_sh_data *ss;
	int ret, irq;

	/* get base addr */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (unlikely(res == NULL)) {
		dev_err(&pdev->dev, "invalid resource\n");
		return -EINVAL;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "platform_get_irq error\n");
		return -ENODEV;
	}

	master = spi_alloc_master(&pdev->dev, sizeof(struct spi_sh_data));
	if (master == NULL) {
		dev_err(&pdev->dev, "spi_alloc_master error.\n");
		return -ENOMEM;
	}

	ss = spi_master_get_devdata(master);
	platform_set_drvdata(pdev, ss);

	switch (res->flags & IORESOURCE_MEM_TYPE_MASK) {
	case IORESOURCE_MEM_8BIT:
		ss->width = 8;
		break;
	case IORESOURCE_MEM_32BIT:
		ss->width = 32;
		break;
	default:
		dev_err(&pdev->dev, "unsupported width\n");
		ret = -ENODEV;
		goto error1;
	}
	ss->irq = irq;
	ss->master = master;
	ss->addr = ioremap(res->start, resource_size(res));
	if (ss->addr == NULL) {
		dev_err(&pdev->dev, "ioremap error.\n");
		ret = -ENOMEM;
		goto error1;
	}
	INIT_LIST_HEAD(&ss->queue);
	spin_lock_init(&ss->lock);
	INIT_WORK(&ss->ws, spi_sh_work);
	init_waitqueue_head(&ss->wait);
	ss->workqueue = create_singlethread_workqueue(
					dev_name(master->dev.parent));
	if (ss->workqueue == NULL) {
		dev_err(&pdev->dev, "create workqueue error\n");
		ret = -EBUSY;
		goto error2;
	}

	ret = request_irq(irq, spi_sh_irq, 0, "spi_sh", ss);
	if (ret < 0) {
		dev_err(&pdev->dev, "request_irq error\n");
		goto error3;
	}

	master->num_chipselect = 2;
	master->bus_num = pdev->id;
	master->setup = spi_sh_setup;
	master->transfer = spi_sh_transfer;
	master->cleanup = spi_sh_cleanup;

	ret = spi_register_master(master);
	if (ret < 0) {
		printk(KERN_ERR "spi_register_master error.\n");
		goto error4;
	}

	return 0;

 error4:
	free_irq(irq, ss);
 error3:
	destroy_workqueue(ss->workqueue);
 error2:
	iounmap(ss->addr);
 error1:
	spi_master_put(master);

	return ret;
}

static struct platform_driver spi_sh_driver = {
	.probe = spi_sh_probe,
	.remove = spi_sh_remove,
	.driver = {
		.name = "sh_spi",
		.owner = THIS_MODULE,
	},
};
module_platform_driver(spi_sh_driver);

MODULE_DESCRIPTION("SH SPI bus driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Yoshihiro Shimoda");
MODULE_ALIAS("platform:sh_spi");