/*
 * SH SPI bus driver
 *
 * Copyright (C) 2011 Renesas Solutions Corp.
 *
 * Based on pxa2xx_spi.c:
 * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/spi/spi.h>

#define SPI_SH_TBR		0x00
#define SPI_SH_RBR		0x00
#define SPI_SH_CR1		0x08
#define SPI_SH_CR2		0x10
#define SPI_SH_CR3		0x18
#define SPI_SH_CR4		0x20
#define SPI_SH_CR5		0x28

/* CR1 */
#define SPI_SH_TBE		0x80
#define SPI_SH_TBF		0x40
#define SPI_SH_RBE		0x20
#define SPI_SH_RBF		0x10
#define SPI_SH_PFONRD		0x08
#define SPI_SH_SSDB		0x04
#define SPI_SH_SSD		0x02
#define SPI_SH_SSA		0x01

/* CR2 */
#define SPI_SH_RSTF		0x80
#define SPI_SH_LOOPBK		0x40
#define SPI_SH_CPOL		0x20
#define SPI_SH_CPHA		0x10
#define SPI_SH_L1M0		0x08

/* CR3 */
#define SPI_SH_MAX_BYTE		0xFF

/* CR4 */
#define SPI_SH_TBEI		0x80
#define SPI_SH_TBFI		0x40
#define SPI_SH_RBEI		0x20
#define SPI_SH_RBFI		0x10
#define SPI_SH_WPABRT		0x04
#define SPI_SH_SSS		0x01

/* CR8 */
#define SPI_SH_P1L0		0x80
#define SPI_SH_PP1L0		0x40
#define SPI_SH_MUXI		0x20
#define SPI_SH_MUXIRQ		0x10

#define SPI_SH_FIFO_SIZE	32
#define SPI_SH_SEND_TIMEOUT	(3 * HZ)
#define SPI_SH_RECEIVE_TIMEOUT	(HZ >> 3)

#undef DEBUG

struct spi_sh_data {
	void __iomem *addr;
	int irq;
	struct spi_master *master;
	struct list_head queue;
	struct workqueue_struct *workqueue;
	struct work_struct ws;
	unsigned long cr1;
	wait_queue_head_t wait;
	spinlock_t lock;
	int width;
};

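/*
 * Register access helpers: depending on how the platform maps the block
 * (IORESOURCE_MEM_8BIT vs. IORESOURCE_MEM_32BIT, see spi_sh_probe()), the
 * same register offsets are reached either through byte-wide accesses at
 * offset >> 2 or through 32-bit accesses at the offset itself.
 */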
static void spi_sh_write(struct spi_sh_data *ss, unsigned long data,
			     unsigned long offset)
{
	if (ss->width == 8)
		iowrite8(data, ss->addr + (offset >> 2));
	else if (ss->width == 32)
		iowrite32(data, ss->addr + offset);
}

static unsigned long spi_sh_read(struct spi_sh_data *ss, unsigned long offset)
{
	if (ss->width == 8)
		return ioread8(ss->addr + (offset >> 2));
	else if (ss->width == 32)
		return ioread32(ss->addr + offset);
	else
		return 0;
}

static void spi_sh_set_bit(struct spi_sh_data *ss, unsigned long val,
				unsigned long offset)
{
	unsigned long tmp;

	tmp = spi_sh_read(ss, offset);
	tmp |= val;
	spi_sh_write(ss, tmp, offset);
}

static void spi_sh_clear_bit(struct spi_sh_data *ss, unsigned long val,
				unsigned long offset)
{
	unsigned long tmp;

	tmp = spi_sh_read(ss, offset);
	tmp &= ~val;
	spi_sh_write(ss, tmp, offset);
}

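/*
 * FIFO and busy-wait helpers: clear_fifo() pulses the RSTF bit in CR2 to
 * reset the FIFOs, and the two wait routines poll CR1 (10 us per
 * iteration, roughly 1 s worst case) for the receive buffer to fill or
 * the transmit buffer to drain.
 */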
static void clear_fifo(struct spi_sh_data *ss)
{
	spi_sh_set_bit(ss, SPI_SH_RSTF, SPI_SH_CR2);
	spi_sh_clear_bit(ss, SPI_SH_RSTF, SPI_SH_CR2);
}

static int spi_sh_wait_receive_buffer(struct spi_sh_data *ss)
{
	int timeout = 100000;

	while (spi_sh_read(ss, SPI_SH_CR1) & SPI_SH_RBE) {
		udelay(10);
		if (timeout-- < 0)
			return -ETIMEDOUT;
	}
	return 0;
}

static int spi_sh_wait_write_buffer_empty(struct spi_sh_data *ss)
{
	int timeout = 100000;

	while (!(spi_sh_read(ss, SPI_SH_CR1) & SPI_SH_TBE)) {
		udelay(10);
		if (timeout-- < 0)
			return -ETIMEDOUT;
	}
	return 0;
}

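/*
 * Push one transfer's tx_buf out through the 32-byte transmit FIFO.  The
 * FIFO is refilled in chunks; between chunks the transmit-buffer-empty
 * interrupt is unmasked in CR4 and the caller sleeps until spi_sh_irq()
 * reports TBE.  On the last transfer of the message, SSD/SSDB are cleared
 * and SSA is set in CR1, and the final drain is awaited as well.  Returns
 * 0 on success, -EIO on a write-protect abort (WPABRT) or -ETIMEDOUT if
 * the FIFO never empties.
 */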
static int spi_sh_send(struct spi_sh_data *ss, struct spi_message *mesg,
			struct spi_transfer *t)
{
	int i, retval = 0;
	int remain = t->len;
	int cur_len;
	unsigned char *data;
	long ret;

	if (t->len)
		spi_sh_set_bit(ss, SPI_SH_SSA, SPI_SH_CR1);

	data = (unsigned char *)t->tx_buf;
	while (remain > 0) {
		cur_len = min(SPI_SH_FIFO_SIZE, remain);
		for (i = 0; i < cur_len &&
				!(spi_sh_read(ss, SPI_SH_CR4) &
						SPI_SH_WPABRT) &&
				!(spi_sh_read(ss, SPI_SH_CR1) & SPI_SH_TBF);
				i++)
			spi_sh_write(ss, (unsigned long)data[i], SPI_SH_TBR);

		if (spi_sh_read(ss, SPI_SH_CR4) & SPI_SH_WPABRT) {
			/* Abort SPI operation */
			spi_sh_set_bit(ss, SPI_SH_WPABRT, SPI_SH_CR4);
			retval = -EIO;
			break;
		}

		cur_len = i;

		remain -= cur_len;
		data += cur_len;

		if (remain > 0) {
			ss->cr1 &= ~SPI_SH_TBE;
			spi_sh_set_bit(ss, SPI_SH_TBE, SPI_SH_CR4);
			ret = wait_event_interruptible_timeout(ss->wait,
						 ss->cr1 & SPI_SH_TBE,
						 SPI_SH_SEND_TIMEOUT);
			if (ret == 0 && !(ss->cr1 & SPI_SH_TBE)) {
				printk(KERN_ERR "%s: timeout\n", __func__);
				return -ETIMEDOUT;
			}
		}
	}

	if (list_is_last(&t->transfer_list, &mesg->transfers)) {
		spi_sh_clear_bit(ss, SPI_SH_SSD | SPI_SH_SSDB, SPI_SH_CR1);
		spi_sh_set_bit(ss, SPI_SH_SSA, SPI_SH_CR1);

		ss->cr1 &= ~SPI_SH_TBE;
		spi_sh_set_bit(ss, SPI_SH_TBE, SPI_SH_CR4);
		ret = wait_event_interruptible_timeout(ss->wait,
					 ss->cr1 & SPI_SH_TBE,
					 SPI_SH_SEND_TIMEOUT);
		if (ret == 0 && !(ss->cr1 & SPI_SH_TBE)) {
			printk(KERN_ERR "%s: timeout\n", __func__);
			return -ETIMEDOUT;
		}
	}

	return retval;
}

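/*
 * Fill one transfer's rx_buf.  The receive byte count is programmed into
 * CR3 (capped at SPI_SH_MAX_BYTE), then data is drained from the receive
 * FIFO; for large transfers the receive-buffer-full interrupt is used to
 * sleep until a FIFO's worth of data has arrived.  Writing CR3 again at
 * the end stops reception and deasserts the chip select.
 */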
static int spi_sh_receive(struct spi_sh_data *ss, struct spi_message *mesg,
			  struct spi_transfer *t)
{
	int i;
	int remain = t->len;
	int cur_len;
	unsigned char *data;
	long ret;

	if (t->len > SPI_SH_MAX_BYTE)
		spi_sh_write(ss, SPI_SH_MAX_BYTE, SPI_SH_CR3);
	else
		spi_sh_write(ss, t->len, SPI_SH_CR3);

	spi_sh_clear_bit(ss, SPI_SH_SSD | SPI_SH_SSDB, SPI_SH_CR1);
	spi_sh_set_bit(ss, SPI_SH_SSA, SPI_SH_CR1);

	spi_sh_wait_write_buffer_empty(ss);

	data = (unsigned char *)t->rx_buf;
	while (remain > 0) {
		if (remain >= SPI_SH_FIFO_SIZE) {
			ss->cr1 &= ~SPI_SH_RBF;
			spi_sh_set_bit(ss, SPI_SH_RBF, SPI_SH_CR4);
			ret = wait_event_interruptible_timeout(ss->wait,
						 ss->cr1 & SPI_SH_RBF,
						 SPI_SH_RECEIVE_TIMEOUT);
			if (ret == 0 &&
			    spi_sh_read(ss, SPI_SH_CR1) & SPI_SH_RBE) {
				printk(KERN_ERR "%s: timeout\n", __func__);
				return -ETIMEDOUT;
			}
		}

		cur_len = min(SPI_SH_FIFO_SIZE, remain);
		for (i = 0; i < cur_len; i++) {
			if (spi_sh_wait_receive_buffer(ss))
				break;
			data[i] = (unsigned char)spi_sh_read(ss, SPI_SH_RBR);
		}

		remain -= cur_len;
		data += cur_len;
	}

	/* deassert CS when SPI is receiving. */
	if (t->len > SPI_SH_MAX_BYTE) {
		clear_fifo(ss);
		spi_sh_write(ss, 1, SPI_SH_CR3);
	} else {
		spi_sh_write(ss, 0, SPI_SH_CR3);
	}

	return 0;
}

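/*
 * Workqueue handler: drains the message queue built up by
 * spi_sh_transfer().  The queue lock is dropped around the actual I/O on
 * each transfer, the message's completion callback is invoked when it is
 * done, and finally the chip-select bits are cleared and the FIFOs are
 * flushed.
 */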
static void spi_sh_work(struct work_struct *work)
{
	struct spi_sh_data *ss = container_of(work, struct spi_sh_data, ws);
	struct spi_message *mesg;
	struct spi_transfer *t;
	unsigned long flags;
	int ret;

	pr_debug("%s: enter\n", __func__);

	spin_lock_irqsave(&ss->lock, flags);
	while (!list_empty(&ss->queue)) {
		mesg = list_entry(ss->queue.next, struct spi_message, queue);
		list_del_init(&mesg->queue);

		spin_unlock_irqrestore(&ss->lock, flags);
		list_for_each_entry(t, &mesg->transfers, transfer_list) {
			pr_debug("tx_buf = %p, rx_buf = %p\n",
					t->tx_buf, t->rx_buf);
			pr_debug("len = %d, delay_usecs = %d\n",
					t->len, t->delay_usecs);

			if (t->tx_buf) {
				ret = spi_sh_send(ss, mesg, t);
				if (ret < 0)
					goto error;
			}
			if (t->rx_buf) {
				ret = spi_sh_receive(ss, mesg, t);
				if (ret < 0)
					goto error;
			}
			mesg->actual_length += t->len;
		}
		spin_lock_irqsave(&ss->lock, flags);

		mesg->status = 0;
		if (mesg->complete)
			mesg->complete(mesg->context);
	}

	clear_fifo(ss);
	spi_sh_set_bit(ss, SPI_SH_SSD, SPI_SH_CR1);
	udelay(100);

	spi_sh_clear_bit(ss, SPI_SH_SSA | SPI_SH_SSDB | SPI_SH_SSD,
			 SPI_SH_CR1);

	clear_fifo(ss);

	spin_unlock_irqrestore(&ss->lock, flags);

	return;

 error:
	mesg->status = ret;
	if (mesg->complete)
		mesg->complete(mesg->context);

	spi_sh_clear_bit(ss, SPI_SH_SSA | SPI_SH_SSDB | SPI_SH_SSD,
			 SPI_SH_CR1);
	clear_fifo(ss);

}

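/*
 * Per-device setup: stop any running cycle, reset CR1/CR3 and the FIFOs,
 * and program the clock setting in CR2 (the low bits are set to 0x07,
 * the 1/8-clock setting noted below).
 */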
static int spi_sh_setup(struct spi_device *spi)
{
	struct spi_sh_data *ss = spi_master_get_devdata(spi->master);

	pr_debug("%s: enter\n", __func__);

	spi_sh_write(ss, 0xfe, SPI_SH_CR1);	/* SPI cycle stop */
	spi_sh_write(ss, 0x00, SPI_SH_CR1);	/* CR1 init */
	spi_sh_write(ss, 0x00, SPI_SH_CR3);	/* CR3 init */

	clear_fifo(ss);

	/* 1/8 clock */
	spi_sh_write(ss, spi_sh_read(ss, SPI_SH_CR2) | 0x07, SPI_SH_CR2);
	udelay(10);

	return 0;
}

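/*
 * ->transfer() hook: queue the message and kick the workqueue.  The
 * actual I/O happens later in spi_sh_work(), so this returns immediately
 * with the message marked -EINPROGRESS.
 */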
static int spi_sh_transfer(struct spi_device *spi, struct spi_message *mesg)
{
	struct spi_sh_data *ss = spi_master_get_devdata(spi->master);
	unsigned long flags;

	pr_debug("%s: enter\n", __func__);
	pr_debug("\tmode = %02x\n", spi->mode);

	spin_lock_irqsave(&ss->lock, flags);

	mesg->actual_length = 0;
	mesg->status = -EINPROGRESS;

	spi_sh_clear_bit(ss, SPI_SH_SSA, SPI_SH_CR1);

	list_add_tail(&mesg->queue, &ss->queue);
	queue_work(ss->workqueue, &ss->ws);

	spin_unlock_irqrestore(&ss->lock, flags);

	return 0;
}

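/*
 * ->cleanup() hook: release the chip-select control bits when a device
 * is torn down.
 */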
static void spi_sh_cleanup(struct spi_device *spi)
{
	struct spi_sh_data *ss = spi_master_get_devdata(spi->master);

	pr_debug("%s: enter\n", __func__);

	spi_sh_clear_bit(ss, SPI_SH_SSA | SPI_SH_SSDB | SPI_SH_SSD,
			 SPI_SH_CR1);
}

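/*
 * Interrupt handler: latch the current FIFO status bits from CR1 into
 * ss->cr1, mask the corresponding interrupt sources in CR4 and wake up
 * whichever of spi_sh_send()/spi_sh_receive() is sleeping on ss->wait.
 */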
static irqreturn_t spi_sh_irq(int irq, void *_ss)
{
	struct spi_sh_data *ss = (struct spi_sh_data *)_ss;
	unsigned long cr1;

	cr1 = spi_sh_read(ss, SPI_SH_CR1);
	if (cr1 & SPI_SH_TBE)
		ss->cr1 |= SPI_SH_TBE;
	if (cr1 & SPI_SH_TBF)
		ss->cr1 |= SPI_SH_TBF;
	if (cr1 & SPI_SH_RBE)
		ss->cr1 |= SPI_SH_RBE;
	if (cr1 & SPI_SH_RBF)
		ss->cr1 |= SPI_SH_RBF;

	if (ss->cr1) {
		spi_sh_clear_bit(ss, ss->cr1, SPI_SH_CR4);
		wake_up(&ss->wait);
	}

	return IRQ_HANDLED;
}

static int spi_sh_remove(struct platform_device *pdev)
{
	struct spi_sh_data *ss = platform_get_drvdata(pdev);

	spi_unregister_master(ss->master);
	destroy_workqueue(ss->workqueue);
	free_irq(ss->irq, ss);

	return 0;
}

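/*
 * Probe: the register width (8- or 32-bit) is taken from the memory
 * resource flags, the interrupt and I/O memory are claimed, a
 * single-threaded workqueue is created for message processing, and the
 * controller is registered with two chip selects.
 */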
static int spi_sh_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct spi_master *master;
	struct spi_sh_data *ss;
	int ret, irq;

	/* get base addr */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (unlikely(res == NULL)) {
		dev_err(&pdev->dev, "invalid resource\n");
		return -EINVAL;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "platform_get_irq error\n");
		return -ENODEV;
	}

	master = spi_alloc_master(&pdev->dev, sizeof(struct spi_sh_data));
	if (master == NULL) {
		dev_err(&pdev->dev, "spi_alloc_master error.\n");
		return -ENOMEM;
	}

	ss = spi_master_get_devdata(master);
	platform_set_drvdata(pdev, ss);

	switch (res->flags & IORESOURCE_MEM_TYPE_MASK) {
	case IORESOURCE_MEM_8BIT:
		ss->width = 8;
		break;
	case IORESOURCE_MEM_32BIT:
		ss->width = 32;
		break;
	default:
		dev_err(&pdev->dev, "No support width\n");
		ret = -ENODEV;
		goto error1;
	}
	ss->irq = irq;
	ss->master = master;
	ss->addr = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (ss->addr == NULL) {
		dev_err(&pdev->dev, "ioremap error.\n");
		ret = -ENOMEM;
		goto error1;
	}
	INIT_LIST_HEAD(&ss->queue);
	spin_lock_init(&ss->lock);
	INIT_WORK(&ss->ws, spi_sh_work);
	init_waitqueue_head(&ss->wait);
	ss->workqueue = create_singlethread_workqueue(
					dev_name(master->dev.parent));
	if (ss->workqueue == NULL) {
		dev_err(&pdev->dev, "create workqueue error\n");
		ret = -EBUSY;
		goto error1;
	}

	ret = request_irq(irq, spi_sh_irq, 0, "spi_sh", ss);
	if (ret < 0) {
		dev_err(&pdev->dev, "request_irq error\n");
		goto error2;
	}

	master->num_chipselect = 2;
	master->bus_num = pdev->id;
	master->setup = spi_sh_setup;
	master->transfer = spi_sh_transfer;
	master->cleanup = spi_sh_cleanup;

	ret = spi_register_master(master);
	if (ret < 0) {
		printk(KERN_ERR "spi_register_master error.\n");
		goto error3;
	}

	return 0;

 error3:
	free_irq(irq, ss);
 error2:
	destroy_workqueue(ss->workqueue);
 error1:
	spi_master_put(master);

	return ret;
}

static struct platform_driver spi_sh_driver = {
	.probe = spi_sh_probe,
	.remove = spi_sh_remove,
	.driver = {
		.name = "sh_spi",
	},
};
module_platform_driver(spi_sh_driver);

MODULE_DESCRIPTION("SH SPI bus driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Yoshihiro Shimoda");
MODULE_ALIAS("platform:sh_spi");