/*
 * Freescale STMP378X SPI master driver
 *
 * Author: dmitry pervushin <dimka@embeddedalley.com>
 *
 * Copyright 2008 Freescale Semiconductor, Inc. All Rights Reserved.
 * Copyright 2008 Embedded Alley Solutions, Inc All Rights Reserved.
 */

/*
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>

#include <mach/platform.h>
#include <mach/stmp3xxx.h>
#include <mach/dma.h>
#include <mach/regs-ssp.h>
#include <mach/regs-apbh.h>


/* 0 means DMA mode (recommended, default); nonzero selects PIO mode */
static int pio;
/* if nonzero, force the SSP clock to this rate at probe time */
static int clock;

/* default timeout for busy waits is 2 seconds */
#define STMP_SPI_TIMEOUT	(2 * HZ)

struct stmp_spi {
	int id;

	void __iomem *regs;	/* vaddr of the control registers */

	int irq, err_irq;	/* DMA-done and (shared) SSP error IRQs */
	u32 dma;		/* APBH DMA channel number */
	struct stmp3xxx_dma_descriptor d;

	u32 speed_khz;		/* SSP clock rate, in kHz */
	u32 saved_timings;	/* HW_SSP_TIMING, saved across suspend */
	u32 divider;

	struct clk *clk;
	struct device *master_dev;

	struct work_struct work;
	struct workqueue_struct *workqueue;

	/* lock protects queue access */
	spinlock_t lock;
	struct list_head queue;

	struct completion done;	/* signalled from the DMA IRQ handler */
};

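/*
 * Poll @cond until it becomes true or STMP_SPI_TIMEOUT elapses;
 * evaluates to true if @cond was met, false on timeout.
 */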
#define busy_wait(cond)							\
	({								\
		unsigned long end_jiffies = jiffies + STMP_SPI_TIMEOUT;	\
		bool succeeded = false;					\
		do {							\
			if (cond) {					\
				succeeded = true;			\
				break;					\
			}						\
			cpu_relax();					\
		} while (time_before(jiffies, end_jiffies));		\
		succeeded;						\
	})

/**
 * stmp_spi_init_hw - initialize the SSP port
 * @ss: driver state for this SSP instance
 *
 * Claim the pin group, enable the SSP clock and reset the block
 * and its DMA channel.
 */
static int stmp_spi_init_hw(struct stmp_spi *ss)
{
	int err = 0;
	void *pins = ss->master_dev->platform_data;

	err = stmp3xxx_request_pin_group(pins, dev_name(ss->master_dev));
	if (err)
		goto out;

	ss->clk = clk_get(NULL, "ssp");
	if (IS_ERR(ss->clk)) {
		err = PTR_ERR(ss->clk);
		goto out_free_pins;
	}
	clk_enable(ss->clk);

	stmp3xxx_reset_block(ss->regs, false);
	stmp3xxx_dma_reset_channel(ss->dma);

	return 0;

out_free_pins:
	stmp3xxx_release_pin_group(pins, dev_name(ss->master_dev));
out:
	return err;
}

static void stmp_spi_release_hw(struct stmp_spi *ss)
{
	void *pins = ss->master_dev->platform_data;

	if (ss->clk && !IS_ERR(ss->clk)) {
		clk_disable(ss->clk);
		clk_put(ss->clk);
	}
	stmp3xxx_release_pin_group(pins, dev_name(ss->master_dev));
}

static int stmp_spi_setup_transfer(struct spi_device *spi,
		struct spi_transfer *t)
{
	u8 bits_per_word;
	u32 hz;
	struct stmp_spi *ss = spi_master_get_devdata(spi->master);
	u16 rate;

	bits_per_word = spi->bits_per_word;
	if (t && t->bits_per_word)
		bits_per_word = t->bits_per_word;

	/*
	 * Calculate speed:
	 *	- by default, use maximum speed from ssp clk
	 *	- if device overrides it, use it
	 *	- if transfer specifies other speed, use transfer's one
	 */
	hz = 1000 * ss->speed_khz / ss->divider;
	if (spi->max_speed_hz)
		hz = min(hz, spi->max_speed_hz);
	if (t && t->speed_hz)
		hz = min(hz, t->speed_hz);

	if (hz == 0) {
		dev_err(&spi->dev, "Cannot continue with zero clock\n");
		return -EINVAL;
	}

	if (bits_per_word != 8) {
		dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n",
			__func__, bits_per_word);
		return -EINVAL;
	}

	dev_dbg(&spi->dev, "Requested clk rate = %uHz, max = %ukHz/%d = %uHz\n",
		hz, ss->speed_khz, ss->divider,
		ss->speed_khz * 1000 / ss->divider);

	if (ss->speed_khz * 1000 / ss->divider < hz) {
		dev_err(&spi->dev, "%s, unsupported clock rate %uHz\n",
			__func__, hz);
		return -EINVAL;
	}

	rate = 1000 * ss->speed_khz / ss->divider / hz;
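
	/*
	 * Worked example (illustrative numbers, assuming the SSP bit clock
	 * is SSPCLK / (CLOCK_DIVIDE * (1 + CLOCK_RATE))): with a 48000 kHz
	 * clock and divider 2, a 1 MHz request gives
	 * rate = 48000000 / 2 / 1000000 = 24, CLOCK_RATE is written as 23,
	 * and the resulting bit clock is 48 MHz / (2 * 24) = 1 MHz.
	 */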
	writel(BF(ss->divider, SSP_TIMING_CLOCK_DIVIDE) |
		BF(rate - 1, SSP_TIMING_CLOCK_RATE),
		ss->regs + HW_SSP_TIMING);

	writel(BF(1 /* mode SPI */, SSP_CTRL1_SSP_MODE) |
		BF(4 /* 8 bits */, SSP_CTRL1_WORD_LENGTH) |
		((spi->mode & SPI_CPOL) ? BM_SSP_CTRL1_POLARITY : 0) |
		((spi->mode & SPI_CPHA) ? BM_SSP_CTRL1_PHASE : 0) |
		(pio ? 0 : BM_SSP_CTRL1_DMA_ENABLE),
		ss->regs + HW_SSP_CTRL1);

	return 0;
}

static int stmp_spi_setup(struct spi_device *spi)
{
	/*
	 * spi_setup() does basic checks;
	 * stmp_spi_setup_transfer() does more later.
	 */
	if (spi->bits_per_word != 8) {
		dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n",
			__func__, spi->bits_per_word);
		return -EINVAL;
	}
	return 0;
}

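/*
 * Encode the chip-select number into SSP CTRL0 bits: bit 0 of the CS
 * number maps to WAIT_FOR_CMD and bit 1 to WAIT_FOR_IRQ; the result is
 * OR-ed into CTRL0 by both the PIO and the DMA paths.
 */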
static inline u32 stmp_spi_cs(unsigned cs)
{
	return ((cs & 1) ? BM_SSP_CTRL0_WAIT_FOR_CMD : 0) |
		((cs & 2) ? BM_SSP_CTRL0_WAIT_FOR_IRQ : 0);
}

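/**
 * stmp_spi_txrx_dma - transfer one buffer through the SSP using APBH DMA
 * @ss: driver state
 * @cs: chip select number
 * @buf: CPU pointer to the data, used if @dma_buf is not already mapped
 * @dma_buf: pre-mapped DMA address of the data, or 0 to map @buf here
 * @len: transfer length in bytes
 * @first: this is the first transfer of the message
 * @last: this is the last transfer of the message
 * @write: true for a transmit, false for a receive
 */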
static int stmp_spi_txrx_dma(struct stmp_spi *ss, int cs,
		unsigned char *buf, dma_addr_t dma_buf, int len,
		int first, int last, bool write)
{
	u32 c0 = 0;
	dma_addr_t spi_buf_dma = dma_buf;
	int status = 0;
	enum dma_data_direction dir = write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

	c0 |= (first ? BM_SSP_CTRL0_LOCK_CS : 0);
	c0 |= (last ? BM_SSP_CTRL0_IGNORE_CRC : 0);
	c0 |= (write ? 0 : BM_SSP_CTRL0_READ);
	c0 |= BM_SSP_CTRL0_DATA_XFER;

	c0 |= stmp_spi_cs(cs);

	c0 |= BF(len, SSP_CTRL0_XFER_COUNT);

	if (!dma_buf)
		spi_buf_dma = dma_map_single(ss->master_dev, buf, len, dir);

	/* one DMA command: write CTRL0, move the whole buffer, raise an IRQ */
	ss->d.command->cmd =
		BF(len, APBH_CHn_CMD_XFER_COUNT) |
		BF(1, APBH_CHn_CMD_CMDWORDS) |
		BM_APBH_CHn_CMD_WAIT4ENDCMD |
		BM_APBH_CHn_CMD_IRQONCMPLT |
		BF(write ? BV_APBH_CHn_CMD_COMMAND__DMA_READ :
			BV_APBH_CHn_CMD_COMMAND__DMA_WRITE,
			APBH_CHn_CMD_COMMAND);
	ss->d.command->pio_words[0] = c0;
	ss->d.command->buf_ptr = spi_buf_dma;

	stmp3xxx_dma_reset_channel(ss->dma);
	stmp3xxx_dma_clear_interrupt(ss->dma);
	stmp3xxx_dma_enable_interrupt(ss->dma);
	init_completion(&ss->done);
	stmp3xxx_dma_go(ss->dma, &ss->d, 1);
	wait_for_completion(&ss->done);

	/* wait for the SSP block to finish before unmapping the buffer */
	if (!busy_wait(!(readl(ss->regs + HW_SSP_CTRL0) & BM_SSP_CTRL0_RUN)))
		status = -ETIMEDOUT;

	if (!dma_buf)
		dma_unmap_single(ss->master_dev, spi_buf_dma, len, dir);

	return status;
}

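/*
 * Assuming the usual SSP semantics in SPI mode (as described in the
 * STMP3xxx/i.MX23 reference manual): LOCK_CS keeps the chip select
 * asserted between transfers, and IGNORE_CRC requests that it be
 * deasserted when the current transfer ends.
 */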
static inline void stmp_spi_enable(struct stmp_spi *ss)
{
	stmp3xxx_setl(BM_SSP_CTRL0_LOCK_CS, ss->regs + HW_SSP_CTRL0);
	stmp3xxx_clearl(BM_SSP_CTRL0_IGNORE_CRC, ss->regs + HW_SSP_CTRL0);
}

static inline void stmp_spi_disable(struct stmp_spi *ss)
{
	stmp3xxx_clearl(BM_SSP_CTRL0_LOCK_CS, ss->regs + HW_SSP_CTRL0);
	stmp3xxx_setl(BM_SSP_CTRL0_IGNORE_CRC, ss->regs + HW_SSP_CTRL0);
}

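/*
 * PIO path: move one byte at a time through HW_SSP_DATA, polling CTRL0
 * and STATUS with busy_wait() between the steps (8-bit words only).
 */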
static int stmp_spi_txrx_pio(struct stmp_spi *ss, int cs,
		unsigned char *buf, int len,
		bool first, bool last, bool write)
{
	if (first)
		stmp_spi_enable(ss);

	stmp3xxx_setl(stmp_spi_cs(cs), ss->regs + HW_SSP_CTRL0);

	while (len--) {
		if (last && len <= 0)
			stmp_spi_disable(ss);

		/* byte-at-a-time transfer: set XFER_COUNT to 1 */
		stmp3xxx_clearl(BM_SSP_CTRL0_XFER_COUNT,
				ss->regs + HW_SSP_CTRL0);
		stmp3xxx_setl(1, ss->regs + HW_SSP_CTRL0);

		if (write)
			stmp3xxx_clearl(BM_SSP_CTRL0_READ,
					ss->regs + HW_SSP_CTRL0);
		else
			stmp3xxx_setl(BM_SSP_CTRL0_READ,
					ss->regs + HW_SSP_CTRL0);

		/* Run! */
		stmp3xxx_setl(BM_SSP_CTRL0_RUN, ss->regs + HW_SSP_CTRL0);

		if (!busy_wait(readl(ss->regs + HW_SSP_CTRL0) &
				BM_SSP_CTRL0_RUN))
			break;

		if (write)
			writel(*buf, ss->regs + HW_SSP_DATA);

		/* Set TRANSFER */
		stmp3xxx_setl(BM_SSP_CTRL0_DATA_XFER, ss->regs + HW_SSP_CTRL0);

		if (!write) {
			/* wait until the RX FIFO has data, then read one byte */
			if (!busy_wait(!(readl(ss->regs + HW_SSP_STATUS) &
					BM_SSP_STATUS_FIFO_EMPTY)))
				break;
			*buf = readl(ss->regs + HW_SSP_DATA) & 0xFF;
		}

		/* wait for the block to finish this byte */
		if (!busy_wait(!(readl(ss->regs + HW_SSP_CTRL0) &
				BM_SSP_CTRL0_RUN)))
			break;

		/* advance to the next byte */
		buf++;
	}

	return len < 0 ? 0 : -ETIMEDOUT;
}

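/*
 * Walk the transfers of one message, reprogramming the SSP where a
 * transfer overrides speed or word length, and hand each buffer to the
 * PIO or DMA path with the first/last flags used for chip-select handling.
 */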
static int stmp_spi_handle_message(struct stmp_spi *ss, struct spi_message *m)
{
	bool first, last;
	struct spi_transfer *t, *tmp_t;
	int status = 0;
	int cs;

	cs = m->spi->chip_select;

	list_for_each_entry_safe(t, tmp_t, &m->transfers, transfer_list) {

		first = (&t->transfer_list == m->transfers.next);
		last = (&t->transfer_list == m->transfers.prev);

		if (first || t->speed_hz || t->bits_per_word)
			stmp_spi_setup_transfer(m->spi, t);

		/* reject "not last" transfers which request to change cs */
		if (t->cs_change && !last) {
			dev_err(&m->spi->dev,
				"Transfer with cs_change has been skipped\n");
			continue;
		}

		if (t->tx_buf) {
			status = pio ?
				stmp_spi_txrx_pio(ss, cs, (void *)t->tx_buf,
					t->len, first, last, true) :
				stmp_spi_txrx_dma(ss, cs, (void *)t->tx_buf,
					t->tx_dma, t->len, first, last, true);
#ifdef DEBUG
			if (t->len < 0x10)
				print_hex_dump_bytes("Tx ",
					DUMP_PREFIX_OFFSET,
					t->tx_buf, t->len);
			else
				pr_debug("Tx: %d bytes\n", t->len);
#endif
		}
		if (t->rx_buf) {
			status = pio ?
				stmp_spi_txrx_pio(ss, cs, t->rx_buf,
					t->len, first, last, false) :
				stmp_spi_txrx_dma(ss, cs, t->rx_buf,
					t->rx_dma, t->len, first, last, false);
#ifdef DEBUG
			if (t->len < 0x10)
				print_hex_dump_bytes("Rx ",
					DUMP_PREFIX_OFFSET,
					t->rx_buf, t->len);
			else
				pr_debug("Rx: %d bytes\n", t->len);
#endif
		}

		if (t->delay_usecs)
			udelay(t->delay_usecs);

		if (status)
			break;
	}
	return status;
}

/**
 * stmp_spi_handle - handle messages from the queue
 */
static void stmp_spi_handle(struct work_struct *w)
{
	struct stmp_spi *ss = container_of(w, struct stmp_spi, work);
	unsigned long flags;
	struct spi_message *m;

	spin_lock_irqsave(&ss->lock, flags);
	while (!list_empty(&ss->queue)) {
		m = list_entry(ss->queue.next, struct spi_message, queue);
		list_del_init(&m->queue);
		spin_unlock_irqrestore(&ss->lock, flags);

		m->status = stmp_spi_handle_message(ss, m);
		m->complete(m->context);

		spin_lock_irqsave(&ss->lock, flags);
	}
	spin_unlock_irqrestore(&ss->lock, flags);
}

/**
 * stmp_spi_transfer - queue an SPI message for transfer
 * @spi: spi device
 * @m: message to be queued
 *
 * Called (indirectly) from spi_async(); the message is queued and
 * later processed by stmp_spi_handle().
 */
static int stmp_spi_transfer(struct spi_device *spi, struct spi_message *m)
{
	struct stmp_spi *ss = spi_master_get_devdata(spi->master);
	unsigned long flags;

	m->status = -EINPROGRESS;
	spin_lock_irqsave(&ss->lock, flags);
	list_add_tail(&m->queue, &ss->queue);
	queue_work(ss->workqueue, &ss->work);
	spin_unlock_irqrestore(&ss->lock, flags);
	return 0;
}

static irqreturn_t stmp_spi_irq(int irq, void *dev_id)
{
	struct stmp_spi *ss = dev_id;

	stmp3xxx_dma_clear_interrupt(ss->dma);
	complete(&ss->done);
	return IRQ_HANDLED;
}

static irqreturn_t stmp_spi_irq_err(int irq, void *dev_id)
{
	struct stmp_spi *ss = dev_id;
	u32 c1, st;

	c1 = readl(ss->regs + HW_SSP_CTRL1);
	st = readl(ss->regs + HW_SSP_STATUS);
	dev_err(ss->master_dev, "%s: status = 0x%08X, c1 = 0x%08X\n",
		__func__, st, c1);
	/* clear the flagged CTRL1 bits to acknowledge the error condition */
	stmp3xxx_clearl(c1 & 0xCCCC0000, ss->regs + HW_SSP_CTRL1);

	return IRQ_HANDLED;
}

static int __devinit stmp_spi_probe(struct platform_device *dev)
{
	int err = 0;
	struct spi_master *master;
	struct stmp_spi *ss;
	struct resource *r;

	master = spi_alloc_master(&dev->dev, sizeof(struct stmp_spi));
	if (master == NULL) {
		err = -ENOMEM;
		goto out0;
	}
	master->flags = SPI_MASTER_HALF_DUPLEX;

	ss = spi_master_get_devdata(master);
	platform_set_drvdata(dev, master);

	/* Get resources (memory, IRQ) associated with the device */
	r = platform_get_resource(dev, IORESOURCE_MEM, 0);
	if (r == NULL) {
		err = -ENODEV;
		goto out_put_master;
	}
	ss->regs = ioremap(r->start, resource_size(r));
	if (!ss->regs) {
		err = -EINVAL;
		goto out_put_master;
	}

	ss->master_dev = &dev->dev;
	ss->id = dev->id;

	INIT_WORK(&ss->work, stmp_spi_handle);
	INIT_LIST_HEAD(&ss->queue);
	spin_lock_init(&ss->lock);

	ss->workqueue = create_singlethread_workqueue(dev_name(&dev->dev));
	if (!ss->workqueue) {
		err = -ENXIO;
		goto out_put_master;
	}
	master->transfer = stmp_spi_transfer;
	master->setup = stmp_spi_setup;

	/* the spi->mode bits understood by this driver: */
	master->mode_bits = SPI_CPOL | SPI_CPHA;

	ss->irq = platform_get_irq(dev, 0);
	if (ss->irq < 0) {
		err = ss->irq;
		goto out_put_master;
	}
	ss->err_irq = platform_get_irq(dev, 1);
	if (ss->err_irq < 0) {
		err = ss->err_irq;
		goto out_put_master;
	}

	r = platform_get_resource(dev, IORESOURCE_DMA, 0);
	if (r == NULL) {
		err = -ENODEV;
		goto out_put_master;
	}

	ss->dma = r->start;
	err = stmp3xxx_dma_request(ss->dma, &dev->dev, dev_name(&dev->dev));
	if (err)
		goto out_put_master;

	err = stmp3xxx_dma_allocate_command(ss->dma, &ss->d);
	if (err)
		goto out_free_dma;

	master->bus_num = dev->id;
	master->num_chipselect = 1;

	/* SPI controller initializations */
	err = stmp_spi_init_hw(ss);
	if (err) {
		dev_dbg(&dev->dev, "cannot initialize hardware\n");
		goto out_free_dma_desc;
	}

	if (clock) {
		dev_info(&dev->dev, "clock rate forced to %d\n", clock);
		clk_set_rate(ss->clk, clock);
	}
	ss->speed_khz = clk_get_rate(ss->clk);
	ss->divider = 2;
	dev_info(&dev->dev, "max possible speed %u = %lu/%u kHz\n",
		ss->speed_khz, clk_get_rate(ss->clk), ss->divider);

	/* Register for SPI interrupt */
	err = request_irq(ss->irq, stmp_spi_irq, 0,
			dev_name(&dev->dev), ss);
	if (err) {
		dev_dbg(&dev->dev, "request_irq failed, %d\n", err);
		goto out_release_hw;
	}

	/* ..and shared interrupt for all SSP controllers */
	err = request_irq(ss->err_irq, stmp_spi_irq_err, IRQF_SHARED,
			dev_name(&dev->dev), ss);
	if (err) {
		dev_dbg(&dev->dev, "request_irq(error) failed, %d\n", err);
		goto out_free_irq;
	}

	err = spi_register_master(master);
	if (err) {
		dev_dbg(&dev->dev, "cannot register spi master, %d\n", err);
		goto out_free_irq_2;
	}
	dev_info(&dev->dev, "at (mapped) 0x%08X, irq=%d, bus %d, %s mode\n",
		(u32)ss->regs, ss->irq, master->bus_num,
		pio ? "PIO" : "DMA");
	return 0;

out_free_irq_2:
	free_irq(ss->err_irq, ss);
out_free_irq:
	free_irq(ss->irq, ss);
out_free_dma_desc:
	stmp3xxx_dma_free_command(ss->dma, &ss->d);
out_free_dma:
	stmp3xxx_dma_release(ss->dma);
out_release_hw:
	stmp_spi_release_hw(ss);
out_put_master:
	if (ss->workqueue)
		destroy_workqueue(ss->workqueue);
	if (ss->regs)
		iounmap(ss->regs);
	platform_set_drvdata(dev, NULL);
	spi_master_put(master);
out0:
	return err;
}

static int __devexit stmp_spi_remove(struct platform_device *dev)
{
	struct stmp_spi *ss;
	struct spi_master *master;

	master = platform_get_drvdata(dev);
	if (master == NULL)
		goto out0;
	ss = spi_master_get_devdata(master);

	spi_unregister_master(master);

	free_irq(ss->err_irq, ss);
	free_irq(ss->irq, ss);
	stmp3xxx_dma_free_command(ss->dma, &ss->d);
	stmp3xxx_dma_release(ss->dma);
	stmp_spi_release_hw(ss);
	destroy_workqueue(ss->workqueue);
	iounmap(ss->regs);
	spi_master_put(master);
	platform_set_drvdata(dev, NULL);
out0:
	return 0;
}

#ifdef CONFIG_PM
static int stmp_spi_suspend(struct platform_device *pdev, pm_message_t pmsg)
{
	struct stmp_spi *ss;
	struct spi_master *master;

	master = platform_get_drvdata(pdev);
	ss = spi_master_get_devdata(master);

	ss->saved_timings = readl(ss->regs + HW_SSP_TIMING);
	clk_disable(ss->clk);

	return 0;
}

static int stmp_spi_resume(struct platform_device *pdev)
{
	struct stmp_spi *ss;
	struct spi_master *master;

	master = platform_get_drvdata(pdev);
	ss = spi_master_get_devdata(master);

	clk_enable(ss->clk);
	stmp3xxx_reset_block(ss->regs, false);
	writel(ss->saved_timings, ss->regs + HW_SSP_TIMING);

	return 0;
}

#else
#define stmp_spi_suspend NULL
#define stmp_spi_resume NULL
#endif

static struct platform_driver stmp_spi_driver = {
	.probe	= stmp_spi_probe,
	.remove	= __devexit_p(stmp_spi_remove),
	.driver = {
		.name = "stmp3xxx_ssp",
		.owner = THIS_MODULE,
	},
	.suspend = stmp_spi_suspend,
	.resume  = stmp_spi_resume,
};

static int __init stmp_spi_init(void)
{
	return platform_driver_register(&stmp_spi_driver);
}

static void __exit stmp_spi_exit(void)
{
	platform_driver_unregister(&stmp_spi_driver);
}

module_init(stmp_spi_init);
module_exit(stmp_spi_exit);
module_param(pio, int, S_IRUGO);
module_param(clock, int, S_IRUGO);
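/*
 * Parameter descriptions. The example module name below is an assumption
 * (it depends on how the object is named in the Makefile), e.g.:
 *   insmod spi_stmp.ko pio=1 clock=<rate>
 */
MODULE_PARM_DESC(pio, "use PIO instead of DMA when nonzero (default 0: DMA)");
MODULE_PARM_DESC(clock, "if nonzero, force the SSP clock to this rate at probe");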
MODULE_AUTHOR("dmitry pervushin <dpervushin@embeddedalley.com>");
MODULE_DESCRIPTION("STMP3xxx SPI/SSP driver");
MODULE_LICENSE("GPL");