/*
 * drivers/spi/amba-pl022.c
 *
 * A driver for the ARM PL022 PrimeCell SSP/SPI bus master.
 *
 * Copyright (C) 2008-2009 ST-Ericsson AB
 * Copyright (C) 2006 STMicroelectronics Pvt. Ltd.
 *
 * Author: Linus Walleij <linus.walleij@stericsson.com>
 *
 * Initial version inspired by:
 *	linux-2.6.17-rc3-mm1/drivers/spi/pxa2xx_spi.c
 * Initial adaptation to the PL022 by:
 *	Sachin Verma <sachin.verma@st.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

/*
 * TODO:
 * - add timeout on polled transfers
 * - add generic DMA framework support
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/ioport.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/spi/spi.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl022.h>
#include <linux/io.h>

/*
 * This macro is used to define some register default values.
 * reg is masked with mask, then OR:ed with an (again masked)
 * val shifted sb steps to the left.
 */
#define SSP_WRITE_BITS(reg, val, mask, sb) \
	((reg) = (((reg) & ~(mask)) | (((val)<<(sb)) & (mask))))

/*
 * This macro is also used to define some default values.
 * It will just shift val by sb steps to the left and mask
 * the result with mask.
 */
#define GEN_MASK_BITS(val, mask, sb) \
	(((val)<<(sb)) & (mask))

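/*
 * For clarity, an illustrative (not board-specific) use of the two macros:
 *
 *	u16 cr0 = 0;
 *
 *	SSP_WRITE_BITS(cr0, SSP_DATA_BITS_12, SSP_CR0_MASK_DSS, 0);
 *	SSP_WRITE_BITS(cr0, SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6);
 *
 * only touches the DSS and SPO fields of cr0, while GEN_MASK_BITS() is the
 * pure expression variant used to build the DEFAULT_SSP_REG_* values below.
 */
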
64#define DRIVE_TX 0
65#define DO_NOT_DRIVE_TX 1
66
67#define DO_NOT_QUEUE_DMA 0
68#define QUEUE_DMA 1
69
70#define RX_TRANSFER 1
71#define TX_TRANSFER 2
72
73/*
74 * Macros to access SSP Registers with their offsets
75 */
76#define SSP_CR0(r) (r + 0x000)
77#define SSP_CR1(r) (r + 0x004)
78#define SSP_DR(r) (r + 0x008)
79#define SSP_SR(r) (r + 0x00C)
80#define SSP_CPSR(r) (r + 0x010)
81#define SSP_IMSC(r) (r + 0x014)
82#define SSP_RIS(r) (r + 0x018)
83#define SSP_MIS(r) (r + 0x01C)
84#define SSP_ICR(r) (r + 0x020)
85#define SSP_DMACR(r) (r + 0x024)
86#define SSP_ITCR(r) (r + 0x080)
87#define SSP_ITIP(r) (r + 0x084)
88#define SSP_ITOP(r) (r + 0x088)
89#define SSP_TDR(r) (r + 0x08C)
90
91#define SSP_PID0(r) (r + 0xFE0)
92#define SSP_PID1(r) (r + 0xFE4)
93#define SSP_PID2(r) (r + 0xFE8)
94#define SSP_PID3(r) (r + 0xFEC)
95
96#define SSP_CID0(r) (r + 0xFF0)
97#define SSP_CID1(r) (r + 0xFF4)
98#define SSP_CID2(r) (r + 0xFF8)
99#define SSP_CID3(r) (r + 0xFFC)
100
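/*
 * These offsets are always combined with the ioremapped base address, for
 * example (illustrative only):
 *
 *	while (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_BSY)
 *		cpu_relax();
 *
 * would spin until the controller reports it is no longer busy.
 */
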
101/*
102 * SSP Control Register 0 - SSP_CR0
103 */
104#define SSP_CR0_MASK_DSS (0x1FUL << 0)
105#define SSP_CR0_MASK_HALFDUP (0x1UL << 5)
106#define SSP_CR0_MASK_SPO (0x1UL << 6)
107#define SSP_CR0_MASK_SPH (0x1UL << 7)
108#define SSP_CR0_MASK_SCR (0xFFUL << 8)
109#define SSP_CR0_MASK_CSS (0x1FUL << 16)
110#define SSP_CR0_MASK_FRF (0x3UL << 21)
111
/*
 * SSP Control Register 1 - SSP_CR1
 */
115#define SSP_CR1_MASK_LBM (0x1UL << 0)
116#define SSP_CR1_MASK_SSE (0x1UL << 1)
117#define SSP_CR1_MASK_MS (0x1UL << 2)
118#define SSP_CR1_MASK_SOD (0x1UL << 3)
119#define SSP_CR1_MASK_RENDN (0x1UL << 4)
120#define SSP_CR1_MASK_TENDN (0x1UL << 5)
121#define SSP_CR1_MASK_MWAIT (0x1UL << 6)
122#define SSP_CR1_MASK_RXIFLSEL (0x7UL << 7)
123#define SSP_CR1_MASK_TXIFLSEL (0x7UL << 10)
124
125/*
126 * SSP Data Register - SSP_DR
127 */
128#define SSP_DR_MASK_DATA 0xFFFFFFFF
129
130/*
131 * SSP Status Register - SSP_SR
132 */
133#define SSP_SR_MASK_TFE (0x1UL << 0) /* Transmit FIFO empty */
134#define SSP_SR_MASK_TNF (0x1UL << 1) /* Transmit FIFO not full */
135#define SSP_SR_MASK_RNE (0x1UL << 2) /* Receive FIFO not empty */
136#define SSP_SR_MASK_RFF (0x1UL << 3) /* Receive FIFO full */
137#define SSP_SR_MASK_BSY (0x1UL << 4) /* Busy Flag */
138
139/*
140 * SSP Clock Prescale Register - SSP_CPSR
141 */
142#define SSP_CPSR_MASK_CPSDVSR (0xFFUL << 0)
143
144/*
145 * SSP Interrupt Mask Set/Clear Register - SSP_IMSC
146 */
147#define SSP_IMSC_MASK_RORIM (0x1UL << 0) /* Receive Overrun Interrupt mask */
148#define SSP_IMSC_MASK_RTIM (0x1UL << 1) /* Receive timeout Interrupt mask */
149#define SSP_IMSC_MASK_RXIM (0x1UL << 2) /* Receive FIFO Interrupt mask */
150#define SSP_IMSC_MASK_TXIM (0x1UL << 3) /* Transmit FIFO Interrupt mask */
151
152/*
153 * SSP Raw Interrupt Status Register - SSP_RIS
154 */
155/* Receive Overrun Raw Interrupt status */
156#define SSP_RIS_MASK_RORRIS (0x1UL << 0)
157/* Receive Timeout Raw Interrupt status */
158#define SSP_RIS_MASK_RTRIS (0x1UL << 1)
159/* Receive FIFO Raw Interrupt status */
160#define SSP_RIS_MASK_RXRIS (0x1UL << 2)
161/* Transmit FIFO Raw Interrupt status */
162#define SSP_RIS_MASK_TXRIS (0x1UL << 3)
163
164/*
165 * SSP Masked Interrupt Status Register - SSP_MIS
166 */
167/* Receive Overrun Masked Interrupt status */
168#define SSP_MIS_MASK_RORMIS (0x1UL << 0)
169/* Receive Timeout Masked Interrupt status */
170#define SSP_MIS_MASK_RTMIS (0x1UL << 1)
171/* Receive FIFO Masked Interrupt status */
172#define SSP_MIS_MASK_RXMIS (0x1UL << 2)
173/* Transmit FIFO Masked Interrupt status */
174#define SSP_MIS_MASK_TXMIS (0x1UL << 3)
175
176/*
177 * SSP Interrupt Clear Register - SSP_ICR
178 */
179/* Receive Overrun Raw Clear Interrupt bit */
180#define SSP_ICR_MASK_RORIC (0x1UL << 0)
181/* Receive Timeout Clear Interrupt bit */
182#define SSP_ICR_MASK_RTIC (0x1UL << 1)
183
184/*
185 * SSP DMA Control Register - SSP_DMACR
186 */
187/* Receive DMA Enable bit */
188#define SSP_DMACR_MASK_RXDMAE (0x1UL << 0)
189/* Transmit DMA Enable bit */
190#define SSP_DMACR_MASK_TXDMAE (0x1UL << 1)
191
192/*
193 * SSP Integration Test control Register - SSP_ITCR
194 */
195#define SSP_ITCR_MASK_ITEN (0x1UL << 0)
196#define SSP_ITCR_MASK_TESTFIFO (0x1UL << 1)
197
198/*
199 * SSP Integration Test Input Register - SSP_ITIP
200 */
201#define ITIP_MASK_SSPRXD (0x1UL << 0)
202#define ITIP_MASK_SSPFSSIN (0x1UL << 1)
203#define ITIP_MASK_SSPCLKIN (0x1UL << 2)
204#define ITIP_MASK_RXDMAC (0x1UL << 3)
205#define ITIP_MASK_TXDMAC (0x1UL << 4)
206#define ITIP_MASK_SSPTXDIN (0x1UL << 5)
207
208/*
209 * SSP Integration Test output Register - SSP_ITOP
210 */
211#define ITOP_MASK_SSPTXD (0x1UL << 0)
212#define ITOP_MASK_SSPFSSOUT (0x1UL << 1)
213#define ITOP_MASK_SSPCLKOUT (0x1UL << 2)
214#define ITOP_MASK_SSPOEn (0x1UL << 3)
215#define ITOP_MASK_SSPCTLOEn (0x1UL << 4)
216#define ITOP_MASK_RORINTR (0x1UL << 5)
217#define ITOP_MASK_RTINTR (0x1UL << 6)
218#define ITOP_MASK_RXINTR (0x1UL << 7)
219#define ITOP_MASK_TXINTR (0x1UL << 8)
220#define ITOP_MASK_INTR (0x1UL << 9)
221#define ITOP_MASK_RXDMABREQ (0x1UL << 10)
222#define ITOP_MASK_RXDMASREQ (0x1UL << 11)
223#define ITOP_MASK_TXDMABREQ (0x1UL << 12)
224#define ITOP_MASK_TXDMASREQ (0x1UL << 13)
225
226/*
227 * SSP Test Data Register - SSP_TDR
228 */
229#define TDR_MASK_TESTDATA (0xFFFFFFFF)
230
231/*
232 * Message State
233 * we use the spi_message.state (void *) pointer to
234 * hold a single state value, that's why all this
235 * (void *) casting is done here.
236 */
237#define STATE_START ((void *) 0)
238#define STATE_RUNNING ((void *) 1)
239#define STATE_DONE ((void *) 2)
240#define STATE_ERROR ((void *) -1)
241
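/*
 * For clarity: because the state lives directly in the spi_message, the
 * state machine below is driven by plain pointer assignments and
 * comparisons, e.g.:
 *
 *	msg->state = STATE_RUNNING;
 *	...
 *	if (msg->state == STATE_ERROR)
 *		msg->status = -EIO;
 */
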
242/*
243 * Queue State
244 */
245#define QUEUE_RUNNING (0)
246#define QUEUE_STOPPED (1)
247/*
248 * SSP State - Whether Enabled or Disabled
249 */
250#define SSP_DISABLED (0)
251#define SSP_ENABLED (1)
252
253/*
254 * SSP DMA State - Whether DMA Enabled or Disabled
255 */
256#define SSP_DMA_DISABLED (0)
257#define SSP_DMA_ENABLED (1)
258
259/*
260 * SSP Clock Defaults
261 */
262#define NMDK_SSP_DEFAULT_CLKRATE 0x2
263#define NMDK_SSP_DEFAULT_PRESCALE 0x40
264
265/*
266 * SSP Clock Parameter ranges
267 */
268#define CPSDVR_MIN 0x02
269#define CPSDVR_MAX 0xFE
270#define SCR_MIN 0x00
271#define SCR_MAX 0xFF
272
273/*
274 * SSP Interrupt related Macros
275 */
276#define DEFAULT_SSP_REG_IMSC 0x0UL
277#define DISABLE_ALL_INTERRUPTS DEFAULT_SSP_REG_IMSC
278#define ENABLE_ALL_INTERRUPTS (~DEFAULT_SSP_REG_IMSC)
279
280#define CLEAR_ALL_INTERRUPTS 0x3
281
282
283/*
284 * The type of reading going on on this chip
285 */
286enum ssp_reading {
287 READING_NULL,
288 READING_U8,
289 READING_U16,
290 READING_U32
291};
292
293/**
294 * The type of writing going on on this chip
295 */
296enum ssp_writing {
297 WRITING_NULL,
298 WRITING_U8,
299 WRITING_U16,
300 WRITING_U32
301};
302
/**
 * struct vendor_data - vendor-specific config parameters
 * for PL022 derivatives
 * @fifodepth: depth of FIFOs (both)
 * @max_bpw: maximum number of bits per word
 * @unidir: supports unidirectional transfers
 */
struct vendor_data {
	int fifodepth;
	int max_bpw;
	bool unidir;
};
315
/**
 * struct pl022 - This is the private SSP driver data structure
 * @adev: AMBA device model hookup
 * @vendor: vendor-specific config parameters for this PL022 variant
 * @phybase: the physical memory where the SSP device resides
 * @virtbase: the virtual memory where the SSP is mapped
 * @clk: the clock for this SSP block, used to derive the transfer rate
 * @master: SPI framework hookup
 * @master_info: controller-specific data from machine setup
 * @workqueue: a workqueue on which any spi_message request is queued
 * @pump_messages: work struct for scheduling work to the workqueue
 * @queue_lock: spinlock to synchronise access to the message queue
 * @queue: message queue
 * @busy: workqueue is busy
 * @run: workqueue is running
 * @pump_transfers: tasklet used in interrupt transfer mode
 * @cur_msg: pointer to current spi_message being processed
 * @cur_transfer: pointer to current spi_transfer
 * @cur_chip: pointer to current client's chip (assigned from controller_state)
 * @tx: current position in TX buffer to be read
 * @tx_end: end position in TX buffer to be read
 * @rx: current position in RX buffer to be written
 * @rx_end: end position in RX buffer to be written
 * @read: the type of read currently going on
 * @write: the type of write currently going on
 */
340struct pl022 {
341 struct amba_device *adev;
342 struct vendor_data *vendor;
343 resource_size_t phybase;
344 void __iomem *virtbase;
345 struct clk *clk;
346 struct spi_master *master;
347 struct pl022_ssp_controller *master_info;
348 /* Driver message queue */
349 struct workqueue_struct *workqueue;
350 struct work_struct pump_messages;
351 spinlock_t queue_lock;
352 struct list_head queue;
353 int busy;
354 int run;
355 /* Message transfer pump */
356 struct tasklet_struct pump_transfers;
357 struct spi_message *cur_msg;
358 struct spi_transfer *cur_transfer;
359 struct chip_data *cur_chip;
360 void *tx;
361 void *tx_end;
362 void *rx;
363 void *rx_end;
364 enum ssp_reading read;
365 enum ssp_writing write;
366};
367
/**
 * struct chip_data - To maintain runtime state of SSP for each client chip
 * @cr0: Value of control register CR0 of SSP
 * @cr1: Value of control register CR1 of SSP
 * @dmacr: Value of DMA control Register of SSP
 * @cpsr: Value of Clock prescale register
 * @n_bytes: how many bytes (a power of 2) are required for the client's data width
 * @enable_dma: Whether to enable DMA or not
 * @read: the type of read to be used when transferring for this chip
 * @write: the type of write to be used when transferring for this chip
 * @cs_control: chip select callback provided by chip
 * @xfer_type: polling/interrupt/DMA
 *
 * Runtime state of the SSP controller, maintained per chip;
 * this is set up according to the current message being served.
 */
384struct chip_data {
385 u16 cr0;
386 u16 cr1;
387 u16 dmacr;
388 u16 cpsr;
389 u8 n_bytes;
390 u8 enable_dma:1;
391 enum ssp_reading read;
392 enum ssp_writing write;
393 void (*cs_control) (u32 command);
394 int xfer_type;
395};
396
/**
 * null_cs_control - Dummy chip select function
 * @command: select/deselect the chip
 *
 * If no chip select function is provided by the client this is used as a
 * dummy chip select.
 */
404static void null_cs_control(u32 command)
405{
406 pr_debug("pl022: dummy chip select control, CS=0x%x\n", command);
407}
408
409/**
410 * giveback - current spi_message is over, schedule next message and call
411 * callback of this message. Assumes that caller already
412 * set message->status; dma and pio irqs are blocked
413 * @pl022: SSP driver private data structure
414 */
415static void giveback(struct pl022 *pl022)
416{
417 struct spi_transfer *last_transfer;
418 unsigned long flags;
419 struct spi_message *msg;
420 void (*curr_cs_control) (u32 command);
421
	/*
	 * This local reference to the chip select function
	 * is needed because we set curr_chip to NULL
	 * as a step toward terminating the message.
	 */
427 curr_cs_control = pl022->cur_chip->cs_control;
428 spin_lock_irqsave(&pl022->queue_lock, flags);
429 msg = pl022->cur_msg;
430 pl022->cur_msg = NULL;
431 pl022->cur_transfer = NULL;
432 pl022->cur_chip = NULL;
433 queue_work(pl022->workqueue, &pl022->pump_messages);
434 spin_unlock_irqrestore(&pl022->queue_lock, flags);
435
436 last_transfer = list_entry(msg->transfers.prev,
437 struct spi_transfer,
438 transfer_list);
439
440 /* Delay if requested before any change in chip select */
441 if (last_transfer->delay_usecs)
442 /*
443 * FIXME: This runs in interrupt context.
444 * Is this really smart?
445 */
446 udelay(last_transfer->delay_usecs);
447
448 /*
449 * Drop chip select UNLESS cs_change is true or we are returning
450 * a message with an error, or next message is for another chip
451 */
452 if (!last_transfer->cs_change)
453 curr_cs_control(SSP_CHIP_DESELECT);
454 else {
455 struct spi_message *next_msg;
456
457 /* Holding of cs was hinted, but we need to make sure
458 * the next message is for the same chip. Don't waste
459 * time with the following tests unless this was hinted.
460 *
461 * We cannot postpone this until pump_messages, because
462 * after calling msg->complete (below) the driver that
463 * sent the current message could be unloaded, which
464 * could invalidate the cs_control() callback...
465 */
466
467 /* get a pointer to the next message, if any */
468 spin_lock_irqsave(&pl022->queue_lock, flags);
469 if (list_empty(&pl022->queue))
470 next_msg = NULL;
471 else
472 next_msg = list_entry(pl022->queue.next,
473 struct spi_message, queue);
474 spin_unlock_irqrestore(&pl022->queue_lock, flags);
475
476 /* see if the next and current messages point
477 * to the same chip
478 */
479 if (next_msg && next_msg->spi != msg->spi)
480 next_msg = NULL;
481 if (!next_msg || msg->state == STATE_ERROR)
482 curr_cs_control(SSP_CHIP_DESELECT);
483 }
484 msg->state = NULL;
485 if (msg->complete)
486 msg->complete(msg->context);
487 /* This message is completed, so let's turn off the clock! */
488 clk_disable(pl022->clk);
489}
490
491/**
492 * flush - flush the FIFO to reach a clean state
493 * @pl022: SSP driver private data structure
494 */
495static int flush(struct pl022 *pl022)
496{
497 unsigned long limit = loops_per_jiffy << 1;
498
499 dev_dbg(&pl022->adev->dev, "flush\n");
500 do {
501 while (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE)
502 readw(SSP_DR(pl022->virtbase));
503 } while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_BSY) && limit--);
504 return limit;
505}
506
507/**
508 * restore_state - Load configuration of current chip
509 * @pl022: SSP driver private data structure
510 */
511static void restore_state(struct pl022 *pl022)
512{
513 struct chip_data *chip = pl022->cur_chip;
514
515 writew(chip->cr0, SSP_CR0(pl022->virtbase));
516 writew(chip->cr1, SSP_CR1(pl022->virtbase));
517 writew(chip->dmacr, SSP_DMACR(pl022->virtbase));
518 writew(chip->cpsr, SSP_CPSR(pl022->virtbase));
519 writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
520 writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
521}
522
523/**
524 * load_ssp_default_config - Load default configuration for SSP
525 * @pl022: SSP driver private data structure
526 */
527
528/*
529 * Default SSP Register Values
530 */
531#define DEFAULT_SSP_REG_CR0 ( \
532 GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS, 0) | \
533 GEN_MASK_BITS(SSP_MICROWIRE_CHANNEL_FULL_DUPLEX, SSP_CR0_MASK_HALFDUP, 5) | \
534 GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \
	GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \
	GEN_MASK_BITS(NMDK_SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) | \
537 GEN_MASK_BITS(SSP_BITS_8, SSP_CR0_MASK_CSS, 16) | \
538 GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF, 21) \
539)
540
541#define DEFAULT_SSP_REG_CR1 ( \
542 GEN_MASK_BITS(LOOPBACK_DISABLED, SSP_CR1_MASK_LBM, 0) | \
543 GEN_MASK_BITS(SSP_DISABLED, SSP_CR1_MASK_SSE, 1) | \
544 GEN_MASK_BITS(SSP_MASTER, SSP_CR1_MASK_MS, 2) | \
545 GEN_MASK_BITS(DO_NOT_DRIVE_TX, SSP_CR1_MASK_SOD, 3) | \
546 GEN_MASK_BITS(SSP_RX_MSB, SSP_CR1_MASK_RENDN, 4) | \
547 GEN_MASK_BITS(SSP_TX_MSB, SSP_CR1_MASK_TENDN, 5) | \
548 GEN_MASK_BITS(SSP_MWIRE_WAIT_ZERO, SSP_CR1_MASK_MWAIT, 6) |\
549 GEN_MASK_BITS(SSP_RX_1_OR_MORE_ELEM, SSP_CR1_MASK_RXIFLSEL, 7) | \
550 GEN_MASK_BITS(SSP_TX_1_OR_MORE_EMPTY_LOC, SSP_CR1_MASK_TXIFLSEL, 10) \
551)
552
553#define DEFAULT_SSP_REG_CPSR ( \
554 GEN_MASK_BITS(NMDK_SSP_DEFAULT_PRESCALE, SSP_CPSR_MASK_CPSDVSR, 0) \
555)
556
557#define DEFAULT_SSP_REG_DMACR (\
558 GEN_MASK_BITS(SSP_DMA_DISABLED, SSP_DMACR_MASK_RXDMAE, 0) | \
559 GEN_MASK_BITS(SSP_DMA_DISABLED, SSP_DMACR_MASK_TXDMAE, 1) \
560)
561
562
563static void load_ssp_default_config(struct pl022 *pl022)
564{
565 writew(DEFAULT_SSP_REG_CR0, SSP_CR0(pl022->virtbase));
566 writew(DEFAULT_SSP_REG_CR1, SSP_CR1(pl022->virtbase));
567 writew(DEFAULT_SSP_REG_DMACR, SSP_DMACR(pl022->virtbase));
568 writew(DEFAULT_SSP_REG_CPSR, SSP_CPSR(pl022->virtbase));
569 writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
570 writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
571}
572
573/**
574 * This will write to TX and read from RX according to the parameters
575 * set in pl022.
576 */
577static void readwriter(struct pl022 *pl022)
578{
579
	/*
	 * The FIFO depth differs between primecell variants.
	 * I believe filling in too much in the FIFO might cause
	 * errors in 8bit wide transfers on ARM variants (just 8 words
	 * FIFO, means only 8x8 = 64 bits in FIFO) at least.
	 *
	 * FIXME: currently we have no logic to account for this.
	 * perhaps there is even something broken in HW regarding
	 * 8bit transfers (it doesn't fail on 16bit) so this needs
	 * more investigation... (a possible cap on the TX burst is
	 * sketched in the comment after this function)
	 */
591 dev_dbg(&pl022->adev->dev,
592 "%s, rx: %p, rxend: %p, tx: %p, txend: %p\n",
593 __func__, pl022->rx, pl022->rx_end, pl022->tx, pl022->tx_end);
594
595 /* Read as much as you can */
596 while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE)
597 && (pl022->rx < pl022->rx_end)) {
598 switch (pl022->read) {
599 case READING_NULL:
600 readw(SSP_DR(pl022->virtbase));
601 break;
602 case READING_U8:
603 *(u8 *) (pl022->rx) =
604 readw(SSP_DR(pl022->virtbase)) & 0xFFU;
605 break;
606 case READING_U16:
607 *(u16 *) (pl022->rx) =
608 (u16) readw(SSP_DR(pl022->virtbase));
609 break;
610 case READING_U32:
611 *(u32 *) (pl022->rx) =
612 readl(SSP_DR(pl022->virtbase));
613 break;
614 }
615 pl022->rx += (pl022->cur_chip->n_bytes);
616 }
617 /*
618 * Write as much as you can, while keeping an eye on the RX FIFO!
619 */
620 while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_TNF)
621 && (pl022->tx < pl022->tx_end)) {
622 switch (pl022->write) {
623 case WRITING_NULL:
624 writew(0x0, SSP_DR(pl022->virtbase));
625 break;
626 case WRITING_U8:
627 writew(*(u8 *) (pl022->tx), SSP_DR(pl022->virtbase));
628 break;
629 case WRITING_U16:
630 writew((*(u16 *) (pl022->tx)), SSP_DR(pl022->virtbase));
631 break;
632 case WRITING_U32:
633 writel(*(u32 *) (pl022->tx), SSP_DR(pl022->virtbase));
634 break;
635 }
636 pl022->tx += (pl022->cur_chip->n_bytes);
		/*
		 * This inner reader takes care of things appearing in the RX
		 * FIFO as we're transmitting. This will happen a lot since the
		 * clock starts running when you put things into the TX FIFO,
		 * and then things are continuously clocked into the RX FIFO.
		 */
643 while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE)
644 && (pl022->rx < pl022->rx_end)) {
645 switch (pl022->read) {
646 case READING_NULL:
647 readw(SSP_DR(pl022->virtbase));
648 break;
649 case READING_U8:
650 *(u8 *) (pl022->rx) =
651 readw(SSP_DR(pl022->virtbase)) & 0xFFU;
652 break;
653 case READING_U16:
654 *(u16 *) (pl022->rx) =
655 (u16) readw(SSP_DR(pl022->virtbase));
656 break;
657 case READING_U32:
658 *(u32 *) (pl022->rx) =
659 readl(SSP_DR(pl022->virtbase));
660 break;
661 }
662 pl022->rx += (pl022->cur_chip->n_bytes);
663 }
664 }
665 /*
666 * When we exit here the TX FIFO should be full and the RX FIFO
667 * should be empty
668 */
669}
670
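/*
 * A possible (untested) way to address the FIXME above about FIFO depth
 * would be to cap each TX burst at the vendor-reported depth instead of
 * relying on the TNF flag alone, along these lines:
 *
 *	int room = pl022->vendor->fifodepth;
 *
 *	while (room-- && (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_TNF)
 *	       && (pl022->tx < pl022->tx_end)) {
 *		... write one word as above ...
 *	}
 *
 * This is only a sketch; it assumes the RX drain keeps pace so that no more
 * than fifodepth words are ever outstanding.
 */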
671
/**
 * next_transfer - Move to the next transfer in the current spi message
 * @pl022: SSP driver private data structure
 *
 * This function moves through the linked list of spi transfers in the
 * current spi message and returns with the state of the current spi
 * message, i.e. whether its last transfer is done (STATE_DONE) or the
 * next transfer is ready (STATE_RUNNING).
 */
681static void *next_transfer(struct pl022 *pl022)
682{
683 struct spi_message *msg = pl022->cur_msg;
684 struct spi_transfer *trans = pl022->cur_transfer;
685
686 /* Move to next transfer */
687 if (trans->transfer_list.next != &msg->transfers) {
688 pl022->cur_transfer =
689 list_entry(trans->transfer_list.next,
690 struct spi_transfer, transfer_list);
691 return STATE_RUNNING;
692 }
693 return STATE_DONE;
694}
/**
 * pl022_interrupt_handler - Interrupt handler for SSP controller
 *
 * This function handles interrupts generated for an interrupt based transfer.
 * If a receive overrun (ROR) interrupt is present then we disable SSP, flag
 * the current message's state as STATE_ERROR and schedule the tasklet
 * pump_transfers which will do the postprocessing of the current message by
 * calling giveback(). Otherwise it reads data from the RX FIFO until there is
 * no more data, writes data to the TX FIFO as long as it is not full, and if
 * we complete the transfer it moves to the next one and schedules the tasklet.
 */
706static irqreturn_t pl022_interrupt_handler(int irq, void *dev_id)
707{
708 struct pl022 *pl022 = dev_id;
709 struct spi_message *msg = pl022->cur_msg;
710 u16 irq_status = 0;
711 u16 flag = 0;
712
713 if (unlikely(!msg)) {
714 dev_err(&pl022->adev->dev,
715 "bad message state in interrupt handler");
716 /* Never fail */
717 return IRQ_HANDLED;
718 }
719
720 /* Read the Interrupt Status Register */
721 irq_status = readw(SSP_MIS(pl022->virtbase));
722
723 if (unlikely(!irq_status))
724 return IRQ_NONE;
725
726 /* This handles the error code interrupts */
727 if (unlikely(irq_status & SSP_MIS_MASK_RORMIS)) {
728 /*
729 * Overrun interrupt - bail out since our Data has been
730 * corrupted
731 */
732 dev_err(&pl022->adev->dev,
733 "FIFO overrun\n");
734 if (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RFF)
735 dev_err(&pl022->adev->dev,
736 "RXFIFO is full\n");
737 if (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_TNF)
738 dev_err(&pl022->adev->dev,
739 "TXFIFO is full\n");
740
741 /*
742 * Disable and clear interrupts, disable SSP,
743 * mark message with bad status so it can be
744 * retried.
745 */
746 writew(DISABLE_ALL_INTERRUPTS,
747 SSP_IMSC(pl022->virtbase));
748 writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
749 writew((readw(SSP_CR1(pl022->virtbase)) &
750 (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
751 msg->state = STATE_ERROR;
752
753 /* Schedule message queue handler */
754 tasklet_schedule(&pl022->pump_transfers);
755 return IRQ_HANDLED;
756 }
757
758 readwriter(pl022);
759
760 if ((pl022->tx == pl022->tx_end) && (flag == 0)) {
761 flag = 1;
762 /* Disable Transmit interrupt */
763 writew(readw(SSP_IMSC(pl022->virtbase)) &
764 (~SSP_IMSC_MASK_TXIM),
765 SSP_IMSC(pl022->virtbase));
766 }
767
768 /*
769 * Since all transactions must write as much as shall be read,
770 * we can conclude the entire transaction once RX is complete.
771 * At this point, all TX will always be finished.
772 */
773 if (pl022->rx >= pl022->rx_end) {
774 writew(DISABLE_ALL_INTERRUPTS,
775 SSP_IMSC(pl022->virtbase));
776 writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
777 if (unlikely(pl022->rx > pl022->rx_end)) {
778 dev_warn(&pl022->adev->dev, "read %u surplus "
779 "bytes (did you request an odd "
780 "number of bytes on a 16bit bus?)\n",
781 (u32) (pl022->rx - pl022->rx_end));
782 }
		/* Update total bytes transferred */
784 msg->actual_length += pl022->cur_transfer->len;
785 if (pl022->cur_transfer->cs_change)
786 pl022->cur_chip->
787 cs_control(SSP_CHIP_DESELECT);
788 /* Move to next transfer */
789 msg->state = next_transfer(pl022);
790 tasklet_schedule(&pl022->pump_transfers);
791 return IRQ_HANDLED;
792 }
793
794 return IRQ_HANDLED;
795}
796
/**
 * This sets up the pointers to memory for the next transfer to
 * send out on the SPI bus.
 */
801static int set_up_next_transfer(struct pl022 *pl022,
802 struct spi_transfer *transfer)
803{
804 int residue;
805
806 /* Sanity check the message for this bus width */
807 residue = pl022->cur_transfer->len % pl022->cur_chip->n_bytes;
808 if (unlikely(residue != 0)) {
809 dev_err(&pl022->adev->dev,
810 "message of %u bytes to transmit but the current "
811 "chip bus has a data width of %u bytes!\n",
812 pl022->cur_transfer->len,
813 pl022->cur_chip->n_bytes);
814 dev_err(&pl022->adev->dev, "skipping this message\n");
815 return -EIO;
816 }
817 pl022->tx = (void *)transfer->tx_buf;
818 pl022->tx_end = pl022->tx + pl022->cur_transfer->len;
819 pl022->rx = (void *)transfer->rx_buf;
820 pl022->rx_end = pl022->rx + pl022->cur_transfer->len;
821 pl022->write =
822 pl022->tx ? pl022->cur_chip->write : WRITING_NULL;
823 pl022->read = pl022->rx ? pl022->cur_chip->read : READING_NULL;
824 return 0;
825}
826
827/**
828 * pump_transfers - Tasklet function which schedules next interrupt transfer
829 * when running in interrupt transfer mode.
830 * @data: SSP driver private data structure
831 *
832 */
833static void pump_transfers(unsigned long data)
834{
835 struct pl022 *pl022 = (struct pl022 *) data;
836 struct spi_message *message = NULL;
837 struct spi_transfer *transfer = NULL;
838 struct spi_transfer *previous = NULL;
839
840 /* Get current state information */
841 message = pl022->cur_msg;
842 transfer = pl022->cur_transfer;
843
844 /* Handle for abort */
845 if (message->state == STATE_ERROR) {
846 message->status = -EIO;
847 giveback(pl022);
848 return;
849 }
850
851 /* Handle end of message */
852 if (message->state == STATE_DONE) {
853 message->status = 0;
854 giveback(pl022);
855 return;
856 }
857
858 /* Delay if requested at end of transfer before CS change */
859 if (message->state == STATE_RUNNING) {
860 previous = list_entry(transfer->transfer_list.prev,
861 struct spi_transfer,
862 transfer_list);
863 if (previous->delay_usecs)
864 /*
865 * FIXME: This runs in interrupt context.
866 * Is this really smart?
867 */
868 udelay(previous->delay_usecs);
869
870 /* Drop chip select only if cs_change is requested */
871 if (previous->cs_change)
872 pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
873 } else {
874 /* STATE_START */
875 message->state = STATE_RUNNING;
876 }
877
878 if (set_up_next_transfer(pl022, transfer)) {
879 message->state = STATE_ERROR;
880 message->status = -EIO;
881 giveback(pl022);
882 return;
883 }
884 /* Flush the FIFOs and let's go! */
885 flush(pl022);
886 writew(ENABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
887}
888
889/**
890 * NOT IMPLEMENTED
891 * configure_dma - It configures the DMA pipes for DMA transfers
892 * @data: SSP driver's private data structure
893 *
894 */
895static int configure_dma(void *data)
896{
897 struct pl022 *pl022 = data;
898 dev_dbg(&pl022->adev->dev, "configure DMA\n");
899 return -ENOTSUPP;
900}
901
/**
 * do_dma_transfer - Handles the current message if it is a DMA transfer.
 * NOT FULLY IMPLEMENTED
 * @data: SSP driver's private data structure
 */
908static void do_dma_transfer(void *data)
909{
910 struct pl022 *pl022 = data;
911
912 if (configure_dma(data)) {
913 dev_dbg(&pl022->adev->dev, "configuration of DMA Failed!\n");
914 goto err_config_dma;
915 }
916
	/* TODO: Implement DMA setup of pipes here */
918
919 /* Enable target chip, set up transfer */
920 pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
921 if (set_up_next_transfer(pl022, pl022->cur_transfer)) {
922 /* Error path */
923 pl022->cur_msg->state = STATE_ERROR;
924 pl022->cur_msg->status = -EIO;
925 giveback(pl022);
926 return;
927 }
928 /* Enable SSP */
929 writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE),
930 SSP_CR1(pl022->virtbase));
931
932 /* TODO: Enable the DMA transfer here */
933 return;
934
935 err_config_dma:
936 pl022->cur_msg->state = STATE_ERROR;
937 pl022->cur_msg->status = -EIO;
938 giveback(pl022);
939 return;
940}
941
942static void do_interrupt_transfer(void *data)
943{
944 struct pl022 *pl022 = data;
945
946 /* Enable target chip */
947 pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
948 if (set_up_next_transfer(pl022, pl022->cur_transfer)) {
949 /* Error path */
950 pl022->cur_msg->state = STATE_ERROR;
951 pl022->cur_msg->status = -EIO;
952 giveback(pl022);
953 return;
954 }
955 /* Enable SSP, turn on interrupts */
956 writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE),
957 SSP_CR1(pl022->virtbase));
958 writew(ENABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
959}
960
961static void do_polling_transfer(void *data)
962{
963 struct pl022 *pl022 = data;
964 struct spi_message *message = NULL;
965 struct spi_transfer *transfer = NULL;
966 struct spi_transfer *previous = NULL;
967 struct chip_data *chip;
968
969 chip = pl022->cur_chip;
970 message = pl022->cur_msg;
971
972 while (message->state != STATE_DONE) {
973 /* Handle for abort */
974 if (message->state == STATE_ERROR)
975 break;
976 transfer = pl022->cur_transfer;
977
978 /* Delay if requested at end of transfer */
979 if (message->state == STATE_RUNNING) {
980 previous =
981 list_entry(transfer->transfer_list.prev,
982 struct spi_transfer, transfer_list);
983 if (previous->delay_usecs)
984 udelay(previous->delay_usecs);
985 if (previous->cs_change)
986 pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
987 } else {
988 /* STATE_START */
989 message->state = STATE_RUNNING;
990 pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
991 }
992
993 /* Configuration Changing Per Transfer */
994 if (set_up_next_transfer(pl022, transfer)) {
995 /* Error path */
996 message->state = STATE_ERROR;
997 break;
998 }
999 /* Flush FIFOs and enable SSP */
1000 flush(pl022);
1001 writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE),
1002 SSP_CR1(pl022->virtbase));
1003
		dev_dbg(&pl022->adev->dev, "POLLING TRANSFER ONGOING ...\n");
		/*
		 * FIXME: insert a timeout so we don't hang here indefinitely;
		 * a possible shape is sketched in the comment below.
		 */
1006 while (pl022->tx < pl022->tx_end || pl022->rx < pl022->rx_end)
1007 readwriter(pl022);
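		/*
		 * A possible (untested) shape for the missing timeout, using a
		 * jiffies deadline instead of a bare busy-wait:
		 *
		 *	unsigned long deadline = jiffies + msecs_to_jiffies(250);
		 *
		 *	while (pl022->tx < pl022->tx_end ||
		 *	       pl022->rx < pl022->rx_end) {
		 *		if (time_after(jiffies, deadline)) {
		 *			message->state = STATE_ERROR;
		 *			break;
		 *		}
		 *		readwriter(pl022);
		 *	}
		 *
		 * The 250 ms budget is an arbitrary placeholder, not a
		 * measured value.
		 */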
1008
		/* Update total bytes transferred */
1010 message->actual_length += pl022->cur_transfer->len;
1011 if (pl022->cur_transfer->cs_change)
1012 pl022->cur_chip->cs_control(SSP_CHIP_DESELECT);
1013 /* Move to next transfer */
1014 message->state = next_transfer(pl022);
1015 }
1016
1017 /* Handle end of message */
1018 if (message->state == STATE_DONE)
1019 message->status = 0;
1020 else
1021 message->status = -EIO;
1022
1023 giveback(pl022);
1024 return;
1025}
1026
/**
 * pump_messages - Workqueue function which processes spi message queue
 * @work: pointer to the work structure embedded in the SSP driver data
 *
 * This function checks if there is any spi message in the queue that
 * needs processing and delegates control to the appropriate function
 * do_polling_transfer()/do_interrupt_transfer()/do_dma_transfer()
 * based on the kind of transfer.
 */
1037static void pump_messages(struct work_struct *work)
1038{
1039 struct pl022 *pl022 =
1040 container_of(work, struct pl022, pump_messages);
1041 unsigned long flags;
1042
1043 /* Lock queue and check for queue work */
1044 spin_lock_irqsave(&pl022->queue_lock, flags);
1045 if (list_empty(&pl022->queue) || pl022->run == QUEUE_STOPPED) {
1046 pl022->busy = 0;
1047 spin_unlock_irqrestore(&pl022->queue_lock, flags);
1048 return;
1049 }
1050 /* Make sure we are not already running a message */
1051 if (pl022->cur_msg) {
1052 spin_unlock_irqrestore(&pl022->queue_lock, flags);
1053 return;
1054 }
1055 /* Extract head of queue */
1056 pl022->cur_msg =
1057 list_entry(pl022->queue.next, struct spi_message, queue);
1058
1059 list_del_init(&pl022->cur_msg->queue);
1060 pl022->busy = 1;
1061 spin_unlock_irqrestore(&pl022->queue_lock, flags);
1062
1063 /* Initial message state */
1064 pl022->cur_msg->state = STATE_START;
1065 pl022->cur_transfer = list_entry(pl022->cur_msg->transfers.next,
1066 struct spi_transfer,
1067 transfer_list);
1068
1069 /* Setup the SPI using the per chip configuration */
1070 pl022->cur_chip = spi_get_ctldata(pl022->cur_msg->spi);
1071 /*
1072 * We enable the clock here, then the clock will be disabled when
1073 * giveback() is called in each method (poll/interrupt/DMA)
1074 */
1075 clk_enable(pl022->clk);
1076 restore_state(pl022);
1077 flush(pl022);
1078
1079 if (pl022->cur_chip->xfer_type == POLLING_TRANSFER)
1080 do_polling_transfer(pl022);
1081 else if (pl022->cur_chip->xfer_type == INTERRUPT_TRANSFER)
1082 do_interrupt_transfer(pl022);
1083 else
1084 do_dma_transfer(pl022);
1085}
1086
1087
1088static int __init init_queue(struct pl022 *pl022)
1089{
1090 INIT_LIST_HEAD(&pl022->queue);
1091 spin_lock_init(&pl022->queue_lock);
1092
1093 pl022->run = QUEUE_STOPPED;
1094 pl022->busy = 0;
1095
1096 tasklet_init(&pl022->pump_transfers,
1097 pump_transfers, (unsigned long)pl022);
1098
1099 INIT_WORK(&pl022->pump_messages, pump_messages);
1100 pl022->workqueue = create_singlethread_workqueue(
1101 dev_name(pl022->master->dev.parent));
1102 if (pl022->workqueue == NULL)
1103 return -EBUSY;
1104
1105 return 0;
1106}
1107
1108
1109static int start_queue(struct pl022 *pl022)
1110{
1111 unsigned long flags;
1112
1113 spin_lock_irqsave(&pl022->queue_lock, flags);
1114
1115 if (pl022->run == QUEUE_RUNNING || pl022->busy) {
1116 spin_unlock_irqrestore(&pl022->queue_lock, flags);
1117 return -EBUSY;
1118 }
1119
1120 pl022->run = QUEUE_RUNNING;
1121 pl022->cur_msg = NULL;
1122 pl022->cur_transfer = NULL;
1123 pl022->cur_chip = NULL;
1124 spin_unlock_irqrestore(&pl022->queue_lock, flags);
1125
1126 queue_work(pl022->workqueue, &pl022->pump_messages);
1127
1128 return 0;
1129}
1130
1131
1132static int stop_queue(struct pl022 *pl022)
1133{
1134 unsigned long flags;
1135 unsigned limit = 500;
1136 int status = 0;
1137
1138 spin_lock_irqsave(&pl022->queue_lock, flags);
1139
1140 /* This is a bit lame, but is optimized for the common execution path.
1141 * A wait_queue on the pl022->busy could be used, but then the common
1142 * execution path (pump_messages) would be required to call wake_up or
1143 * friends on every SPI message. Do this instead */
1144 pl022->run = QUEUE_STOPPED;
1145 while (!list_empty(&pl022->queue) && pl022->busy && limit--) {
1146 spin_unlock_irqrestore(&pl022->queue_lock, flags);
1147 msleep(10);
1148 spin_lock_irqsave(&pl022->queue_lock, flags);
1149 }
1150
1151 if (!list_empty(&pl022->queue) || pl022->busy)
1152 status = -EBUSY;
1153
1154 spin_unlock_irqrestore(&pl022->queue_lock, flags);
1155
1156 return status;
1157}
1158
1159static int destroy_queue(struct pl022 *pl022)
1160{
1161 int status;
1162
1163 status = stop_queue(pl022);
1164 /* we are unloading the module or failing to load (only two calls
1165 * to this routine), and neither call can handle a return value.
1166 * However, destroy_workqueue calls flush_workqueue, and that will
1167 * block until all work is done. If the reason that stop_queue
1168 * timed out is that the work will never finish, then it does no
1169 * good to call destroy_workqueue, so return anyway. */
1170 if (status != 0)
1171 return status;
1172
1173 destroy_workqueue(pl022->workqueue);
1174
1175 return 0;
1176}
1177
1178static int verify_controller_parameters(struct pl022 *pl022,
1179 struct pl022_config_chip *chip_info)
1180{
1181 if ((chip_info->lbm != LOOPBACK_ENABLED)
1182 && (chip_info->lbm != LOOPBACK_DISABLED)) {
1183 dev_err(chip_info->dev,
1184 "loopback Mode is configured incorrectly\n");
1185 return -EINVAL;
1186 }
1187 if ((chip_info->iface < SSP_INTERFACE_MOTOROLA_SPI)
1188 || (chip_info->iface > SSP_INTERFACE_UNIDIRECTIONAL)) {
1189 dev_err(chip_info->dev,
1190 "interface is configured incorrectly\n");
1191 return -EINVAL;
1192 }
1193 if ((chip_info->iface == SSP_INTERFACE_UNIDIRECTIONAL) &&
1194 (!pl022->vendor->unidir)) {
1195 dev_err(chip_info->dev,
1196 "unidirectional mode not supported in this "
1197 "hardware version\n");
1198 return -EINVAL;
1199 }
1200 if ((chip_info->hierarchy != SSP_MASTER)
1201 && (chip_info->hierarchy != SSP_SLAVE)) {
1202 dev_err(chip_info->dev,
1203 "hierarchy is configured incorrectly\n");
1204 return -EINVAL;
1205 }
1206 if (((chip_info->clk_freq).cpsdvsr < CPSDVR_MIN)
1207 || ((chip_info->clk_freq).cpsdvsr > CPSDVR_MAX)) {
1208 dev_err(chip_info->dev,
1209 "cpsdvsr is configured incorrectly\n");
1210 return -EINVAL;
1211 }
	if ((chip_info->endian_rx != SSP_RX_MSB)
	    && (chip_info->endian_rx != SSP_RX_LSB)) {
		dev_err(chip_info->dev,
			"RX FIFO endianness is configured incorrectly\n");
		return -EINVAL;
	}
	if ((chip_info->endian_tx != SSP_TX_MSB)
	    && (chip_info->endian_tx != SSP_TX_LSB)) {
		dev_err(chip_info->dev,
			"TX FIFO endianness is configured incorrectly\n");
		return -EINVAL;
	}
1224 if ((chip_info->data_size < SSP_DATA_BITS_4)
1225 || (chip_info->data_size > SSP_DATA_BITS_32)) {
1226 dev_err(chip_info->dev,
1227 "DATA Size is configured incorrectly\n");
1228 return -EINVAL;
1229 }
1230 if ((chip_info->com_mode != INTERRUPT_TRANSFER)
1231 && (chip_info->com_mode != DMA_TRANSFER)
1232 && (chip_info->com_mode != POLLING_TRANSFER)) {
1233 dev_err(chip_info->dev,
1234 "Communication mode is configured incorrectly\n");
1235 return -EINVAL;
1236 }
1237 if ((chip_info->rx_lev_trig < SSP_RX_1_OR_MORE_ELEM)
1238 || (chip_info->rx_lev_trig > SSP_RX_32_OR_MORE_ELEM)) {
1239 dev_err(chip_info->dev,
1240 "RX FIFO Trigger Level is configured incorrectly\n");
1241 return -EINVAL;
1242 }
1243 if ((chip_info->tx_lev_trig < SSP_TX_1_OR_MORE_EMPTY_LOC)
1244 || (chip_info->tx_lev_trig > SSP_TX_32_OR_MORE_EMPTY_LOC)) {
1245 dev_err(chip_info->dev,
1246 "TX FIFO Trigger Level is configured incorrectly\n");
1247 return -EINVAL;
1248 }
1249 if (chip_info->iface == SSP_INTERFACE_MOTOROLA_SPI) {
		if ((chip_info->clk_phase != SSP_CLK_FIRST_EDGE)
		    && (chip_info->clk_phase != SSP_CLK_SECOND_EDGE)) {
			dev_err(chip_info->dev,
1253 "Clock Phase is configured incorrectly\n");
1254 return -EINVAL;
1255 }
1256 if ((chip_info->clk_pol != SSP_CLK_POL_IDLE_LOW)
1257 && (chip_info->clk_pol != SSP_CLK_POL_IDLE_HIGH)) {
1258 dev_err(chip_info->dev,
1259 "Clock Polarity is configured incorrectly\n");
1260 return -EINVAL;
1261 }
1262 }
1263 if (chip_info->iface == SSP_INTERFACE_NATIONAL_MICROWIRE) {
1264 if ((chip_info->ctrl_len < SSP_BITS_4)
1265 || (chip_info->ctrl_len > SSP_BITS_32)) {
1266 dev_err(chip_info->dev,
1267 "CTRL LEN is configured incorrectly\n");
1268 return -EINVAL;
1269 }
1270 if ((chip_info->wait_state != SSP_MWIRE_WAIT_ZERO)
1271 && (chip_info->wait_state != SSP_MWIRE_WAIT_ONE)) {
1272 dev_err(chip_info->dev,
1273 "Wait State is configured incorrectly\n");
1274 return -EINVAL;
1275 }
1276 if ((chip_info->duplex != SSP_MICROWIRE_CHANNEL_FULL_DUPLEX)
1277 && (chip_info->duplex !=
1278 SSP_MICROWIRE_CHANNEL_HALF_DUPLEX)) {
1279 dev_err(chip_info->dev,
1280 "DUPLEX is configured incorrectly\n");
1281 return -EINVAL;
1282 }
1283 }
1284 if (chip_info->cs_control == NULL) {
1285 dev_warn(chip_info->dev,
1286 "Chip Select Function is NULL for this chip\n");
1287 chip_info->cs_control = null_cs_control;
1288 }
1289 return 0;
1290}
1291
/**
 * pl022_transfer - transfer function registered to SPI master framework
 * @spi: spi device which is requesting transfer
 * @msg: spi message which is to be handled; it is queued on the driver queue
 *
 * This function is registered to the SPI framework for this SPI master
 * controller. It will queue the spi_message in the driver's queue if the
 * queue is not stopped, and then return.
 */
1301static int pl022_transfer(struct spi_device *spi, struct spi_message *msg)
1302{
1303 struct pl022 *pl022 = spi_master_get_devdata(spi->master);
1304 unsigned long flags;
1305
1306 spin_lock_irqsave(&pl022->queue_lock, flags);
1307
1308 if (pl022->run == QUEUE_STOPPED) {
1309 spin_unlock_irqrestore(&pl022->queue_lock, flags);
1310 return -ESHUTDOWN;
1311 }
1312 msg->actual_length = 0;
1313 msg->status = -EINPROGRESS;
1314 msg->state = STATE_START;
1315
1316 list_add_tail(&msg->queue, &pl022->queue);
1317 if (pl022->run == QUEUE_RUNNING && !pl022->busy)
1318 queue_work(pl022->workqueue, &pl022->pump_messages);
1319
1320 spin_unlock_irqrestore(&pl022->queue_lock, flags);
1321 return 0;
1322}
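
/*
 * For reference, a client driver reaches this hook through the generic SPI
 * core, roughly like this (a sketch only; "my_spi_dev" and the buffer names
 * are made up):
 *
 *	struct spi_transfer t = {
 *		.tx_buf = tx_buf,
 *		.rx_buf = rx_buf,
 *		.len = len,
 *	};
 *	struct spi_message m;
 *
 *	spi_message_init(&m);
 *	spi_message_add_tail(&t, &m);
 *	spi_sync(my_spi_dev, &m);
 *
 * spi_sync()/spi_async() end up calling pl022_transfer() via the core, which
 * only queues the message; the actual transfer is run later by
 * pump_messages().
 */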
1323
1324static int calculate_effective_freq(struct pl022 *pl022,
1325 int freq,
1326 struct ssp_clock_params *clk_freq)
1327{
	/* Let's calculate the frequency parameters */
1329 u16 cpsdvsr = 2;
1330 u16 scr = 0;
1331 bool freq_found = false;
1332 u32 rate;
1333 u32 max_tclk;
1334 u32 min_tclk;
1335
1336 rate = clk_get_rate(pl022->clk);
	/* cpsdvsr = 2 & scr = 0 */
1338 max_tclk = (rate / (CPSDVR_MIN * (1 + SCR_MIN)));
1339 /* cpsdvsr = 254 & scr = 255 */
1340 min_tclk = (rate / (CPSDVR_MAX * (1 + SCR_MAX)));
1341
1342 if ((freq <= max_tclk) && (freq >= min_tclk)) {
1343 while (cpsdvsr <= CPSDVR_MAX && !freq_found) {
1344 while (scr <= SCR_MAX && !freq_found) {
1345 if ((rate /
1346 (cpsdvsr * (1 + scr))) > freq)
1347 scr += 1;
1348 else {
1349 /*
1350 * This bool is made true when
1351 * effective frequency >=
1352 * target frequency is found
1353 */
1354 freq_found = true;
1355 if ((rate /
1356 (cpsdvsr * (1 + scr))) != freq) {
1357 if (scr == SCR_MIN) {
1358 cpsdvsr -= 2;
1359 scr = SCR_MAX;
1360 } else
1361 scr -= 1;
1362 }
1363 }
1364 }
1365 if (!freq_found) {
1366 cpsdvsr += 2;
1367 scr = SCR_MIN;
1368 }
1369 }
1370 if (cpsdvsr != 0) {
1371 dev_dbg(&pl022->adev->dev,
1372 "SSP Effective Frequency is %u\n",
1373 (rate / (cpsdvsr * (1 + scr))));
1374 clk_freq->cpsdvsr = (u8) (cpsdvsr & 0xFF);
1375 clk_freq->scr = (u8) (scr & 0xFF);
1376 dev_dbg(&pl022->adev->dev,
1377 "SSP cpsdvsr = %d, scr = %d\n",
1378 clk_freq->cpsdvsr, clk_freq->scr);
1379 }
1380 } else {
1381 dev_err(&pl022->adev->dev,
1382 "controller data is incorrect: out of range frequency");
1383 return -EINVAL;
1384 }
1385 return 0;
1386}
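
/*
 * Worked example (clock figures assumed for illustration only): with
 * rate = 48 MHz and a requested freq of 1 MHz, the search above keeps
 * cpsdvsr = 2 and stops at scr = 23, since 48 MHz / (2 * (1 + 23)) is
 * exactly 1 MHz. When no exact divisor exists, scr is backed off by one so
 * the effective frequency ends up at or just above the requested one.
 */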
1387
1388/**
1389 * NOT IMPLEMENTED
1390 * process_dma_info - Processes the DMA info provided by client drivers
1391 * @chip_info: chip info provided by client device
1392 * @chip: Runtime state maintained by the SSP controller for each spi device
1393 *
1394 * This function processes and stores DMA config provided by client driver
1395 * into the runtime state maintained by the SSP controller driver
1396 */
1397static int process_dma_info(struct pl022_config_chip *chip_info,
1398 struct chip_data *chip)
1399{
1400 dev_err(chip_info->dev,
1401 "cannot process DMA info, DMA not implemented!\n");
1402 return -ENOTSUPP;
1403}
1404
/**
 * pl022_setup - setup function registered to SPI master framework
 * @spi: spi device which is requesting setup
 *
 * This function is registered to the SPI framework for this SPI master
 * controller. If it is the first time setup is called by this device,
 * this function will initialize the runtime state for this chip and save
 * it in the device structure. Else it will update the runtime info with
 * the updated chip info. Nothing is really written to the controller
 * hardware here; that is not done until the actual transfer commences.
 */
1417
1418/* FIXME: JUST GUESSING the spi->mode bits understood by this driver */
1419#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
1420 | SPI_LSB_FIRST | SPI_LOOP)
1421
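/*
 * A minimal sketch (field values and the my_cs_control name are invented for
 * illustration) of the pl022_config_chip a board file could hand over in
 * spi_board_info.controller_data instead of relying on the defaults below:
 *
 *	static struct pl022_config_chip my_chip_info = {
 *		.lbm = LOOPBACK_DISABLED,
 *		.com_mode = INTERRUPT_TRANSFER,
 *		.iface = SSP_INTERFACE_MOTOROLA_SPI,
 *		.hierarchy = SSP_MASTER,
 *		.slave_tx_disable = DO_NOT_DRIVE_TX,
 *		.endian_rx = SSP_RX_MSB,
 *		.endian_tx = SSP_TX_MSB,
 *		.data_size = SSP_DATA_BITS_12,
 *		.rx_lev_trig = SSP_RX_1_OR_MORE_ELEM,
 *		.tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC,
 *		.clk_phase = SSP_CLK_SECOND_EDGE,
 *		.clk_pol = SSP_CLK_POL_IDLE_LOW,
 *		.cs_control = my_cs_control,
 *	};
 *
 * Leaving clk_freq zeroed makes pl022_setup() derive cpsdvsr/scr from
 * spi->max_speed_hz via calculate_effective_freq(); board code would
 * typically also fill in the .dev pointer used for error reporting.
 */
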
1422static int pl022_setup(struct spi_device *spi)
1423{
1424 struct pl022_config_chip *chip_info;
1425 struct chip_data *chip;
1426 int status = 0;
1427 struct pl022 *pl022 = spi_master_get_devdata(spi->master);
1428
1429 if (spi->mode & ~MODEBITS) {
1430 dev_dbg(&spi->dev, "unsupported mode bits %x\n",
1431 spi->mode & ~MODEBITS);
1432 return -EINVAL;
1433 }
1434
1435 if (!spi->max_speed_hz)
1436 return -EINVAL;
1437
1438 /* Get controller_state if one is supplied */
1439 chip = spi_get_ctldata(spi);
1440
1441 if (chip == NULL) {
1442 chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
1443 if (!chip) {
1444 dev_err(&spi->dev,
1445 "cannot allocate controller state\n");
1446 return -ENOMEM;
1447 }
1448 dev_dbg(&spi->dev,
1449 "allocated memory for controller's runtime state\n");
1450 }
1451
1452 /* Get controller data if one is supplied */
1453 chip_info = spi->controller_data;
1454
1455 if (chip_info == NULL) {
		/* spi_board_info.controller_data is not supplied */
1457 dev_dbg(&spi->dev,
1458 "using default controller_data settings\n");
1459
1460 chip_info =
1461 kzalloc(sizeof(struct pl022_config_chip), GFP_KERNEL);
1462
1463 if (!chip_info) {
1464 dev_err(&spi->dev,
1465 "cannot allocate controller data\n");
1466 status = -ENOMEM;
1467 goto err_first_setup;
1468 }
1469
1470 dev_dbg(&spi->dev, "allocated memory for controller data\n");
1471
1472 /* Pointer back to the SPI device */
1473 chip_info->dev = &spi->dev;
1474 /*
1475 * Set controller data default values:
1476 * Polling is supported by default
1477 */
1478 chip_info->lbm = LOOPBACK_DISABLED;
1479 chip_info->com_mode = POLLING_TRANSFER;
1480 chip_info->iface = SSP_INTERFACE_MOTOROLA_SPI;
1481 chip_info->hierarchy = SSP_SLAVE;
1482 chip_info->slave_tx_disable = DO_NOT_DRIVE_TX;
1483 chip_info->endian_tx = SSP_TX_LSB;
1484 chip_info->endian_rx = SSP_RX_LSB;
1485 chip_info->data_size = SSP_DATA_BITS_12;
1486 chip_info->rx_lev_trig = SSP_RX_1_OR_MORE_ELEM;
1487 chip_info->tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC;
		chip_info->clk_phase = SSP_CLK_SECOND_EDGE;
		chip_info->clk_pol = SSP_CLK_POL_IDLE_LOW;
1490 chip_info->ctrl_len = SSP_BITS_8;
1491 chip_info->wait_state = SSP_MWIRE_WAIT_ZERO;
1492 chip_info->duplex = SSP_MICROWIRE_CHANNEL_FULL_DUPLEX;
1493 chip_info->cs_control = null_cs_control;
1494 } else {
1495 dev_dbg(&spi->dev,
1496 "using user supplied controller_data settings\n");
1497 }
1498
1499 /*
1500 * We can override with custom divisors, else we use the board
1501 * frequency setting
1502 */
1503 if ((0 == chip_info->clk_freq.cpsdvsr)
1504 && (0 == chip_info->clk_freq.scr)) {
1505 status = calculate_effective_freq(pl022,
1506 spi->max_speed_hz,
1507 &chip_info->clk_freq);
1508 if (status < 0)
1509 goto err_config_params;
1510 } else {
1511 if ((chip_info->clk_freq.cpsdvsr % 2) != 0)
1512 chip_info->clk_freq.cpsdvsr =
1513 chip_info->clk_freq.cpsdvsr - 1;
1514 }
1515 status = verify_controller_parameters(pl022, chip_info);
1516 if (status) {
1517 dev_err(&spi->dev, "controller data is incorrect");
1518 goto err_config_params;
1519 }
1520 /* Now set controller state based on controller data */
1521 chip->xfer_type = chip_info->com_mode;
1522 chip->cs_control = chip_info->cs_control;
1523
1524 if (chip_info->data_size <= 8) {
		dev_dbg(&spi->dev, "1 <= n <= 8 bits per word\n");
1526 chip->n_bytes = 1;
1527 chip->read = READING_U8;
1528 chip->write = WRITING_U8;
1529 } else if (chip_info->data_size <= 16) {
1530 dev_dbg(&spi->dev, "9 <= n <= 16 bits per word\n");
1531 chip->n_bytes = 2;
1532 chip->read = READING_U16;
1533 chip->write = WRITING_U16;
1534 } else {
1535 if (pl022->vendor->max_bpw >= 32) {
1536 dev_dbg(&spi->dev, "17 <= n <= 32 bits per word\n");
1537 chip->n_bytes = 4;
1538 chip->read = READING_U32;
1539 chip->write = WRITING_U32;
1540 } else {
1541 dev_err(&spi->dev,
1542 "illegal data size for this controller!\n");
1543 dev_err(&spi->dev,
1544 "a standard pl022 can only handle "
1545 "1 <= n <= 16 bit words\n");
1546 goto err_config_params;
1547 }
1548 }
1549
1550 /* Now Initialize all register settings required for this chip */
1551 chip->cr0 = 0;
1552 chip->cr1 = 0;
1553 chip->dmacr = 0;
1554 chip->cpsr = 0;
1555 if ((chip_info->com_mode == DMA_TRANSFER)
1556 && ((pl022->master_info)->enable_dma)) {
1557 chip->enable_dma = 1;
1558 dev_dbg(&spi->dev, "DMA mode set in controller state\n");
1559 status = process_dma_info(chip_info, chip);
1560 if (status < 0)
1561 goto err_config_params;
1562 SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED,
1563 SSP_DMACR_MASK_RXDMAE, 0);
1564 SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED,
1565 SSP_DMACR_MASK_TXDMAE, 1);
1566 } else {
1567 chip->enable_dma = 0;
1568 dev_dbg(&spi->dev, "DMA mode NOT set in controller state\n");
1569 SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED,
1570 SSP_DMACR_MASK_RXDMAE, 0);
1571 SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED,
1572 SSP_DMACR_MASK_TXDMAE, 1);
1573 }
1574
1575 chip->cpsr = chip_info->clk_freq.cpsdvsr;
1576
1577 SSP_WRITE_BITS(chip->cr0, chip_info->data_size, SSP_CR0_MASK_DSS, 0);
1578 SSP_WRITE_BITS(chip->cr0, chip_info->duplex, SSP_CR0_MASK_HALFDUP, 5);
1579 SSP_WRITE_BITS(chip->cr0, chip_info->clk_pol, SSP_CR0_MASK_SPO, 6);
1580 SSP_WRITE_BITS(chip->cr0, chip_info->clk_phase, SSP_CR0_MASK_SPH, 7);
1581 SSP_WRITE_BITS(chip->cr0, chip_info->clk_freq.scr, SSP_CR0_MASK_SCR, 8);
1582 SSP_WRITE_BITS(chip->cr0, chip_info->ctrl_len, SSP_CR0_MASK_CSS, 16);
1583 SSP_WRITE_BITS(chip->cr0, chip_info->iface, SSP_CR0_MASK_FRF, 21);
1584 SSP_WRITE_BITS(chip->cr1, chip_info->lbm, SSP_CR1_MASK_LBM, 0);
1585 SSP_WRITE_BITS(chip->cr1, SSP_DISABLED, SSP_CR1_MASK_SSE, 1);
1586 SSP_WRITE_BITS(chip->cr1, chip_info->hierarchy, SSP_CR1_MASK_MS, 2);
1587 SSP_WRITE_BITS(chip->cr1, chip_info->slave_tx_disable, SSP_CR1_MASK_SOD, 3);
1588 SSP_WRITE_BITS(chip->cr1, chip_info->endian_rx, SSP_CR1_MASK_RENDN, 4);
1589 SSP_WRITE_BITS(chip->cr1, chip_info->endian_tx, SSP_CR1_MASK_TENDN, 5);
1590 SSP_WRITE_BITS(chip->cr1, chip_info->wait_state, SSP_CR1_MASK_MWAIT, 6);
1591 SSP_WRITE_BITS(chip->cr1, chip_info->rx_lev_trig, SSP_CR1_MASK_RXIFLSEL, 7);
1592 SSP_WRITE_BITS(chip->cr1, chip_info->tx_lev_trig, SSP_CR1_MASK_TXIFLSEL, 10);
1593
1594 /* Save controller_state */
1595 spi_set_ctldata(spi, chip);
1596 return status;
1597 err_config_params:
1598 err_first_setup:
1599 kfree(chip);
1600 return status;
1601}
1602
1603/**
1604 * pl022_cleanup - cleanup function registered to SPI master framework
1605 * @spi: spi device which is requesting cleanup
1606 *
1607 * This function is registered to the SPI framework for this SPI master
1608 * controller. It will free the runtime state of chip.
1609 */
1610static void pl022_cleanup(struct spi_device *spi)
1611{
1612 struct chip_data *chip = spi_get_ctldata(spi);
1613
1614 spi_set_ctldata(spi, NULL);
1615 kfree(chip);
1616}
1617
1618
1619static int __init
1620pl022_probe(struct amba_device *adev, struct amba_id *id)
1621{
1622 struct device *dev = &adev->dev;
1623 struct pl022_ssp_controller *platform_info = adev->dev.platform_data;
1624 struct spi_master *master;
1625 struct pl022 *pl022 = NULL; /*Data for this driver */
1626 int status = 0;
1627
1628 dev_info(&adev->dev,
1629 "ARM PL022 driver, device ID: 0x%08x\n", adev->periphid);
1630 if (platform_info == NULL) {
1631 dev_err(&adev->dev, "probe - no platform data supplied\n");
1632 status = -ENODEV;
1633 goto err_no_pdata;
1634 }
1635
1636 /* Allocate master with space for data */
1637 master = spi_alloc_master(dev, sizeof(struct pl022));
1638 if (master == NULL) {
1639 dev_err(&adev->dev, "probe - cannot alloc SPI master\n");
1640 status = -ENOMEM;
1641 goto err_no_master;
1642 }
1643
1644 pl022 = spi_master_get_devdata(master);
1645 pl022->master = master;
1646 pl022->master_info = platform_info;
1647 pl022->adev = adev;
1648 pl022->vendor = id->data;
1649
1650 /*
1651 * Bus Number Which has been Assigned to this SSP controller
1652 * on this board
1653 */
1654 master->bus_num = platform_info->bus_id;
1655 master->num_chipselect = platform_info->num_chipselect;
1656 master->cleanup = pl022_cleanup;
1657 master->setup = pl022_setup;
1658 master->transfer = pl022_transfer;
1659
1660 dev_dbg(&adev->dev, "BUSNO: %d\n", master->bus_num);
1661
1662 status = amba_request_regions(adev, NULL);
1663 if (status)
1664 goto err_no_ioregion;
1665
1666 pl022->virtbase = ioremap(adev->res.start, resource_size(&adev->res));
1667 if (pl022->virtbase == NULL) {
1668 status = -ENOMEM;
1669 goto err_no_ioremap;
1670 }
1671 printk(KERN_INFO "pl022: mapped registers from 0x%08x to %p\n",
1672 adev->res.start, pl022->virtbase);
1673
1674 pl022->clk = clk_get(&adev->dev, NULL);
1675 if (IS_ERR(pl022->clk)) {
1676 status = PTR_ERR(pl022->clk);
1677 dev_err(&adev->dev, "could not retrieve SSP/SPI bus clock\n");
1678 goto err_no_clk;
1679 }
1680
1681 /* Disable SSP */
1682 clk_enable(pl022->clk);
1683 writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)),
1684 SSP_CR1(pl022->virtbase));
1685 load_ssp_default_config(pl022);
1686 clk_disable(pl022->clk);
1687
1688 status = request_irq(adev->irq[0], pl022_interrupt_handler, 0, "pl022",
1689 pl022);
1690 if (status < 0) {
1691 dev_err(&adev->dev, "probe - cannot get IRQ (%d)\n", status);
1692 goto err_no_irq;
1693 }
1694 /* Initialize and start queue */
1695 status = init_queue(pl022);
1696 if (status != 0) {
1697 dev_err(&adev->dev, "probe - problem initializing queue\n");
1698 goto err_init_queue;
1699 }
1700 status = start_queue(pl022);
1701 if (status != 0) {
1702 dev_err(&adev->dev, "probe - problem starting queue\n");
1703 goto err_start_queue;
1704 }
1705 /* Register with the SPI framework */
1706 amba_set_drvdata(adev, pl022);
1707 status = spi_register_master(master);
1708 if (status != 0) {
1709 dev_err(&adev->dev,
1710 "probe - problem registering spi master\n");
1711 goto err_spi_register;
1712 }
	dev_dbg(dev, "probe succeeded\n");
1714 return 0;
1715
1716 err_spi_register:
1717 err_start_queue:
1718 err_init_queue:
1719 destroy_queue(pl022);
1720 free_irq(adev->irq[0], pl022);
1721 err_no_irq:
1722 clk_put(pl022->clk);
1723 err_no_clk:
1724 iounmap(pl022->virtbase);
1725 err_no_ioremap:
1726 amba_release_regions(adev);
1727 err_no_ioregion:
1728 spi_master_put(master);
1729 err_no_master:
1730 err_no_pdata:
1731 return status;
1732}
1733
1734static int __exit
1735pl022_remove(struct amba_device *adev)
1736{
1737 struct pl022 *pl022 = amba_get_drvdata(adev);
1738 int status = 0;
1739 if (!pl022)
1740 return 0;
1741
1742 /* Remove the queue */
1743 status = destroy_queue(pl022);
1744 if (status != 0) {
1745 dev_err(&adev->dev,
1746 "queue remove failed (%d)\n", status);
1747 return status;
1748 }
1749 load_ssp_default_config(pl022);
1750 free_irq(adev->irq[0], pl022);
1751 clk_disable(pl022->clk);
1752 clk_put(pl022->clk);
1753 iounmap(pl022->virtbase);
1754 amba_release_regions(adev);
1755 tasklet_disable(&pl022->pump_transfers);
1756 spi_unregister_master(pl022->master);
1757 spi_master_put(pl022->master);
1758 amba_set_drvdata(adev, NULL);
	dev_dbg(&adev->dev, "remove succeeded\n");
1760 return 0;
1761}
1762
1763#ifdef CONFIG_PM
1764static int pl022_suspend(struct amba_device *adev, pm_message_t state)
1765{
1766 struct pl022 *pl022 = amba_get_drvdata(adev);
1767 int status = 0;
1768
1769 status = stop_queue(pl022);
1770 if (status) {
1771 dev_warn(&adev->dev, "suspend cannot stop queue\n");
1772 return status;
1773 }
1774
1775 clk_enable(pl022->clk);
1776 load_ssp_default_config(pl022);
1777 clk_disable(pl022->clk);
1778 dev_dbg(&adev->dev, "suspended\n");
1779 return 0;
1780}
1781
1782static int pl022_resume(struct amba_device *adev)
1783{
1784 struct pl022 *pl022 = amba_get_drvdata(adev);
1785 int status = 0;
1786
1787 /* Start the queue running */
1788 status = start_queue(pl022);
1789 if (status)
1790 dev_err(&adev->dev, "problem starting queue (%d)\n", status);
1791 else
1792 dev_dbg(&adev->dev, "resumed\n");
1793
1794 return status;
1795}
1796#else
1797#define pl022_suspend NULL
1798#define pl022_resume NULL
1799#endif /* CONFIG_PM */
1800
1801static struct vendor_data vendor_arm = {
1802 .fifodepth = 8,
1803 .max_bpw = 16,
1804 .unidir = false,
1805};
1806
1807
1808static struct vendor_data vendor_st = {
1809 .fifodepth = 32,
1810 .max_bpw = 32,
1811 .unidir = false,
1812};
1813
1814static struct amba_id pl022_ids[] = {
1815 {
1816 /*
1817 * ARM PL022 variant, this has a 16bit wide
1818 * and 8 locations deep TX/RX FIFO
1819 */
1820 .id = 0x00041022,
1821 .mask = 0x000fffff,
1822 .data = &vendor_arm,
1823 },
1824 {
1825 /*
1826 * ST Micro derivative, this has 32bit wide
1827 * and 32 locations deep TX/RX FIFO
1828 */
		.id = 0x01080022,
		.mask = 0xffffffff,
1831 .data = &vendor_st,
1832 },
1833 { 0, 0 },
1834};
1835
1836static struct amba_driver pl022_driver = {
1837 .drv = {
1838 .name = "ssp-pl022",
1839 },
1840 .id_table = pl022_ids,
1841 .probe = pl022_probe,
1842 .remove = __exit_p(pl022_remove),
1843 .suspend = pl022_suspend,
1844 .resume = pl022_resume,
1845};
1846
1847
1848static int __init pl022_init(void)
1849{
1850 return amba_driver_register(&pl022_driver);
1851}
1852
1853module_init(pl022_init);
1854
1855static void __exit pl022_exit(void)
1856{
1857 amba_driver_unregister(&pl022_driver);
1858}
1859
1860module_exit(pl022_exit);
1861
1862MODULE_AUTHOR("Linus Walleij <linus.walleij@stericsson.com>");
1863MODULE_DESCRIPTION("PL022 SSP Controller Driver");
1864MODULE_LICENSE("GPL");