/*
 * bcsr.c -- Db1xxx/Pb1xxx Devboard CPLD registers ("BCSR") abstraction.
 *
 * All Alchemy development boards (except, of course, the weird PB1000)
 * have a few registers in a CPLD with a standardised layout; they mostly
 * differ only in base address.
 * All registers are 16 bits wide with 32-bit spacing.
 */
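
/*
 * Typical register access from board or driver code, as a minimal sketch
 * only; bcsr_init() below must have been called first, and the register
 * IDs come from the bcsr_id enum in asm/mach-db1x00/bcsr.h (BCSR_WHOAMI
 * and BCSR_HEXLEDS are used purely as examples here):
 *
 *	boardid = bcsr_read(BCSR_WHOAMI);
 *	bcsr_write(BCSR_HEXLEDS, 0x5a);
 *	bcsr_mod(BCSR_HEXLEDS, 0x00ff, 0x0012);	(clear low byte, set 0x12)
 */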

#include <linux/interrupt.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <asm/addrspace.h>
#include <asm/io.h>
#include <asm/mach-db1x00/bcsr.h>

static struct bcsr_reg {
	void __iomem *raddr;
	spinlock_t lock;
} bcsr_regs[BCSR_CNT];

static void __iomem *bcsr_virt;	/* KSEG1 addr of BCSR base */
static int bcsr_csc_base;	/* linux-irq of first cascaded irq */

/*
 * Remap both BCSR register blocks to KSEG1 (uncached) and precompute each
 * register's address: registers are spaced 4 bytes apart, and everything
 * from BCSR_HEXLEDS onwards lives in the second block.
 */
void __init bcsr_init(unsigned long bcsr1_phys, unsigned long bcsr2_phys)
{
	int i;

	bcsr1_phys = KSEG1ADDR(CPHYSADDR(bcsr1_phys));
	bcsr2_phys = KSEG1ADDR(CPHYSADDR(bcsr2_phys));

	bcsr_virt = (void __iomem *)bcsr1_phys;

	for (i = 0; i < BCSR_CNT; i++) {
		if (i >= BCSR_HEXLEDS)
			bcsr_regs[i].raddr = (void __iomem *)bcsr2_phys +
					(0x04 * (i - BCSR_HEXLEDS));
		else
			bcsr_regs[i].raddr = (void __iomem *)bcsr1_phys +
					(0x04 * i);

		spin_lock_init(&bcsr_regs[i].lock);
	}
}

unsigned short bcsr_read(enum bcsr_id reg)
{
	unsigned short r;
	unsigned long flags;

	spin_lock_irqsave(&bcsr_regs[reg].lock, flags);
	r = __raw_readw(bcsr_regs[reg].raddr);
	spin_unlock_irqrestore(&bcsr_regs[reg].lock, flags);
	return r;
}
EXPORT_SYMBOL_GPL(bcsr_read);

void bcsr_write(enum bcsr_id reg, unsigned short val)
{
	unsigned long flags;

	spin_lock_irqsave(&bcsr_regs[reg].lock, flags);
	__raw_writew(val, bcsr_regs[reg].raddr);
	wmb();
	spin_unlock_irqrestore(&bcsr_regs[reg].lock, flags);
}
EXPORT_SYMBOL_GPL(bcsr_write);

void bcsr_mod(enum bcsr_id reg, unsigned short clr, unsigned short set)
{
	unsigned short r;
	unsigned long flags;

	spin_lock_irqsave(&bcsr_regs[reg].lock, flags);
	r = __raw_readw(bcsr_regs[reg].raddr);
	r &= ~clr;
	r |= set;
	__raw_writew(r, bcsr_regs[reg].raddr);
	wmb();
	spin_unlock_irqrestore(&bcsr_regs[reg].lock, flags);
}
EXPORT_SYMBOL_GPL(bcsr_mod);

/*
 * DB1200/PB1200 CPLD IRQ muxer: demultiplexes the board's cascade interrupt
 * into the individual CPLD interrupt lines (an illustrative hookup sketch
 * sits at the end of this file).
 */
static void bcsr_csc_handler(struct irq_desc *d)
{
	unsigned short bisr = __raw_readw(bcsr_virt + BCSR_REG_INTSTAT);
	struct irq_chip *chip = irq_desc_get_chip(d);

	chained_irq_enter(chip, d);
	generic_handle_irq(bcsr_csc_base + __ffs(bisr));
	chained_irq_exit(chip, d);
}

static void bcsr_irq_mask(struct irq_data *d)
{
	unsigned short v = 1 << (d->irq - bcsr_csc_base);
	__raw_writew(v, bcsr_virt + BCSR_REG_MASKCLR);
	wmb();
}

static void bcsr_irq_maskack(struct irq_data *d)
{
	unsigned short v = 1 << (d->irq - bcsr_csc_base);
	__raw_writew(v, bcsr_virt + BCSR_REG_MASKCLR);
	__raw_writew(v, bcsr_virt + BCSR_REG_INTSTAT);	/* ack */
	wmb();
}

static void bcsr_irq_unmask(struct irq_data *d)
{
	unsigned short v = 1 << (d->irq - bcsr_csc_base);
	__raw_writew(v, bcsr_virt + BCSR_REG_MASKSET);
	wmb();
}

static struct irq_chip bcsr_irq_type = {
	.name		= "CPLD",
	.irq_mask	= bcsr_irq_mask,
	.irq_mask_ack	= bcsr_irq_maskack,
	.irq_unmask	= bcsr_irq_unmask,
};

/*
 * Set up the CPLD IRQ muxer: csc_start..csc_end is the range of Linux IRQ
 * numbers served by the CPLD, hook_irq is the parent interrupt to which the
 * cascade handler is chained.
 */
void __init bcsr_init_irq(int csc_start, int csc_end, int hook_irq)
{
	unsigned int irq;

	/* mask & enable & ack all */
	__raw_writew(0xffff, bcsr_virt + BCSR_REG_MASKCLR);
	__raw_writew(0xffff, bcsr_virt + BCSR_REG_INTSET);
	__raw_writew(0xffff, bcsr_virt + BCSR_REG_INTSTAT);
	wmb();

	bcsr_csc_base = csc_start;

	for (irq = csc_start; irq <= csc_end; irq++)
		irq_set_chip_and_handler_name(irq, &bcsr_irq_type,
					      handle_level_irq, "level");

	irq_set_chained_handler(hook_irq, bcsr_csc_handler);
}
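
/*
 * Minimal hookup sketch, for illustration only: board setup code would
 * typically call the functions above roughly as below. BOARD_BCSR1_PHYS,
 * BOARD_BCSR2_PHYS, BOARD_CPLD_IRQ_BASE, BOARD_CPLD_IRQ_END and
 * BOARD_CASCADE_IRQ are hypothetical placeholders for board-specific values.
 *
 *	bcsr_init(BOARD_BCSR1_PHYS, BOARD_BCSR2_PHYS);
 *	bcsr_init_irq(BOARD_CPLD_IRQ_BASE, BOARD_CPLD_IRQ_END,
 *		      BOARD_CASCADE_IRQ);
 */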