/*
 * Intel IXP4xx Queue Manager driver for Linux
 *
 * Copyright (C) 2007 Krzysztof Halasa <khc@pm.waw.pl>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <mach/qmgr.h>

#define DEBUG 0

struct qmgr_regs __iomem *qmgr_regs;
static struct resource *mem_res;
static spinlock_t qmgr_lock;
static u32 used_sram_bitmap[4]; /* 128 16-dword pages */
static void (*irq_handlers[HALF_QUEUES])(void *pdev);
static void *irq_pdevs[HALF_QUEUES];

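/* Select which hardware event raises the queue's interrupt (3-bit source
 * code in the queue's 4-bit field of the irqsrc registers) and register
 * the handler called from the dispatcher below.
 */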
void qmgr_set_irq(unsigned int queue, int src,
		  void (*handler)(void *pdev), void *pdev)
{
	u32 __iomem *reg = &qmgr_regs->irqsrc[queue / 8]; /* 8 queues / u32 */
	int bit = (queue % 8) * 4; /* 3 bits + 1 reserved bit per queue */
	unsigned long flags;

	src &= 7;
	spin_lock_irqsave(&qmgr_lock, flags);
	__raw_writel((__raw_readl(reg) & ~(7 << bit)) | (src << bit), reg);
	irq_handlers[queue] = handler;
	irq_pdevs[queue] = pdev;
	spin_unlock_irqrestore(&qmgr_lock, flags);
}

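/* Dispatcher for the first queue manager interrupt line (queues 0-31):
 * acknowledge all pending status bits, then call the registered handler
 * for every queue that raised an interrupt.
 */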
static irqreturn_t qmgr_irq1(int irq, void *pdev)
{
	int i;
	u32 val = __raw_readl(&qmgr_regs->irqstat[0]);
	__raw_writel(val, &qmgr_regs->irqstat[0]); /* ACK */

	for (i = 0; i < HALF_QUEUES; i++)
		if (val & (1 << i))
			irq_handlers[i](irq_pdevs[i]);

	return val ? IRQ_HANDLED : IRQ_NONE;
}

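/* Unmask the queue's bit in the interrupt-enable register. */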
void qmgr_enable_irq(unsigned int queue)
{
	unsigned long flags;

	spin_lock_irqsave(&qmgr_lock, flags);
	__raw_writel(__raw_readl(&qmgr_regs->irqen[0]) | (1 << queue),
		     &qmgr_regs->irqen[0]);
	spin_unlock_irqrestore(&qmgr_lock, flags);
}

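/* Mask the queue's interrupt and clear any interrupt already latched
 * for it in the status register.
 */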
void qmgr_disable_irq(unsigned int queue)
{
	unsigned long flags;

	spin_lock_irqsave(&qmgr_lock, flags);
	__raw_writel(__raw_readl(&qmgr_regs->irqen[0]) & ~(1 << queue),
		     &qmgr_regs->irqen[0]);
	__raw_writel(1 << queue, &qmgr_regs->irqstat[0]); /* clear */
	spin_unlock_irqrestore(&qmgr_lock, flags);
}

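/* Shift the 128-bit SRAM allocation mask left by one 16-dword page. */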
static inline void shift_mask(u32 *mask)
{
	mask[3] = mask[3] << 1 | mask[2] >> 31;
	mask[2] = mask[2] << 1 | mask[1] >> 31;
	mask[1] = mask[1] << 1 | mask[0] >> 31;
	mask[0] <<= 1;
}

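/* Allocate queue manager SRAM for a queue of 'len' dwords (16, 32, 64 or
 * 128), program its configuration word (size, SRAM address in 16-dword
 * pages, nearly-empty/nearly-full watermarks) and mark the pages as used
 * in the allocation bitmap.  Returns 0 on success or a negative errno.
 */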
int qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */,
		       unsigned int nearly_empty_watermark,
		       unsigned int nearly_full_watermark)
{
	u32 cfg, addr = 0, mask[4]; /* in 16-dwords */
	int err;

	if (queue >= HALF_QUEUES)
		return -ERANGE;

	if ((nearly_empty_watermark | nearly_full_watermark) & ~7)
		return -EINVAL;

	switch (len) {
	case  16:
		cfg = 0 << 24;
		mask[0] = 0x1;
		break;
	case  32:
		cfg = 1 << 24;
		mask[0] = 0x3;
		break;
	case  64:
		cfg = 2 << 24;
		mask[0] = 0xF;
		break;
	case 128:
		cfg = 3 << 24;
		mask[0] = 0xFF;
		break;
	default:
		return -EINVAL;
	}

	cfg |= nearly_empty_watermark << 26;
	cfg |= nearly_full_watermark << 29;
	len /= 16; /* in 16-dwords: 1, 2, 4 or 8 */
	mask[1] = mask[2] = mask[3] = 0;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	spin_lock_irq(&qmgr_lock);
	if (__raw_readl(&qmgr_regs->sram[queue])) {
		err = -EBUSY;
		goto err;
	}

	while (1) {
		if (!(used_sram_bitmap[0] & mask[0]) &&
		    !(used_sram_bitmap[1] & mask[1]) &&
		    !(used_sram_bitmap[2] & mask[2]) &&
		    !(used_sram_bitmap[3] & mask[3]))
			break; /* found free space */

		addr++;
		shift_mask(mask);
		if (addr + len > ARRAY_SIZE(qmgr_regs->sram)) {
			printk(KERN_ERR "qmgr: no free SRAM space for"
			       " queue %i\n", queue);
			err = -ENOMEM;
			goto err;
		}
	}

	used_sram_bitmap[0] |= mask[0];
	used_sram_bitmap[1] |= mask[1];
	used_sram_bitmap[2] |= mask[2];
	used_sram_bitmap[3] |= mask[3];
	__raw_writel(cfg | (addr << 14), &qmgr_regs->sram[queue]);
	spin_unlock_irq(&qmgr_lock);

#if DEBUG
	printk(KERN_DEBUG "qmgr: requested queue %i, addr = 0x%02X\n",
	       queue, addr);
#endif
	return 0;

err:
	spin_unlock_irq(&qmgr_lock);
	module_put(THIS_MODULE);
	return err;
}

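/* Free the SRAM pages used by the queue, clear its configuration word and
 * drain any entries left behind, complaining about each of them.
 */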
void qmgr_release_queue(unsigned int queue)
{
	u32 cfg, addr, mask[4];

	BUG_ON(queue >= HALF_QUEUES); /* not in valid range */

	spin_lock_irq(&qmgr_lock);
	cfg = __raw_readl(&qmgr_regs->sram[queue]);
	addr = (cfg >> 14) & 0xFF;

	BUG_ON(!addr); /* not requested */

	switch ((cfg >> 24) & 3) {
	case 0: mask[0] = 0x1; break;
	case 1: mask[0] = 0x3; break;
	case 2: mask[0] = 0xF; break;
	case 3: mask[0] = 0xFF; break;
	}

	mask[1] = mask[2] = mask[3] = 0;

	while (addr--)
		shift_mask(mask);

	__raw_writel(0, &qmgr_regs->sram[queue]);

	used_sram_bitmap[0] &= ~mask[0];
	used_sram_bitmap[1] &= ~mask[1];
	used_sram_bitmap[2] &= ~mask[2];
	used_sram_bitmap[3] &= ~mask[3];
	irq_handlers[queue] = NULL; /* catch IRQ bugs */
	spin_unlock_irq(&qmgr_lock);

	module_put(THIS_MODULE);

	while ((addr = qmgr_get_entry(queue)))
		printk(KERN_ERR "qmgr: released queue %d not empty: 0x%08X\n",
		       queue, addr);
#if DEBUG
	printk(KERN_DEBUG "qmgr: released queue %i\n", queue);
#endif
}

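/*
 * Minimal usage sketch (not part of this driver): a consumer such as a
 * port driver is expected to do roughly the following, assuming the
 * QUEUE_IRQ_SRC_* constants and the qmgr_get_entry()/qmgr_put_entry()
 * helpers declared in <mach/qmgr.h>.  "rxq", "rx_handler" and "dev" are
 * hypothetical names used only for illustration:
 *
 *	err = qmgr_request_queue(rxq, 32, 0, 0);   (32-dword queue)
 *	if (err)
 *		return err;
 *	qmgr_set_irq(rxq, QUEUE_IRQ_SRC_NOT_EMPTY, rx_handler, dev);
 *	qmgr_enable_irq(rxq);
 *	...
 *	qmgr_disable_irq(rxq);
 *	qmgr_release_queue(rxq);
 */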
static int qmgr_init(void)
{
	int i, err;

	mem_res = request_mem_region(IXP4XX_QMGR_BASE_PHYS,
				     IXP4XX_QMGR_REGION_SIZE,
				     "IXP4xx Queue Manager");
	if (mem_res == NULL)
		return -EBUSY;

	qmgr_regs = ioremap(IXP4XX_QMGR_BASE_PHYS, IXP4XX_QMGR_REGION_SIZE);
	if (qmgr_regs == NULL) {
		err = -ENOMEM;
		goto error_map;
	}

	/* reset qmgr registers */
	for (i = 0; i < 4; i++) {
		__raw_writel(0x33333333, &qmgr_regs->stat1[i]);
		__raw_writel(0, &qmgr_regs->irqsrc[i]);
	}
	for (i = 0; i < 2; i++) {
		__raw_writel(0, &qmgr_regs->stat2[i]);
		__raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[i]); /* clear */
		__raw_writel(0, &qmgr_regs->irqen[i]);
	}

	for (i = 0; i < QUEUES; i++)
		__raw_writel(0, &qmgr_regs->sram[i]);

	err = request_irq(IRQ_IXP4XX_QM1, qmgr_irq1, 0,
			  "IXP4xx Queue Manager", NULL);
	if (err) {
		printk(KERN_ERR "qmgr: failed to request IRQ%i\n",
		       IRQ_IXP4XX_QM1);
		goto error_irq;
	}

	used_sram_bitmap[0] = 0xF; /* 4 first pages reserved for config */
	spin_lock_init(&qmgr_lock);

	printk(KERN_INFO "IXP4xx Queue Manager initialized.\n");
	return 0;

error_irq:
	iounmap(qmgr_regs);
error_map:
	release_mem_region(IXP4XX_QMGR_BASE_PHYS, IXP4XX_QMGR_REGION_SIZE);
	return err;
}

static void qmgr_remove(void)
{
	free_irq(IRQ_IXP4XX_QM1, NULL);
	synchronize_irq(IRQ_IXP4XX_QM1);
	iounmap(qmgr_regs);
	release_mem_region(IXP4XX_QMGR_BASE_PHYS, IXP4XX_QMGR_REGION_SIZE);
}

module_init(qmgr_init);
module_exit(qmgr_remove);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Krzysztof Halasa");

EXPORT_SYMBOL(qmgr_regs);
EXPORT_SYMBOL(qmgr_set_irq);
EXPORT_SYMBOL(qmgr_enable_irq);
EXPORT_SYMBOL(qmgr_disable_irq);
EXPORT_SYMBOL(qmgr_request_queue);
EXPORT_SYMBOL(qmgr_release_queue);