blob: 7eaba3511ea831ab293d4532d745fbe8753a758e [file] [log] [blame]
Martyn Welch60479692009-07-31 09:28:17 +01001/*
2 * Support for the Tundra Universe I/II VME-PCI Bridge Chips
3 *
Martyn Welch66bd8db2010-02-18 15:12:52 +00004 * Author: Martyn Welch <martyn.welch@ge.com>
5 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
Martyn Welch60479692009-07-31 09:28:17 +01006 *
7 * Based on work by Tom Armistead and Ajit Prem
8 * Copyright 2004 Motorola Inc.
9 *
10 * Derived from ca91c042.c by Michael Wyrick
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
16 */
17
Martyn Welch60479692009-07-31 09:28:17 +010018#include <linux/module.h>
19#include <linux/mm.h>
20#include <linux/types.h>
21#include <linux/errno.h>
Martyn Welch60479692009-07-31 09:28:17 +010022#include <linux/pci.h>
23#include <linux/dma-mapping.h>
24#include <linux/poll.h>
25#include <linux/interrupt.h>
26#include <linux/spinlock.h>
Greg Kroah-Hartman6af783c2009-10-12 15:00:08 -070027#include <linux/sched.h>
Martyn Welch60479692009-07-31 09:28:17 +010028#include <asm/time.h>
29#include <asm/io.h>
30#include <asm/uaccess.h>
31
32#include "../vme.h"
33#include "../vme_bridge.h"
34#include "vme_ca91cx42.h"
35
/* Forward declarations for the PCI driver hooks and module entry points */
static int __init ca91cx42_init(void);
static int ca91cx42_probe(struct pci_dev *, const struct pci_device_id *);
static void ca91cx42_remove(struct pci_dev *);
static void __exit ca91cx42_exit(void);

/* Module parameters */
static int geoid;	/* Geographical address override; 0 = read slot from VCSR_BS */

/* This driver supports a single bridge instance, held here */
static struct vme_bridge *ca91cx42_bridge;
static wait_queue_head_t dma_queue;	/* Woken by the DMA-complete interrupt */
static wait_queue_head_t iack_queue;	/* Woken by the software-IACK interrupt */
#if 0
static wait_queue_head_t lm_queue;
#endif
static wait_queue_head_t mbox_queue;	/* Woken by the mailbox interrupt */

static void (*lm_callback[4])(int);	/* Called in interrupt handler */
/* CR/CSR image buffer — presumably allocated in code outside this chunk */
static void *crcsr_kernel;
static dma_addr_t crcsr_bus;

static struct mutex vme_rmw;	/* Only one RMW cycle at a time */
static struct mutex vme_int;	/*
				 * Only one VME interrupt can be
				 * generated at a time, provide locking
				 */

static char driver_name[] = "vme_ca91cx42";

/* PCI IDs this driver binds to: the Tundra CA91C142 (Universe) bridge */
static const struct pci_device_id ca91cx42_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_CA91C142) },
	{ },
};

static struct pci_driver ca91cx42_driver = {
	.name = driver_name,
	.id_table = ca91cx42_ids,
	.probe = ca91cx42_probe,
	.remove = ca91cx42_remove,
};
75
Martyn Welch3d0f8bc2009-08-27 17:00:40 +010076static u32 ca91cx42_DMA_irqhandler(void)
Martyn Welch60479692009-07-31 09:28:17 +010077{
Martyn Welch3d0f8bc2009-08-27 17:00:40 +010078 wake_up(&dma_queue);
Martyn Welch60479692009-07-31 09:28:17 +010079
Martyn Welch3d0f8bc2009-08-27 17:00:40 +010080 return CA91CX42_LINT_DMA;
Martyn Welch60479692009-07-31 09:28:17 +010081}
82
Martyn Welch3d0f8bc2009-08-27 17:00:40 +010083static u32 ca91cx42_LM_irqhandler(u32 stat)
84{
85 int i;
86 u32 serviced = 0;
87
88 for (i = 0; i < 4; i++) {
89 if (stat & CA91CX42_LINT_LM[i]) {
90 /* We only enable interrupts if the callback is set */
91 lm_callback[i](i);
92 serviced |= CA91CX42_LINT_LM[i];
93 }
94 }
95
96 return serviced;
97}
98
99/* XXX This needs to be split into 4 queues */
100static u32 ca91cx42_MB_irqhandler(int mbox_mask)
101{
102 wake_up(&mbox_queue);
103
104 return CA91CX42_LINT_MBOX;
105}
106
107static u32 ca91cx42_IACK_irqhandler(void)
108{
109 wake_up(&iack_queue);
110
111 return CA91CX42_LINT_SW_IACK;
112}
113
#if 0
/*
 * Disabled: check (and optionally clear) the PCI target-abort flag.
 * Returns 1 if S_TA was flagged, 0 otherwise.
 */
int ca91cx42_bus_error_chk(int clrflag)
{
	int tmp;
	tmp = ioread32(ca91cx42_bridge->base + PCI_COMMAND);
	if (tmp & 0x08000000) {	/* S_TA is Set */
		if (clrflag)
			iowrite32(tmp | 0x08000000,
				ca91cx42_bridge->base + PCI_COMMAND);
		return 1;
	}
	return 0;
}
#endif
Martyn Welch60479692009-07-31 09:28:17 +0100128
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100129static u32 ca91cx42_VERR_irqhandler(void)
Martyn Welch60479692009-07-31 09:28:17 +0100130{
131 int val;
132
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100133 val = ioread32(ca91cx42_bridge->base + DGCS);
Martyn Welch60479692009-07-31 09:28:17 +0100134
135 if (!(val & 0x00000800)) {
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100136 printk(KERN_ERR "ca91c042: ca91cx42_VERR_irqhandler DMA Read "
137 "Error DGCS=%08X\n", val);
Martyn Welch60479692009-07-31 09:28:17 +0100138 }
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100139
140 return CA91CX42_LINT_VERR;
Martyn Welch60479692009-07-31 09:28:17 +0100141}
142
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100143static u32 ca91cx42_LERR_irqhandler(void)
Martyn Welch60479692009-07-31 09:28:17 +0100144{
145 int val;
146
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100147 val = ioread32(ca91cx42_bridge->base + DGCS);
Martyn Welch60479692009-07-31 09:28:17 +0100148
149 if (!(val & 0x00000800)) {
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100150 printk(KERN_ERR "ca91c042: ca91cx42_LERR_irqhandler DMA Read "
151 "Error DGCS=%08X\n", val);
152
Martyn Welch60479692009-07-31 09:28:17 +0100153 }
154
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100155 return CA91CX42_LINT_LERR;
Martyn Welch60479692009-07-31 09:28:17 +0100156}
157
Martyn Welch60479692009-07-31 09:28:17 +0100158
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100159static u32 ca91cx42_VIRQ_irqhandler(int stat)
Martyn Welch60479692009-07-31 09:28:17 +0100160{
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100161 int vec, i, serviced = 0;
Martyn Welch60479692009-07-31 09:28:17 +0100162
163 for (i = 7; i > 0; i--) {
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100164 if (stat & (1 << i)) {
165 vec = ioread32(ca91cx42_bridge->base +
166 CA91CX42_V_STATID[i]) & 0xff;
167
Martyn Welchc813f592009-10-29 16:34:54 +0000168 vme_irq_handler(ca91cx42_bridge, i, vec);
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100169
170 serviced |= (1 << i);
Martyn Welch60479692009-07-31 09:28:17 +0100171 }
172 }
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100173
174 return serviced;
Martyn Welch60479692009-07-31 09:28:17 +0100175}
176
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100177static irqreturn_t ca91cx42_irqhandler(int irq, void *dev_id)
Martyn Welch60479692009-07-31 09:28:17 +0100178{
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100179 u32 stat, enable, serviced = 0;
Martyn Welch60479692009-07-31 09:28:17 +0100180
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100181 if (dev_id != ca91cx42_bridge->base)
Martyn Welch60479692009-07-31 09:28:17 +0100182 return IRQ_NONE;
183
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100184 enable = ioread32(ca91cx42_bridge->base + LINT_EN);
185 stat = ioread32(ca91cx42_bridge->base + LINT_STAT);
Martyn Welch60479692009-07-31 09:28:17 +0100186
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100187 /* Only look at unmasked interrupts */
188 stat &= enable;
189
190 if (unlikely(!stat))
191 return IRQ_NONE;
192
193 if (stat & CA91CX42_LINT_DMA)
194 serviced |= ca91cx42_DMA_irqhandler();
195 if (stat & (CA91CX42_LINT_LM0 | CA91CX42_LINT_LM1 | CA91CX42_LINT_LM2 |
196 CA91CX42_LINT_LM3))
197 serviced |= ca91cx42_LM_irqhandler(stat);
198 if (stat & CA91CX42_LINT_MBOX)
199 serviced |= ca91cx42_MB_irqhandler(stat);
200 if (stat & CA91CX42_LINT_SW_IACK)
201 serviced |= ca91cx42_IACK_irqhandler();
202 if (stat & CA91CX42_LINT_VERR)
203 serviced |= ca91cx42_VERR_irqhandler();
204 if (stat & CA91CX42_LINT_LERR)
205 serviced |= ca91cx42_LERR_irqhandler();
206 if (stat & (CA91CX42_LINT_VIRQ1 | CA91CX42_LINT_VIRQ2 |
207 CA91CX42_LINT_VIRQ3 | CA91CX42_LINT_VIRQ4 |
208 CA91CX42_LINT_VIRQ5 | CA91CX42_LINT_VIRQ6 |
209 CA91CX42_LINT_VIRQ7))
210 serviced |= ca91cx42_VIRQ_irqhandler(stat);
211
212 /* Clear serviced interrupts */
213 iowrite32(stat, ca91cx42_bridge->base + LINT_STAT);
Martyn Welch60479692009-07-31 09:28:17 +0100214
215 return IRQ_HANDLED;
216}
217
/*
 * Hook up and initialise the bridge's PCI interrupt.
 *
 * Masks everything, clears pending status, requests the (shared) PCI
 * IRQ line with the pci_dev as dev_id, and finally enables the
 * interrupt sources this driver always services.
 *
 * Returns 0 on success, or the error from request_irq().
 */
static int ca91cx42_irq_init(struct vme_bridge *bridge)
{
	int result, tmp;
	struct pci_dev *pdev;

	/* Need pdev */
	pdev = container_of(bridge->parent, struct pci_dev, dev);

	/* Initialise list for VME bus errors */
	INIT_LIST_HEAD(&(bridge->vme_errors));

	mutex_init(&(bridge->irq_mtx));

	/* Disable interrupts from PCI to VME */
	iowrite32(0, bridge->base + VINT_EN);

	/* Disable PCI interrupts */
	iowrite32(0, bridge->base + LINT_EN);
	/* Clear Any Pending PCI Interrupts */
	iowrite32(0x00FFFFFF, bridge->base + LINT_STAT);

	result = request_irq(pdev->irq, ca91cx42_irqhandler, IRQF_SHARED,
			driver_name, pdev);
	if (result) {
		dev_err(&pdev->dev, "Can't get assigned pci irq vector %02X\n",
			pdev->irq);
		return result;
	}

	/* Ensure all interrupts are mapped to PCI Interrupt 0 */
	iowrite32(0, bridge->base + LINT_MAP0);
	iowrite32(0, bridge->base + LINT_MAP1);
	iowrite32(0, bridge->base + LINT_MAP2);

	/*
	 * Enable mailbox, software-IACK, VME error, local error and DMA
	 * interrupts.  (Location-monitor and VIRQ interrupts are not
	 * enabled here — VIRQ levels are toggled by ca91cx42_irq_set();
	 * LM presumably likewise on callback registration, outside this
	 * chunk.)
	 */
	tmp = CA91CX42_LINT_MBOX3 | CA91CX42_LINT_MBOX2 | CA91CX42_LINT_MBOX1 |
		CA91CX42_LINT_MBOX0 | CA91CX42_LINT_SW_IACK |
		CA91CX42_LINT_VERR | CA91CX42_LINT_LERR | CA91CX42_LINT_DMA;

	iowrite32(tmp, bridge->base + LINT_EN);

	return 0;
}
261
/*
 * Tear down interrupt handling: mask all interrupt sources on the
 * bridge, acknowledge anything pending, then release the PCI IRQ line.
 */
static void ca91cx42_irq_exit(struct pci_dev *pdev)
{
	/* Disable interrupts from PCI to VME */
	iowrite32(0, ca91cx42_bridge->base + VINT_EN);

	/* Disable PCI interrupts */
	iowrite32(0, ca91cx42_bridge->base + LINT_EN);
	/* Clear Any Pending PCI Interrupts */
	iowrite32(0x00FFFFFF, ca91cx42_bridge->base + LINT_STAT);

	/* dev_id must match what request_irq() was given (the pdev) */
	free_irq(pdev->irq, pdev);
}
274
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100275/*
276 * Set up an VME interrupt
277 */
Martyn Welchc813f592009-10-29 16:34:54 +0000278void ca91cx42_irq_set(int level, int state, int sync)
279
Martyn Welch60479692009-07-31 09:28:17 +0100280{
Martyn Welchc813f592009-10-29 16:34:54 +0000281 struct pci_dev *pdev;
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100282 u32 tmp;
Martyn Welch60479692009-07-31 09:28:17 +0100283
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100284 /* Enable IRQ level */
285 tmp = ioread32(ca91cx42_bridge->base + LINT_EN);
Martyn Welchc813f592009-10-29 16:34:54 +0000286
287 if (state == 0)
288 tmp &= ~CA91CX42_LINT_VIRQ[level];
289 else
290 tmp |= CA91CX42_LINT_VIRQ[level];
291
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100292 iowrite32(tmp, ca91cx42_bridge->base + LINT_EN);
293
Martyn Welchc813f592009-10-29 16:34:54 +0000294 if ((state == 0) && (sync != 0)) {
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100295 pdev = container_of(ca91cx42_bridge->parent, struct pci_dev,
296 dev);
297
298 synchronize_irq(pdev->irq);
Martyn Welch60479692009-07-31 09:28:17 +0100299 }
Martyn Welch60479692009-07-31 09:28:17 +0100300}
301
/*
 * Generate a VME interrupt at 'level' with Status/ID vector 'statid'.
 *
 * Serialised by vme_int — only one VME interrupt can be generated at a
 * time.  Returns 0 on success, -EINVAL for an odd statid (the Universe
 * can only generate even vectors).
 */
int ca91cx42_irq_generate(int level, int statid)
{
	u32 tmp;

	/* Universe can only generate even vectors */
	if (statid & 1)
		return -EINVAL;

	mutex_lock(&(vme_int));

	tmp = ioread32(ca91cx42_bridge->base + VINT_EN);

	/* Set Status/ID */
	iowrite32(statid << 24, ca91cx42_bridge->base + STATID);

	/* Assert VMEbus IRQ */
	tmp = tmp | (1 << (level + 24));
	iowrite32(tmp, ca91cx42_bridge->base + VINT_EN);

	/*
	 * Wait for IACK.
	 *
	 * NOTE(review): the wait condition is the constant 0, so the
	 * wake_up() from ca91cx42_IACK_irqhandler() does not terminate
	 * the wait (a false condition puts the waiter straight back to
	 * sleep); only a signal does.  Looks like this should test an
	 * "IACK received" flag — confirm intended behaviour.
	 */
	wait_event_interruptible(iack_queue, 0);

	/* Return interrupt to low state */
	tmp = ioread32(ca91cx42_bridge->base + VINT_EN);
	tmp = tmp & ~(1 << (level + 24));
	iowrite32(tmp, ca91cx42_bridge->base + VINT_EN);

	mutex_unlock(&(vme_int));

	return 0;
}
333
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100334int ca91cx42_slave_set(struct vme_slave_resource *image, int enabled,
335 unsigned long long vme_base, unsigned long long size,
336 dma_addr_t pci_base, vme_address_t aspace, vme_cycle_t cycle)
Martyn Welch60479692009-07-31 09:28:17 +0100337{
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100338 unsigned int i, addr = 0, granularity = 0;
339 unsigned int temp_ctl = 0;
340 unsigned int vme_bound, pci_offset;
Martyn Welch60479692009-07-31 09:28:17 +0100341
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100342 i = image->number;
Martyn Welch60479692009-07-31 09:28:17 +0100343
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100344 switch (aspace) {
345 case VME_A16:
346 addr |= CA91CX42_VSI_CTL_VAS_A16;
347 break;
348 case VME_A24:
349 addr |= CA91CX42_VSI_CTL_VAS_A24;
350 break;
351 case VME_A32:
352 addr |= CA91CX42_VSI_CTL_VAS_A32;
353 break;
354 case VME_USER1:
355 addr |= CA91CX42_VSI_CTL_VAS_USER1;
356 break;
357 case VME_USER2:
358 addr |= CA91CX42_VSI_CTL_VAS_USER2;
359 break;
Martyn Welch60479692009-07-31 09:28:17 +0100360 case VME_A64:
361 case VME_CRCSR:
362 case VME_USER3:
363 case VME_USER4:
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100364 default:
365 printk(KERN_ERR "Invalid address space\n");
366 return -EINVAL;
Martyn Welch60479692009-07-31 09:28:17 +0100367 break;
368 }
369
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100370 /*
371 * Bound address is a valid address for the window, adjust
372 * accordingly
373 */
374 vme_bound = vme_base + size - granularity;
375 pci_offset = pci_base - vme_base;
Martyn Welch60479692009-07-31 09:28:17 +0100376
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100377 /* XXX Need to check that vme_base, vme_bound and pci_offset aren't
378 * too big for registers
379 */
380
381 if ((i == 0) || (i == 4))
382 granularity = 0x1000;
383 else
384 granularity = 0x10000;
385
386 if (vme_base & (granularity - 1)) {
387 printk(KERN_ERR "Invalid VME base alignment\n");
388 return -EINVAL;
389 }
390 if (vme_bound & (granularity - 1)) {
391 printk(KERN_ERR "Invalid VME bound alignment\n");
392 return -EINVAL;
393 }
394 if (pci_offset & (granularity - 1)) {
395 printk(KERN_ERR "Invalid PCI Offset alignment\n");
396 return -EINVAL;
397 }
398
399 /* Disable while we are mucking around */
400 temp_ctl = ioread32(ca91cx42_bridge->base + CA91CX42_VSI_CTL[i]);
401 temp_ctl &= ~CA91CX42_VSI_CTL_EN;
402 iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_VSI_CTL[i]);
403
404 /* Setup mapping */
405 iowrite32(vme_base, ca91cx42_bridge->base + CA91CX42_VSI_BS[i]);
406 iowrite32(vme_bound, ca91cx42_bridge->base + CA91CX42_VSI_BD[i]);
407 iowrite32(pci_offset, ca91cx42_bridge->base + CA91CX42_VSI_TO[i]);
408
409/* XXX Prefetch stuff currently unsupported */
410#if 0
Martyn Welch60479692009-07-31 09:28:17 +0100411 if (vmeIn->wrPostEnable)
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100412 temp_ctl |= CA91CX42_VSI_CTL_PWEN;
Martyn Welch60479692009-07-31 09:28:17 +0100413 if (vmeIn->prefetchEnable)
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100414 temp_ctl |= CA91CX42_VSI_CTL_PREN;
Martyn Welch60479692009-07-31 09:28:17 +0100415 if (vmeIn->rmwLock)
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100416 temp_ctl |= CA91CX42_VSI_CTL_LLRMW;
Martyn Welch60479692009-07-31 09:28:17 +0100417 if (vmeIn->data64BitCapable)
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100418 temp_ctl |= CA91CX42_VSI_CTL_LD64EN;
419#endif
Martyn Welch60479692009-07-31 09:28:17 +0100420
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100421 /* Setup address space */
422 temp_ctl &= ~CA91CX42_VSI_CTL_VAS_M;
423 temp_ctl |= addr;
Martyn Welch60479692009-07-31 09:28:17 +0100424
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100425 /* Setup cycle types */
426 temp_ctl &= ~(CA91CX42_VSI_CTL_PGM_M | CA91CX42_VSI_CTL_SUPER_M);
427 if (cycle & VME_SUPER)
428 temp_ctl |= CA91CX42_VSI_CTL_SUPER_SUPR;
429 if (cycle & VME_USER)
430 temp_ctl |= CA91CX42_VSI_CTL_SUPER_NPRIV;
431 if (cycle & VME_PROG)
432 temp_ctl |= CA91CX42_VSI_CTL_PGM_PGM;
433 if (cycle & VME_DATA)
434 temp_ctl |= CA91CX42_VSI_CTL_PGM_DATA;
Martyn Welch60479692009-07-31 09:28:17 +0100435
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100436 /* Write ctl reg without enable */
437 iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_VSI_CTL[i]);
438
439 if (enabled)
440 temp_ctl |= CA91CX42_VSI_CTL_EN;
441
442 iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_VSI_CTL[i]);
443
444 return 0;
Martyn Welch60479692009-07-31 09:28:17 +0100445}
446
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100447int ca91cx42_slave_get(struct vme_slave_resource *image, int *enabled,
448 unsigned long long *vme_base, unsigned long long *size,
449 dma_addr_t *pci_base, vme_address_t *aspace, vme_cycle_t *cycle)
Martyn Welch60479692009-07-31 09:28:17 +0100450{
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100451 unsigned int i, granularity = 0, ctl = 0;
452 unsigned long long vme_bound, pci_offset;
Martyn Welch60479692009-07-31 09:28:17 +0100453
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100454 i = image->number;
Martyn Welch60479692009-07-31 09:28:17 +0100455
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100456 if ((i == 0) || (i == 4))
457 granularity = 0x1000;
458 else
459 granularity = 0x10000;
Martyn Welch60479692009-07-31 09:28:17 +0100460
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100461 /* Read Registers */
462 ctl = ioread32(ca91cx42_bridge->base + CA91CX42_VSI_CTL[i]);
Martyn Welch60479692009-07-31 09:28:17 +0100463
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100464 *vme_base = ioread32(ca91cx42_bridge->base + CA91CX42_VSI_BS[i]);
465 vme_bound = ioread32(ca91cx42_bridge->base + CA91CX42_VSI_BD[i]);
466 pci_offset = ioread32(ca91cx42_bridge->base + CA91CX42_VSI_TO[i]);
Martyn Welch60479692009-07-31 09:28:17 +0100467
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100468 *pci_base = (dma_addr_t)vme_base + pci_offset;
469 *size = (unsigned long long)((vme_bound - *vme_base) + granularity);
470
471 *enabled = 0;
472 *aspace = 0;
473 *cycle = 0;
474
475 if (ctl & CA91CX42_VSI_CTL_EN)
476 *enabled = 1;
477
478 if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A16)
479 *aspace = VME_A16;
480 if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A24)
481 *aspace = VME_A24;
482 if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A32)
483 *aspace = VME_A32;
484 if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_USER1)
485 *aspace = VME_USER1;
486 if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_USER2)
487 *aspace = VME_USER2;
488
489 if (ctl & CA91CX42_VSI_CTL_SUPER_SUPR)
490 *cycle |= VME_SUPER;
491 if (ctl & CA91CX42_VSI_CTL_SUPER_NPRIV)
492 *cycle |= VME_USER;
493 if (ctl & CA91CX42_VSI_CTL_PGM_PGM)
494 *cycle |= VME_PROG;
495 if (ctl & CA91CX42_VSI_CTL_PGM_DATA)
496 *cycle |= VME_DATA;
497
498 return 0;
Martyn Welch60479692009-07-31 09:28:17 +0100499}
500
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100501/*
502 * Allocate and map PCI Resource
503 */
504static int ca91cx42_alloc_resource(struct vme_master_resource *image,
505 unsigned long long size)
Martyn Welch60479692009-07-31 09:28:17 +0100506{
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100507 unsigned long long existing_size;
508 int retval = 0;
509 struct pci_dev *pdev;
Martyn Welch60479692009-07-31 09:28:17 +0100510
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100511 /* Find pci_dev container of dev */
512 if (ca91cx42_bridge->parent == NULL) {
513 printk(KERN_ERR "Dev entry NULL\n");
514 return -EINVAL;
Martyn Welch60479692009-07-31 09:28:17 +0100515 }
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100516 pdev = container_of(ca91cx42_bridge->parent, struct pci_dev, dev);
517
518 existing_size = (unsigned long long)(image->pci_resource.end -
519 image->pci_resource.start);
520
521 /* If the existing size is OK, return */
522 if (existing_size == (size - 1))
523 return 0;
524
525 if (existing_size != 0) {
526 iounmap(image->kern_base);
527 image->kern_base = NULL;
528 if (image->pci_resource.name != NULL)
529 kfree(image->pci_resource.name);
530 release_resource(&(image->pci_resource));
531 memset(&(image->pci_resource), 0, sizeof(struct resource));
Martyn Welch60479692009-07-31 09:28:17 +0100532 }
533
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100534 if (image->pci_resource.name == NULL) {
535 image->pci_resource.name = kmalloc(VMENAMSIZ+3, GFP_KERNEL);
536 if (image->pci_resource.name == NULL) {
537 printk(KERN_ERR "Unable to allocate memory for resource"
538 " name\n");
539 retval = -ENOMEM;
540 goto err_name;
541 }
Martyn Welch60479692009-07-31 09:28:17 +0100542 }
543
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100544 sprintf((char *)image->pci_resource.name, "%s.%d",
545 ca91cx42_bridge->name, image->number);
546
547 image->pci_resource.start = 0;
548 image->pci_resource.end = (unsigned long)size;
549 image->pci_resource.flags = IORESOURCE_MEM;
550
551 retval = pci_bus_alloc_resource(pdev->bus,
552 &(image->pci_resource), size, size, PCIBIOS_MIN_MEM,
553 0, NULL, NULL);
554 if (retval) {
555 printk(KERN_ERR "Failed to allocate mem resource for "
556 "window %d size 0x%lx start 0x%lx\n",
557 image->number, (unsigned long)size,
558 (unsigned long)image->pci_resource.start);
559 goto err_resource;
Martyn Welch60479692009-07-31 09:28:17 +0100560 }
561
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100562 image->kern_base = ioremap_nocache(
563 image->pci_resource.start, size);
564 if (image->kern_base == NULL) {
565 printk(KERN_ERR "Failed to remap resource\n");
566 retval = -ENOMEM;
567 goto err_remap;
Martyn Welch60479692009-07-31 09:28:17 +0100568 }
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100569
570 return 0;
571
572 iounmap(image->kern_base);
573 image->kern_base = NULL;
574err_remap:
575 release_resource(&(image->pci_resource));
576err_resource:
577 kfree(image->pci_resource.name);
578 memset(&(image->pci_resource), 0, sizeof(struct resource));
579err_name:
580 return retval;
581}
582
/*
 * Free and unmap the PCI resource backing a master window — the inverse
 * of ca91cx42_alloc_resource().  Order matters: unmap before releasing
 * the resource the mapping came from.
 */
static void ca91cx42_free_resource(struct vme_master_resource *image)
{
	iounmap(image->kern_base);
	image->kern_base = NULL;
	release_resource(&(image->pci_resource));
	kfree(image->pci_resource.name);
	memset(&(image->pci_resource), 0, sizeof(struct resource));
}
594
595
596int ca91cx42_master_set(struct vme_master_resource *image, int enabled,
597 unsigned long long vme_base, unsigned long long size,
598 vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
599{
600 int retval = 0;
601 unsigned int i;
602 unsigned int temp_ctl = 0;
603 unsigned long long pci_bound, vme_offset, pci_base;
604
605 /* Verify input data */
606 if (vme_base & 0xFFF) {
607 printk(KERN_ERR "Invalid VME Window alignment\n");
608 retval = -EINVAL;
609 goto err_window;
610 }
611 if (size & 0xFFF) {
612 printk(KERN_ERR "Invalid VME Window alignment\n");
613 retval = -EINVAL;
614 goto err_window;
615 }
616
617 spin_lock(&(image->lock));
618
619 /* XXX We should do this much later, so that we can exit without
620 * needing to redo the mapping...
621 */
622 /*
623 * Let's allocate the resource here rather than further up the stack as
624 * it avoids pushing loads of bus dependant stuff up the stack
625 */
626 retval = ca91cx42_alloc_resource(image, size);
627 if (retval) {
628 spin_unlock(&(image->lock));
629 printk(KERN_ERR "Unable to allocate memory for resource "
630 "name\n");
631 retval = -ENOMEM;
632 goto err_res;
633 }
634
635 pci_base = (unsigned long long)image->pci_resource.start;
636
637 /*
638 * Bound address is a valid address for the window, adjust
639 * according to window granularity.
640 */
641 pci_bound = pci_base + (size - 0x1000);
642 vme_offset = vme_base - pci_base;
643
644 i = image->number;
645
646 /* Disable while we are mucking around */
647 temp_ctl = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_CTL[i]);
648 temp_ctl &= ~CA91CX42_LSI_CTL_EN;
649 iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_LSI_CTL[i]);
650
651/* XXX Prefetch stuff currently unsupported */
652#if 0
Martyn Welch60479692009-07-31 09:28:17 +0100653 if (vmeOut->wrPostEnable)
654 temp_ctl |= 0x40000000;
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100655#endif
Martyn Welch60479692009-07-31 09:28:17 +0100656
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100657 /* Setup cycle types */
658 temp_ctl &= ~CA91CX42_LSI_CTL_VCT_M;
659 if (cycle & VME_BLT)
660 temp_ctl |= CA91CX42_LSI_CTL_VCT_BLT;
661 if (cycle & VME_MBLT)
662 temp_ctl |= CA91CX42_LSI_CTL_VCT_MBLT;
Martyn Welch60479692009-07-31 09:28:17 +0100663
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100664 /* Setup data width */
665 temp_ctl &= ~CA91CX42_LSI_CTL_VDW_M;
666 switch (dwidth) {
667 case VME_D8:
668 temp_ctl |= CA91CX42_LSI_CTL_VDW_D8;
Martyn Welch60479692009-07-31 09:28:17 +0100669 break;
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100670 case VME_D16:
671 temp_ctl |= CA91CX42_LSI_CTL_VDW_D16;
Martyn Welch60479692009-07-31 09:28:17 +0100672 break;
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100673 case VME_D32:
674 temp_ctl |= CA91CX42_LSI_CTL_VDW_D32;
Martyn Welch60479692009-07-31 09:28:17 +0100675 break;
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100676 case VME_D64:
677 temp_ctl |= CA91CX42_LSI_CTL_VDW_D64;
Martyn Welch60479692009-07-31 09:28:17 +0100678 break;
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100679 default:
680 spin_unlock(&(image->lock));
681 printk(KERN_ERR "Invalid data width\n");
682 retval = -EINVAL;
683 goto err_dwidth;
Martyn Welch60479692009-07-31 09:28:17 +0100684 break;
685 }
686
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100687 /* Setup address space */
688 temp_ctl &= ~CA91CX42_LSI_CTL_VAS_M;
689 switch (aspace) {
690 case VME_A16:
691 temp_ctl |= CA91CX42_LSI_CTL_VAS_A16;
692 break;
693 case VME_A24:
694 temp_ctl |= CA91CX42_LSI_CTL_VAS_A24;
695 break;
696 case VME_A32:
697 temp_ctl |= CA91CX42_LSI_CTL_VAS_A32;
698 break;
699 case VME_CRCSR:
700 temp_ctl |= CA91CX42_LSI_CTL_VAS_CRCSR;
701 break;
702 case VME_USER1:
703 temp_ctl |= CA91CX42_LSI_CTL_VAS_USER1;
704 break;
705 case VME_USER2:
706 temp_ctl |= CA91CX42_LSI_CTL_VAS_USER2;
707 break;
Martyn Welch60479692009-07-31 09:28:17 +0100708 case VME_A64:
709 case VME_USER3:
710 case VME_USER4:
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100711 default:
712 spin_unlock(&(image->lock));
713 printk(KERN_ERR "Invalid address space\n");
714 retval = -EINVAL;
715 goto err_aspace;
Martyn Welch60479692009-07-31 09:28:17 +0100716 break;
717 }
718
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100719 temp_ctl &= ~(CA91CX42_LSI_CTL_PGM_M | CA91CX42_LSI_CTL_SUPER_M);
720 if (cycle & VME_SUPER)
721 temp_ctl |= CA91CX42_LSI_CTL_SUPER_SUPR;
722 if (cycle & VME_PROG)
723 temp_ctl |= CA91CX42_LSI_CTL_PGM_PGM;
Martyn Welch60479692009-07-31 09:28:17 +0100724
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100725 /* Setup mapping */
726 iowrite32(pci_base, ca91cx42_bridge->base + CA91CX42_LSI_BS[i]);
727 iowrite32(pci_bound, ca91cx42_bridge->base + CA91CX42_LSI_BD[i]);
728 iowrite32(vme_offset, ca91cx42_bridge->base + CA91CX42_LSI_TO[i]);
Martyn Welch60479692009-07-31 09:28:17 +0100729
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100730 /* Write ctl reg without enable */
731 iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_LSI_CTL[i]);
Martyn Welch60479692009-07-31 09:28:17 +0100732
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100733 if (enabled)
734 temp_ctl |= CA91CX42_LSI_CTL_EN;
Martyn Welch60479692009-07-31 09:28:17 +0100735
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100736 iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_LSI_CTL[i]);
Martyn Welch60479692009-07-31 09:28:17 +0100737
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100738 spin_unlock(&(image->lock));
739 return 0;
740
741err_aspace:
742err_dwidth:
743 ca91cx42_free_resource(image);
744err_res:
745err_window:
746 return retval;
Martyn Welch60479692009-07-31 09:28:17 +0100747}
748
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100749int __ca91cx42_master_get(struct vme_master_resource *image, int *enabled,
750 unsigned long long *vme_base, unsigned long long *size,
751 vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
Martyn Welch60479692009-07-31 09:28:17 +0100752{
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100753 unsigned int i, ctl;
754 unsigned long long pci_base, pci_bound, vme_offset;
Martyn Welch60479692009-07-31 09:28:17 +0100755
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100756 i = image->number;
757
758 ctl = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_CTL[i]);
759
760 pci_base = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_BS[i]);
761 vme_offset = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_TO[i]);
762 pci_bound = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_BD[i]);
763
764 *vme_base = pci_base + vme_offset;
765 *size = (pci_bound - pci_base) + 0x1000;
766
767 *enabled = 0;
768 *aspace = 0;
769 *cycle = 0;
770 *dwidth = 0;
771
772 if (ctl & CA91CX42_LSI_CTL_EN)
773 *enabled = 1;
774
775 /* Setup address space */
776 switch (ctl & CA91CX42_LSI_CTL_VAS_M) {
777 case CA91CX42_LSI_CTL_VAS_A16:
778 *aspace = VME_A16;
779 break;
780 case CA91CX42_LSI_CTL_VAS_A24:
781 *aspace = VME_A24;
782 break;
783 case CA91CX42_LSI_CTL_VAS_A32:
784 *aspace = VME_A32;
785 break;
786 case CA91CX42_LSI_CTL_VAS_CRCSR:
787 *aspace = VME_CRCSR;
788 break;
789 case CA91CX42_LSI_CTL_VAS_USER1:
790 *aspace = VME_USER1;
791 break;
792 case CA91CX42_LSI_CTL_VAS_USER2:
793 *aspace = VME_USER2;
794 break;
Martyn Welch60479692009-07-31 09:28:17 +0100795 }
Martyn Welch60479692009-07-31 09:28:17 +0100796
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100797 /* XXX Not sure howto check for MBLT */
798 /* Setup cycle types */
799 if (ctl & CA91CX42_LSI_CTL_VCT_BLT)
800 *cycle |= VME_BLT;
801 else
802 *cycle |= VME_SCT;
803
804 if (ctl & CA91CX42_LSI_CTL_SUPER_SUPR)
805 *cycle |= VME_SUPER;
806 else
807 *cycle |= VME_USER;
808
809 if (ctl & CA91CX42_LSI_CTL_PGM_PGM)
810 *cycle = VME_PROG;
811 else
812 *cycle = VME_DATA;
813
814 /* Setup data width */
815 switch (ctl & CA91CX42_LSI_CTL_VDW_M) {
816 case CA91CX42_LSI_CTL_VDW_D8:
817 *dwidth = VME_D8;
818 break;
819 case CA91CX42_LSI_CTL_VDW_D16:
820 *dwidth = VME_D16;
821 break;
822 case CA91CX42_LSI_CTL_VDW_D32:
823 *dwidth = VME_D32;
824 break;
825 case CA91CX42_LSI_CTL_VDW_D64:
826 *dwidth = VME_D64;
827 break;
828 }
829
830/* XXX Prefetch stuff currently unsupported */
831#if 0
832 if (ctl & 0x40000000)
833 vmeOut->wrPostEnable = 1;
834#endif
835
836 return 0;
Martyn Welch60479692009-07-31 09:28:17 +0100837}
838
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100839int ca91cx42_master_get(struct vme_master_resource *image, int *enabled,
840 unsigned long long *vme_base, unsigned long long *size,
841 vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
842{
843 int retval;
844
845 spin_lock(&(image->lock));
846
847 retval = __ca91cx42_master_get(image, enabled, vme_base, size, aspace,
848 cycle, dwidth);
849
850 spin_unlock(&(image->lock));
851
852 return retval;
853}
854
855ssize_t ca91cx42_master_read(struct vme_master_resource *image, void *buf,
856 size_t count, loff_t offset)
857{
858 int retval;
859
860 spin_lock(&(image->lock));
861
862 memcpy_fromio(buf, image->kern_base + offset, (unsigned int)count);
863 retval = count;
864
865 spin_unlock(&(image->lock));
866
867 return retval;
868}
869
870ssize_t ca91cx42_master_write(struct vme_master_resource *image, void *buf,
871 size_t count, loff_t offset)
872{
873 int retval = 0;
874
875 spin_lock(&(image->lock));
876
877 memcpy_toio(image->kern_base + offset, buf, (unsigned int)count);
878 retval = count;
879
880 spin_unlock(&(image->lock));
881
882 return retval;
883}
884
885int ca91cx42_slot_get(void)
886{
887 u32 slot = 0;
888
Martyn Welch12b2d5c2009-12-15 08:42:56 +0000889 if (!geoid) {
890 slot = ioread32(ca91cx42_bridge->base + VCSR_BS);
891 slot = ((slot & CA91CX42_VCSR_BS_SLOT_M) >> 27);
892 } else
893 slot = geoid;
894
Martyn Welch3d0f8bc2009-08-27 17:00:40 +0100895 return (int)slot;
896
897}
898
/* Module entry point: hand registration to the PCI core; per-device
 * setup happens in ca91cx42_probe().
 */
static int __init ca91cx42_init(void)
{
	return pci_register_driver(&ca91cx42_driver);
}
903
904/*
905 * Configure CR/CSR space
906 *
907 * Access to the CR/CSR can be configured at power-up. The location of the
908 * CR/CSR registers in the CR/CSR address space is determined by the boards
909 * Auto-ID or Geographic address. This function ensures that the window is
910 * enabled at an offset consistent with the boards geopgraphic address.
911 */
912static int ca91cx42_crcsr_init(struct pci_dev *pdev)
913{
914 unsigned int crcsr_addr;
915 int tmp, slot;
916
917/* XXX We may need to set this somehow as the Universe II does not support
918 * geographical addressing.
919 */
920#if 0
921 if (vme_slotnum != -1)
922 iowrite32(vme_slotnum << 27, ca91cx42_bridge->base + VCSR_BS);
923#endif
924 slot = ca91cx42_slot_get();
925 dev_info(&pdev->dev, "CR/CSR Offset: %d\n", slot);
926 if (slot == 0) {
927 dev_err(&pdev->dev, "Slot number is unset, not configuring "
928 "CR/CSR space\n");
929 return -EINVAL;
930 }
931
932 /* Allocate mem for CR/CSR image */
933 crcsr_kernel = pci_alloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
934 &crcsr_bus);
935 if (crcsr_kernel == NULL) {
936 dev_err(&pdev->dev, "Failed to allocate memory for CR/CSR "
937 "image\n");
938 return -ENOMEM;
939 }
940
941 memset(crcsr_kernel, 0, VME_CRCSR_BUF_SIZE);
942
943 crcsr_addr = slot * (512 * 1024);
944 iowrite32(crcsr_bus - crcsr_addr, ca91cx42_bridge->base + VCSR_TO);
945
946 tmp = ioread32(ca91cx42_bridge->base + VCSR_CTL);
947 tmp |= CA91CX42_VCSR_CTL_EN;
948 iowrite32(tmp, ca91cx42_bridge->base + VCSR_CTL);
949
950 return 0;
951}
952
953static void ca91cx42_crcsr_exit(struct pci_dev *pdev)
954{
955 u32 tmp;
956
957 /* Turn off CR/CSR space */
958 tmp = ioread32(ca91cx42_bridge->base + VCSR_CTL);
959 tmp &= ~CA91CX42_VCSR_CTL_EN;
960 iowrite32(tmp, ca91cx42_bridge->base + VCSR_CTL);
961
962 /* Free image */
963 iowrite32(0, ca91cx42_bridge->base + VCSR_TO);
964
965 pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, crcsr_kernel, crcsr_bus);
966}
967
968static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
969{
970 int retval, i;
971 u32 data;
972 struct list_head *pos = NULL;
973 struct vme_master_resource *master_image;
974 struct vme_slave_resource *slave_image;
975#if 0
976 struct vme_dma_resource *dma_ctrlr;
977#endif
978 struct vme_lm_resource *lm;
979
980 /* We want to support more than one of each bridge so we need to
981 * dynamically allocate the bridge structure
982 */
983 ca91cx42_bridge = kmalloc(sizeof(struct vme_bridge), GFP_KERNEL);
984
985 if (ca91cx42_bridge == NULL) {
986 dev_err(&pdev->dev, "Failed to allocate memory for device "
987 "structure\n");
988 retval = -ENOMEM;
989 goto err_struct;
990 }
991
992 memset(ca91cx42_bridge, 0, sizeof(struct vme_bridge));
993
994 /* Enable the device */
995 retval = pci_enable_device(pdev);
996 if (retval) {
997 dev_err(&pdev->dev, "Unable to enable device\n");
998 goto err_enable;
999 }
1000
1001 /* Map Registers */
1002 retval = pci_request_regions(pdev, driver_name);
1003 if (retval) {
1004 dev_err(&pdev->dev, "Unable to reserve resources\n");
1005 goto err_resource;
1006 }
1007
1008 /* map registers in BAR 0 */
1009 ca91cx42_bridge->base = ioremap_nocache(pci_resource_start(pdev, 0),
1010 4096);
1011 if (!ca91cx42_bridge->base) {
1012 dev_err(&pdev->dev, "Unable to remap CRG region\n");
1013 retval = -EIO;
1014 goto err_remap;
1015 }
1016
1017 /* Check to see if the mapping worked out */
1018 data = ioread32(ca91cx42_bridge->base + CA91CX42_PCI_ID) & 0x0000FFFF;
1019 if (data != PCI_VENDOR_ID_TUNDRA) {
1020 dev_err(&pdev->dev, "PCI_ID check failed\n");
1021 retval = -EIO;
1022 goto err_test;
1023 }
1024
1025 /* Initialize wait queues & mutual exclusion flags */
1026 /* XXX These need to be moved to the vme_bridge structure */
1027 init_waitqueue_head(&dma_queue);
1028 init_waitqueue_head(&iack_queue);
1029 mutex_init(&(vme_int));
Martyn Welch3d0f8bc2009-08-27 17:00:40 +01001030 mutex_init(&(vme_rmw));
1031
1032 ca91cx42_bridge->parent = &(pdev->dev);
1033 strcpy(ca91cx42_bridge->name, driver_name);
1034
1035 /* Setup IRQ */
1036 retval = ca91cx42_irq_init(ca91cx42_bridge);
1037 if (retval != 0) {
1038 dev_err(&pdev->dev, "Chip Initialization failed.\n");
1039 goto err_irq;
1040 }
1041
1042 /* Add master windows to list */
1043 INIT_LIST_HEAD(&(ca91cx42_bridge->master_resources));
1044 for (i = 0; i < CA91C142_MAX_MASTER; i++) {
1045 master_image = kmalloc(sizeof(struct vme_master_resource),
1046 GFP_KERNEL);
1047 if (master_image == NULL) {
1048 dev_err(&pdev->dev, "Failed to allocate memory for "
1049 "master resource structure\n");
1050 retval = -ENOMEM;
1051 goto err_master;
1052 }
1053 master_image->parent = ca91cx42_bridge;
1054 spin_lock_init(&(master_image->lock));
1055 master_image->locked = 0;
1056 master_image->number = i;
1057 master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
1058 VME_CRCSR | VME_USER1 | VME_USER2;
1059 master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
1060 VME_SUPER | VME_USER | VME_PROG | VME_DATA;
1061 master_image->width_attr = VME_D8 | VME_D16 | VME_D32 | VME_D64;
1062 memset(&(master_image->pci_resource), 0,
1063 sizeof(struct resource));
1064 master_image->kern_base = NULL;
1065 list_add_tail(&(master_image->list),
1066 &(ca91cx42_bridge->master_resources));
1067 }
1068
1069 /* Add slave windows to list */
1070 INIT_LIST_HEAD(&(ca91cx42_bridge->slave_resources));
1071 for (i = 0; i < CA91C142_MAX_SLAVE; i++) {
1072 slave_image = kmalloc(sizeof(struct vme_slave_resource),
1073 GFP_KERNEL);
1074 if (slave_image == NULL) {
1075 dev_err(&pdev->dev, "Failed to allocate memory for "
1076 "slave resource structure\n");
1077 retval = -ENOMEM;
1078 goto err_slave;
1079 }
1080 slave_image->parent = ca91cx42_bridge;
1081 mutex_init(&(slave_image->mtx));
1082 slave_image->locked = 0;
1083 slave_image->number = i;
1084 slave_image->address_attr = VME_A24 | VME_A32 | VME_USER1 |
1085 VME_USER2;
1086
1087 /* Only windows 0 and 4 support A16 */
1088 if (i == 0 || i == 4)
1089 slave_image->address_attr |= VME_A16;
1090
1091 slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
1092 VME_SUPER | VME_USER | VME_PROG | VME_DATA;
1093 list_add_tail(&(slave_image->list),
1094 &(ca91cx42_bridge->slave_resources));
1095 }
1096#if 0
1097 /* Add dma engines to list */
1098 INIT_LIST_HEAD(&(ca91cx42_bridge->dma_resources));
1099 for (i = 0; i < CA91C142_MAX_DMA; i++) {
1100 dma_ctrlr = kmalloc(sizeof(struct vme_dma_resource),
1101 GFP_KERNEL);
1102 if (dma_ctrlr == NULL) {
1103 dev_err(&pdev->dev, "Failed to allocate memory for "
1104 "dma resource structure\n");
1105 retval = -ENOMEM;
1106 goto err_dma;
1107 }
1108 dma_ctrlr->parent = ca91cx42_bridge;
1109 mutex_init(&(dma_ctrlr->mtx));
1110 dma_ctrlr->locked = 0;
1111 dma_ctrlr->number = i;
Martyn Welch4f723df2010-02-18 15:12:58 +00001112 dma_ctrlr->route_attr = VME_DMA_VME_TO_MEM |
1113 VME_DMA_MEM_TO_VME;
Martyn Welch3d0f8bc2009-08-27 17:00:40 +01001114 INIT_LIST_HEAD(&(dma_ctrlr->pending));
1115 INIT_LIST_HEAD(&(dma_ctrlr->running));
1116 list_add_tail(&(dma_ctrlr->list),
1117 &(ca91cx42_bridge->dma_resources));
1118 }
1119#endif
1120 /* Add location monitor to list */
1121 INIT_LIST_HEAD(&(ca91cx42_bridge->lm_resources));
1122 lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
1123 if (lm == NULL) {
1124 dev_err(&pdev->dev, "Failed to allocate memory for "
1125 "location monitor resource structure\n");
1126 retval = -ENOMEM;
1127 goto err_lm;
1128 }
1129 lm->parent = ca91cx42_bridge;
1130 mutex_init(&(lm->mtx));
1131 lm->locked = 0;
1132 lm->number = 1;
1133 lm->monitors = 4;
1134 list_add_tail(&(lm->list), &(ca91cx42_bridge->lm_resources));
1135
1136 ca91cx42_bridge->slave_get = ca91cx42_slave_get;
1137 ca91cx42_bridge->slave_set = ca91cx42_slave_set;
1138 ca91cx42_bridge->master_get = ca91cx42_master_get;
1139 ca91cx42_bridge->master_set = ca91cx42_master_set;
1140 ca91cx42_bridge->master_read = ca91cx42_master_read;
1141 ca91cx42_bridge->master_write = ca91cx42_master_write;
1142#if 0
1143 ca91cx42_bridge->master_rmw = ca91cx42_master_rmw;
1144 ca91cx42_bridge->dma_list_add = ca91cx42_dma_list_add;
1145 ca91cx42_bridge->dma_list_exec = ca91cx42_dma_list_exec;
1146 ca91cx42_bridge->dma_list_empty = ca91cx42_dma_list_empty;
1147#endif
Martyn Welchc813f592009-10-29 16:34:54 +00001148 ca91cx42_bridge->irq_set = ca91cx42_irq_set;
1149 ca91cx42_bridge->irq_generate = ca91cx42_irq_generate;
Martyn Welch3d0f8bc2009-08-27 17:00:40 +01001150#if 0
1151 ca91cx42_bridge->lm_set = ca91cx42_lm_set;
1152 ca91cx42_bridge->lm_get = ca91cx42_lm_get;
1153 ca91cx42_bridge->lm_attach = ca91cx42_lm_attach;
1154 ca91cx42_bridge->lm_detach = ca91cx42_lm_detach;
1155#endif
1156 ca91cx42_bridge->slot_get = ca91cx42_slot_get;
1157
1158 data = ioread32(ca91cx42_bridge->base + MISC_CTL);
1159 dev_info(&pdev->dev, "Board is%s the VME system controller\n",
1160 (data & CA91CX42_MISC_CTL_SYSCON) ? "" : " not");
1161 dev_info(&pdev->dev, "Slot ID is %d\n", ca91cx42_slot_get());
1162
1163 if (ca91cx42_crcsr_init(pdev)) {
1164 dev_err(&pdev->dev, "CR/CSR configuration failed.\n");
1165 retval = -EINVAL;
1166#if 0
1167 goto err_crcsr;
1168#endif
1169 }
1170
1171 /* Need to save ca91cx42_bridge pointer locally in link list for use in
1172 * ca91cx42_remove()
1173 */
1174 retval = vme_register_bridge(ca91cx42_bridge);
1175 if (retval != 0) {
1176 dev_err(&pdev->dev, "Chip Registration failed.\n");
1177 goto err_reg;
1178 }
1179
1180 return 0;
1181
1182 vme_unregister_bridge(ca91cx42_bridge);
1183err_reg:
1184 ca91cx42_crcsr_exit(pdev);
Greg Kroah-Hartman70d7aa82009-10-29 16:18:53 -07001185#if 0
Martyn Welch3d0f8bc2009-08-27 17:00:40 +01001186err_crcsr:
Greg Kroah-Hartman70d7aa82009-10-29 16:18:53 -07001187#endif
Martyn Welch3d0f8bc2009-08-27 17:00:40 +01001188err_lm:
1189 /* resources are stored in link list */
1190 list_for_each(pos, &(ca91cx42_bridge->lm_resources)) {
1191 lm = list_entry(pos, struct vme_lm_resource, list);
1192 list_del(pos);
1193 kfree(lm);
1194 }
1195#if 0
1196err_dma:
1197 /* resources are stored in link list */
1198 list_for_each(pos, &(ca91cx42_bridge->dma_resources)) {
1199 dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
1200 list_del(pos);
1201 kfree(dma_ctrlr);
1202 }
1203#endif
1204err_slave:
1205 /* resources are stored in link list */
1206 list_for_each(pos, &(ca91cx42_bridge->slave_resources)) {
1207 slave_image = list_entry(pos, struct vme_slave_resource, list);
1208 list_del(pos);
1209 kfree(slave_image);
1210 }
1211err_master:
1212 /* resources are stored in link list */
1213 list_for_each(pos, &(ca91cx42_bridge->master_resources)) {
1214 master_image = list_entry(pos, struct vme_master_resource,
1215 list);
1216 list_del(pos);
1217 kfree(master_image);
1218 }
1219
1220 ca91cx42_irq_exit(pdev);
1221err_irq:
1222err_test:
1223 iounmap(ca91cx42_bridge->base);
1224err_remap:
1225 pci_release_regions(pdev);
1226err_resource:
1227 pci_disable_device(pdev);
1228err_enable:
1229 kfree(ca91cx42_bridge);
1230err_struct:
1231 return retval;
1232
1233}
1234
1235void ca91cx42_remove(struct pci_dev *pdev)
1236{
1237 struct list_head *pos = NULL;
1238 struct vme_master_resource *master_image;
1239 struct vme_slave_resource *slave_image;
1240 struct vme_dma_resource *dma_ctrlr;
1241 struct vme_lm_resource *lm;
Martyn Welch3d0f8bc2009-08-27 17:00:40 +01001242
1243 /* Turn off Ints */
1244 iowrite32(0, ca91cx42_bridge->base + LINT_EN);
1245
1246 /* Turn off the windows */
1247 iowrite32(0x00800000, ca91cx42_bridge->base + LSI0_CTL);
1248 iowrite32(0x00800000, ca91cx42_bridge->base + LSI1_CTL);
1249 iowrite32(0x00800000, ca91cx42_bridge->base + LSI2_CTL);
1250 iowrite32(0x00800000, ca91cx42_bridge->base + LSI3_CTL);
1251 iowrite32(0x00800000, ca91cx42_bridge->base + LSI4_CTL);
1252 iowrite32(0x00800000, ca91cx42_bridge->base + LSI5_CTL);
1253 iowrite32(0x00800000, ca91cx42_bridge->base + LSI6_CTL);
1254 iowrite32(0x00800000, ca91cx42_bridge->base + LSI7_CTL);
1255 iowrite32(0x00F00000, ca91cx42_bridge->base + VSI0_CTL);
1256 iowrite32(0x00F00000, ca91cx42_bridge->base + VSI1_CTL);
1257 iowrite32(0x00F00000, ca91cx42_bridge->base + VSI2_CTL);
1258 iowrite32(0x00F00000, ca91cx42_bridge->base + VSI3_CTL);
1259 iowrite32(0x00F00000, ca91cx42_bridge->base + VSI4_CTL);
1260 iowrite32(0x00F00000, ca91cx42_bridge->base + VSI5_CTL);
1261 iowrite32(0x00F00000, ca91cx42_bridge->base + VSI6_CTL);
1262 iowrite32(0x00F00000, ca91cx42_bridge->base + VSI7_CTL);
1263
1264 vme_unregister_bridge(ca91cx42_bridge);
1265#if 0
1266 ca91cx42_crcsr_exit(pdev);
1267#endif
1268 /* resources are stored in link list */
1269 list_for_each(pos, &(ca91cx42_bridge->lm_resources)) {
1270 lm = list_entry(pos, struct vme_lm_resource, list);
1271 list_del(pos);
1272 kfree(lm);
1273 }
1274
1275 /* resources are stored in link list */
1276 list_for_each(pos, &(ca91cx42_bridge->dma_resources)) {
1277 dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
1278 list_del(pos);
1279 kfree(dma_ctrlr);
1280 }
1281
1282 /* resources are stored in link list */
1283 list_for_each(pos, &(ca91cx42_bridge->slave_resources)) {
1284 slave_image = list_entry(pos, struct vme_slave_resource, list);
1285 list_del(pos);
1286 kfree(slave_image);
1287 }
1288
1289 /* resources are stored in link list */
1290 list_for_each(pos, &(ca91cx42_bridge->master_resources)) {
1291 master_image = list_entry(pos, struct vme_master_resource,
1292 list);
1293 list_del(pos);
1294 kfree(master_image);
1295 }
1296
1297 ca91cx42_irq_exit(pdev);
1298
1299 iounmap(ca91cx42_bridge->base);
1300
1301 pci_release_regions(pdev);
1302
1303 pci_disable_device(pdev);
1304
1305 kfree(ca91cx42_bridge);
1306}
1307
/* Module exit point: unregister the PCI driver; the core invokes
 * ca91cx42_remove() for each bound device.
 */
static void __exit ca91cx42_exit(void)
{
	pci_unregister_driver(&ca91cx42_driver);
}
1312
Martyn Welch12b2d5c2009-12-15 08:42:56 +00001313MODULE_PARM_DESC(geoid, "Override geographical addressing");
1314module_param(geoid, int, 0);
1315
Martyn Welch3d0f8bc2009-08-27 17:00:40 +01001316MODULE_DESCRIPTION("VME driver for the Tundra Universe II VME bridge");
1317MODULE_LICENSE("GPL");
1318
1319module_init(ca91cx42_init);
1320module_exit(ca91cx42_exit);
1321
1322/*----------------------------------------------------------------------------
1323 * STAGING
1324 *--------------------------------------------------------------------------*/
1325
1326#if 0
Martyn Welch60479692009-07-31 09:28:17 +01001327#define SWIZZLE(X) ( ((X & 0xFF000000) >> 24) | ((X & 0x00FF0000) >> 8) | ((X & 0x0000FF00) << 8) | ((X & 0x000000FF) << 24))
1328
/* Perform a VME read-modify-write using the Universe special-cycle
 * (SCYC) registers.  Staging code, currently compiled out (#if 0).
 *
 * Searches the eight outbound windows for one that already maps the
 * target VME address, programs the SCYC mask/compare/swap registers,
 * then polls the mapped location until the swap is observed or
 * maxAttempts is exceeded.  Returns 0 (numAttempts > maxAttempts
 * signals failure to the caller) or -EINVAL for bad parameters or when
 * no window covers the target address.
 *
 * NOTE(review): casts addresses through plain int — 32-bit only;
 * confirm before resurrecting this code.
 */
int ca91cx42_master_rmw(vmeRmwCfg_t *vmeRmw)
{
	int temp_ctl = 0;
	int tempBS = 0;
	int tempBD = 0;
	int tempTO = 0;
	int vmeBS = 0;
	int vmeBD = 0;
	int *rmw_pci_data_ptr = NULL;
	int *vaDataPtr = NULL;
	int i;
	vmeOutWindowCfg_t vmeOut;
	if (vmeRmw->maxAttempts < 1) {
		return -EINVAL;
	}
	/* Only 32-bit target addresses are supported */
	if (vmeRmw->targetAddrU) {
		return -EINVAL;
	}
	/* Find the PCI address that maps to the desired VME address */
	for (i = 0; i < 8; i++) {
		temp_ctl = ioread32(ca91cx42_bridge->base +
			CA91CX42_LSI_CTL[i]);
		/* Skip windows that are not enabled */
		if ((temp_ctl & 0x80000000) == 0) {
			continue;
		}
		memset(&vmeOut, 0, sizeof(vmeOut));
		vmeOut.windowNbr = i;
		ca91cx42_get_out_bound(&vmeOut);
		if (vmeOut.addrSpace != vmeRmw->addrSpace) {
			continue;
		}
		tempBS = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_BS[i]);
		tempBD = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_BD[i]);
		tempTO = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_TO[i]);
		vmeBS = tempBS + tempTO;
		vmeBD = tempBD + tempTO;
		if ((vmeRmw->targetAddr >= vmeBS) &&
		    (vmeRmw->targetAddr < vmeBD)) {
			rmw_pci_data_ptr =
			    (int *)(tempBS + (vmeRmw->targetAddr - vmeBS));
			vaDataPtr =
			    (int *)(out_image_va[i] +
				    (vmeRmw->targetAddr - vmeBS));
			break;
		}
	}

	/* If no window - fail. */
	if (rmw_pci_data_ptr == NULL) {
		return -EINVAL;
	}
	/* Setup the RMW registers. */
	iowrite32(0, ca91cx42_bridge->base + SCYC_CTL);
	iowrite32(SWIZZLE(vmeRmw->enableMask), ca91cx42_bridge->base + SCYC_EN);
	iowrite32(SWIZZLE(vmeRmw->compareData), ca91cx42_bridge->base +
		SCYC_CMP);
	iowrite32(SWIZZLE(vmeRmw->swapData), ca91cx42_bridge->base + SCYC_SWP);
	iowrite32((int)rmw_pci_data_ptr, ca91cx42_bridge->base + SCYC_ADDR);
	iowrite32(1, ca91cx42_bridge->base + SCYC_CTL);

	/* Run the RMW cycle until either success or max attempts. */
	vmeRmw->numAttempts = 1;
	while (vmeRmw->numAttempts <= vmeRmw->maxAttempts) {

		if ((ioread32(vaDataPtr) & vmeRmw->enableMask) ==
		    (vmeRmw->swapData & vmeRmw->enableMask)) {

			iowrite32(0, ca91cx42_bridge->base + SCYC_CTL);
			break;

		}
		vmeRmw->numAttempts++;
	}

	/* If no success, set num Attempts to be greater than max attempts */
	if (vmeRmw->numAttempts > vmeRmw->maxAttempts) {
		vmeRmw->numAttempts = vmeRmw->maxAttempts + 1;
	}

	return 0;
}
1410
/* Build the Universe DCTL (DMA control) register value for one DMA
 * packet.  Staging code, currently compiled out (#if 0).
 *
 * Direction comes from srcBus (VME source -> read from VME, otherwise
 * write toward VME, bit 31 set); data width, address space, program/
 * data AM, supervisor AM and block-transfer protocol are encoded from
 * the relevant packet attributes.  The computed value is returned via
 * *dctlregreturn.  Returns -EINVAL for address spaces the Universe DMA
 * engine cannot generate (A64, CR/CSR, USER3/4), 0 otherwise.
 */
int uniSetupDctlReg(vmeDmaPacket_t * vmeDma, int *dctlregreturn)
{
	unsigned int dctlreg = 0x80;
	struct vmeAttr *vmeAttr;

	if (vmeDma->srcBus == VME_DMA_VME) {
		dctlreg = 0;
		vmeAttr = &vmeDma->srcVmeAttr;
	} else {
		dctlreg = 0x80000000;
		vmeAttr = &vmeDma->dstVmeAttr;
	}

	switch (vmeAttr->maxDataWidth) {
	case VME_D8:
		break;
	case VME_D16:
		dctlreg |= 0x00400000;
		break;
	case VME_D32:
		dctlreg |= 0x00800000;
		break;
	case VME_D64:
		dctlreg |= 0x00C00000;
		break;
	}

	switch (vmeAttr->addrSpace) {
	case VME_A16:
		break;
	case VME_A24:
		dctlreg |= 0x00010000;
		break;
	case VME_A32:
		dctlreg |= 0x00020000;
		break;
	case VME_USER1:
		dctlreg |= 0x00060000;
		break;
	case VME_USER2:
		dctlreg |= 0x00070000;
		break;

	case VME_A64:		/* not supported in Universe DMA */
	case VME_CRCSR:
	case VME_USER3:
	case VME_USER4:
		return -EINVAL;
		break;
	}
	if (vmeAttr->userAccessType == VME_PROG) {
		dctlreg |= 0x00004000;
	}
	if (vmeAttr->dataAccessType == VME_SUPER) {
		dctlreg |= 0x00001000;
	}
	if (vmeAttr->xferProtocol != VME_SCT) {
		dctlreg |= 0x00000100;
	}
	*dctlregreturn = dctlreg;
	return 0;
}
1473
/* Kick off a DMA transfer on the Universe DMA engine.  Staging code,
 * currently compiled out (#if 0).
 *
 * Chained mode (bit 27 of dgcsreg set): point DCPP at the descriptor
 * list and let the engine fetch each packet itself.  Direct mode: copy
 * the single descriptor's fields into the DMA registers.  Returns a
 * timebase snapshot (get_tbl()) taken just before the GO bit is set,
 * used by callers as the transfer start tick.
 */
unsigned int
ca91cx42_start_dma(int channel, unsigned int dgcsreg, TDMA_Cmd_Packet *vmeLL)
{
	unsigned int val;

	/* Setup registers as needed for direct or chained. */
	if (dgcsreg & 0x8000000) {
		iowrite32(0, ca91cx42_bridge->base + DTBC);
		iowrite32((unsigned int)vmeLL, ca91cx42_bridge->base + DCPP);
	} else {
#if 0
		printk(KERN_ERR "Starting: DGCS = %08x\n", dgcsreg);
		printk(KERN_ERR "Starting: DVA = %08x\n",
			ioread32(&vmeLL->dva));
		printk(KERN_ERR "Starting: DLV = %08x\n",
			ioread32(&vmeLL->dlv));
		printk(KERN_ERR "Starting: DTBC = %08x\n",
			ioread32(&vmeLL->dtbc));
		printk(KERN_ERR "Starting: DCTL = %08x\n",
			ioread32(&vmeLL->dctl));
#endif
		/* Write registers */
		iowrite32(ioread32(&vmeLL->dva), ca91cx42_bridge->base + DVA);
		iowrite32(ioread32(&vmeLL->dlv), ca91cx42_bridge->base + DLA);
		iowrite32(ioread32(&vmeLL->dtbc), ca91cx42_bridge->base + DTBC);
		iowrite32(ioread32(&vmeLL->dctl), ca91cx42_bridge->base + DCTL);
		iowrite32(0, ca91cx42_bridge->base + DCPP);
	}

	/* Start the operation */
	iowrite32(dgcsreg, ca91cx42_bridge->base + DGCS);
	val = get_tbl();
	iowrite32(dgcsreg | 0x8000000F, ca91cx42_bridge->base + DGCS);
	return val;
}
1509
/* Build a hardware descriptor (linked-list) chain for a DMA request.
 * Staging code, currently compiled out (#if 0).
 *
 * Three passes over the packet chain: (1) allocate pages and link
 * TDMA_Cmd_Packet descriptors (a fresh page is chained in once the
 * current one is full), (2) fill each descriptor's address/control/
 * count fields, (3) rewrite the link pointers as PCI bus addresses
 * (1 marks the terminal descriptor).  Returns the head of the chain,
 * or NULL when the first page allocation fails.
 *
 * NOTE(review): link pointers are stored through 32-bit ints
 * (virt_to_bus) — 32-bit only; confirm before resurrecting.
 */
TDMA_Cmd_Packet *ca91cx42_setup_dma(vmeDmaPacket_t * vmeDma)
{
	vmeDmaPacket_t *vmeCur;
	int maxPerPage;
	int currentLLcount;
	TDMA_Cmd_Packet *startLL;
	TDMA_Cmd_Packet *currentLL;
	TDMA_Cmd_Packet *nextLL;
	unsigned int dctlreg = 0;

	maxPerPage = PAGESIZE / sizeof(TDMA_Cmd_Packet) - 1;
	startLL = (TDMA_Cmd_Packet *) __get_free_pages(GFP_KERNEL, 0);
	if (startLL == 0) {
		return startLL;
	}
	/* First allocate pages for descriptors and create linked list */
	vmeCur = vmeDma;
	currentLL = startLL;
	currentLLcount = 0;
	while (vmeCur != 0) {
		if (vmeCur->pNextPacket != 0) {
			currentLL->dcpp = (unsigned int)(currentLL + 1);
			currentLLcount++;
			if (currentLLcount >= maxPerPage) {
				currentLL->dcpp =
				    __get_free_pages(GFP_KERNEL, 0);
				currentLLcount = 0;
			}
			currentLL = (TDMA_Cmd_Packet *) currentLL->dcpp;
		} else {
			currentLL->dcpp = (unsigned int)0;
		}
		vmeCur = vmeCur->pNextPacket;
	}

	/* Next fill in information for each descriptor */
	vmeCur = vmeDma;
	currentLL = startLL;
	while (vmeCur != 0) {
		/* dva is always the VME side, dlv the PCI/local side */
		if (vmeCur->srcBus == VME_DMA_VME) {
			iowrite32(vmeCur->srcAddr, &currentLL->dva);
			iowrite32(vmeCur->dstAddr, &currentLL->dlv);
		} else {
			iowrite32(vmeCur->srcAddr, &currentLL->dlv);
			iowrite32(vmeCur->dstAddr, &currentLL->dva);
		}
		uniSetupDctlReg(vmeCur, &dctlreg);
		iowrite32(dctlreg, &currentLL->dctl);
		iowrite32(vmeCur->byteCount, &currentLL->dtbc);

		currentLL = (TDMA_Cmd_Packet *) currentLL->dcpp;
		vmeCur = vmeCur->pNextPacket;
	}

	/* Convert Links to PCI addresses. */
	currentLL = startLL;
	while (currentLL != 0) {
		nextLL = (TDMA_Cmd_Packet *) currentLL->dcpp;
		if (nextLL == 0) {
			/* Bit 0 set marks the end of the chain */
			iowrite32(1, &currentLL->dcpp);
		} else {
			iowrite32((unsigned int)virt_to_bus(nextLL),
				  &currentLL->dcpp);
		}
		currentLL = nextLL;
	}

	/* Return pointer to descriptors list */
	return startLL;
}
1580
/* Free a descriptor chain built by ca91cx42_setup_dma().  Staging
 * code, currently compiled out (#if 0).
 *
 * First pass converts the PCI bus link pointers back to virtual
 * addresses (bit 0 marks the terminal descriptor); second pass frees
 * each underlying page once the chain leaves it (descriptors within a
 * page are contiguous, so a non-adjacent next pointer means a page
 * boundary).  Always returns 0.
 */
int ca91cx42_free_dma(TDMA_Cmd_Packet *startLL)
{
	TDMA_Cmd_Packet *currentLL;
	TDMA_Cmd_Packet *prevLL;
	TDMA_Cmd_Packet *nextLL;
	unsigned int dcppreg;

	/* Convert Links to virtual addresses. */
	currentLL = startLL;
	while (currentLL != 0) {
		dcppreg = ioread32(&currentLL->dcpp);
		/* Mask the processing-control bits from the link */
		dcppreg &= ~6;
		if (dcppreg & 1) {
			currentLL->dcpp = 0;
		} else {
			currentLL->dcpp = (unsigned int)bus_to_virt(dcppreg);
		}
		currentLL = (TDMA_Cmd_Packet *) currentLL->dcpp;
	}

	/* Free all pages associated with the descriptors. */
	currentLL = startLL;
	prevLL = currentLL;
	while (currentLL != 0) {
		nextLL = (TDMA_Cmd_Packet *) currentLL->dcpp;
		if (currentLL + 1 != nextLL) {
			free_pages((int)prevLL, 0);
			prevLL = nextLL;
		}
		currentLL = nextLL;
	}

	/* Return pointer to descriptors list */
	return 0;
}
1616
/* Validate, build, run and tear down a complete DMA request.  Staging
 * code, currently compiled out (#if 0).
 *
 * Sanity-checks every packet in the chain (non-zero byte count below
 * 16MB, matching 8-byte alignment of source and destination, a valid
 * PCI<->VME direction, an encodable DCTL value, and no cycles in the
 * chain), derives the DGCS block-size and back-off-timer fields, builds
 * the descriptor chain, starts the engine, then sleeps on dma_queue
 * until the DGCS done bit (0x800) is raised by the interrupt handler.
 * On error the DMA registers are dumped and vmeDmaStatus carries the
 * DGCS error bits.  Returns 0 on completion, -EINVAL on a bad request.
 */
int ca91cx42_do_dma(vmeDmaPacket_t *vmeDma)
{
	unsigned int dgcsreg = 0;
	unsigned int dctlreg = 0;
	int val;
	int channel, x;
	vmeDmaPacket_t *curDma;
	TDMA_Cmd_Packet *dmaLL;

	/* Sanity check the VME chain. */
	channel = vmeDma->channel_number;
	if (channel > 0) {
		return -EINVAL;
	}
	curDma = vmeDma;
	while (curDma != 0) {
		if (curDma->byteCount == 0) {
			return -EINVAL;
		}
		if (curDma->byteCount >= 0x1000000) {
			return -EINVAL;
		}
		if ((curDma->srcAddr & 7) != (curDma->dstAddr & 7)) {
			return -EINVAL;
		}
		switch (curDma->srcBus) {
		case VME_DMA_PCI:
			if (curDma->dstBus != VME_DMA_VME) {
				return -EINVAL;
			}
			break;
		case VME_DMA_VME:
			if (curDma->dstBus != VME_DMA_PCI) {
				return -EINVAL;
			}
			break;
		default:
			return -EINVAL;
			break;
		}
		if (uniSetupDctlReg(curDma, &dctlreg) < 0) {
			return -EINVAL;
		}

		curDma = curDma->pNextPacket;
		if (curDma == vmeDma) {	/* Endless Loop! */
			return -EINVAL;
		}
	}

	/* calculate control register */
	if (vmeDma->pNextPacket != 0) {
		dgcsreg = 0x8000000;
	} else {
		dgcsreg = 0;
	}

	for (x = 0; x < 8; x++) {	/* vme block size */
		if ((256 << x) >= vmeDma->maxVmeBlockSize) {
			break;
		}
	}
	if (x == 8)
		x = 7;
	dgcsreg |= (x << 20);

	if (vmeDma->vmeBackOffTimer) {
		for (x = 1; x < 8; x++) {	/* vme timer */
			if ((16 << (x - 1)) >= vmeDma->vmeBackOffTimer) {
				break;
			}
		}
		if (x == 8)
			x = 7;
		dgcsreg |= (x << 16);
	}
	/* Setup the dma chain */
	dmaLL = ca91cx42_setup_dma(vmeDma);

	/* Start the DMA */
	if (dgcsreg & 0x8000000) {
		vmeDma->vmeDmaStartTick =
		    ca91cx42_start_dma(channel, dgcsreg,
				(TDMA_Cmd_Packet *) virt_to_phys(dmaLL));
	} else {
		vmeDma->vmeDmaStartTick =
		    ca91cx42_start_dma(channel, dgcsreg, dmaLL);
	}

	/* Wait for the interrupt handler to flag completion (DGCS bit 0x800) */
	wait_event_interruptible(dma_queue,
		ioread32(ca91cx42_bridge->base + DGCS) & 0x800);

	val = ioread32(ca91cx42_bridge->base + DGCS);
	iowrite32(val | 0xF00, ca91cx42_bridge->base + DGCS);

	vmeDma->vmeDmaStatus = 0;

	if (!(val & 0x00000800)) {
		vmeDma->vmeDmaStatus = val & 0x700;
		printk(KERN_ERR "ca91c042: DMA Error in ca91cx42_DMA_irqhandler"
			" DGCS=%08X\n", val);
		val = ioread32(ca91cx42_bridge->base + DCPP);
		printk(KERN_ERR "ca91c042: DCPP=%08X\n", val);
		val = ioread32(ca91cx42_bridge->base + DCTL);
		printk(KERN_ERR "ca91c042: DCTL=%08X\n", val);
		val = ioread32(ca91cx42_bridge->base + DTBC);
		printk(KERN_ERR "ca91c042: DTBC=%08X\n", val);
		val = ioread32(ca91cx42_bridge->base + DLA);
		printk(KERN_ERR "ca91c042: DLA=%08X\n", val);
		val = ioread32(ca91cx42_bridge->base + DVA);
		printk(KERN_ERR "ca91c042: DVA=%08X\n", val);

	}
	/* Free the dma chain */
	ca91cx42_free_dma(dmaLL);

	return 0;
}
1735
Martyn Welch3d0f8bc2009-08-27 17:00:40 +01001736int ca91cx42_lm_set(vmeLmCfg_t *vmeLm)
Martyn Welch60479692009-07-31 09:28:17 +01001737{
Martyn Welch3d0f8bc2009-08-27 17:00:40 +01001738 int temp_ctl = 0;
Martyn Welch60479692009-07-31 09:28:17 +01001739
Martyn Welch3d0f8bc2009-08-27 17:00:40 +01001740 if (vmeLm->addrU)
1741 return -EINVAL;
1742
1743 switch (vmeLm->addrSpace) {
1744 case VME_A64:
1745 case VME_USER3:
1746 case VME_USER4:
1747 return -EINVAL;
1748 case VME_A16:
1749 temp_ctl |= 0x00000;
1750 break;
1751 case VME_A24:
1752 temp_ctl |= 0x10000;
1753 break;
1754 case VME_A32:
1755 temp_ctl |= 0x20000;
1756 break;
1757 case VME_CRCSR:
1758 temp_ctl |= 0x50000;
1759 break;
1760 case VME_USER1:
1761 temp_ctl |= 0x60000;
1762 break;
1763 case VME_USER2:
1764 temp_ctl |= 0x70000;
1765 break;
Martyn Welch60479692009-07-31 09:28:17 +01001766 }
Martyn Welch3d0f8bc2009-08-27 17:00:40 +01001767
1768 /* Disable while we are mucking around */
1769 iowrite32(0x00000000, ca91cx42_bridge->base + LM_CTL);
1770
1771 iowrite32(vmeLm->addr, ca91cx42_bridge->base + LM_BS);
1772
1773 /* Setup CTL register. */
1774 if (vmeLm->userAccessType & VME_SUPER)
1775 temp_ctl |= 0x00200000;
1776 if (vmeLm->userAccessType & VME_USER)
1777 temp_ctl |= 0x00100000;
1778 if (vmeLm->dataAccessType & VME_PROG)
1779 temp_ctl |= 0x00800000;
1780 if (vmeLm->dataAccessType & VME_DATA)
1781 temp_ctl |= 0x00400000;
1782
1783
1784 /* Write ctl reg and enable */
1785 iowrite32(0x80000000 | temp_ctl, ca91cx42_bridge->base + LM_CTL);
1786 temp_ctl = ioread32(ca91cx42_bridge->base + LM_CTL);
1787
1788 return 0;
Martyn Welch60479692009-07-31 09:28:17 +01001789}
1790
Martyn Welch3d0f8bc2009-08-27 17:00:40 +01001791int ca91cx42_wait_lm(vmeLmCfg_t *vmeLm)
Martyn Welch60479692009-07-31 09:28:17 +01001792{
Martyn Welch3d0f8bc2009-08-27 17:00:40 +01001793 unsigned long flags;
Martyn Welch60479692009-07-31 09:28:17 +01001794 unsigned int tmp;
Martyn Welch60479692009-07-31 09:28:17 +01001795
Martyn Welch3d0f8bc2009-08-27 17:00:40 +01001796 spin_lock_irqsave(&lm_lock, flags);
1797 spin_unlock_irqrestore(&lm_lock, flags);
1798 if (tmp == 0) {
1799 if (vmeLm->lmWait < 10)
1800 vmeLm->lmWait = 10;
1801 interruptible_sleep_on_timeout(&lm_queue, vmeLm->lmWait);
Martyn Welch60479692009-07-31 09:28:17 +01001802 }
Martyn Welch3d0f8bc2009-08-27 17:00:40 +01001803 iowrite32(0x00000000, ca91cx42_bridge->base + LM_CTL);
Martyn Welch60479692009-07-31 09:28:17 +01001804
Martyn Welch3d0f8bc2009-08-27 17:00:40 +01001805 return 0;
Martyn Welch60479692009-07-31 09:28:17 +01001806}
Martyn Welch3d0f8bc2009-08-27 17:00:40 +01001807
1808
1809
1810int ca91cx42_set_arbiter(vmeArbiterCfg_t *vmeArb)
1811{
1812 int temp_ctl = 0;
1813 int vbto = 0;
1814
1815 temp_ctl = ioread32(ca91cx42_bridge->base + MISC_CTL);
1816 temp_ctl &= 0x00FFFFFF;
1817
1818 if (vmeArb->globalTimeoutTimer == 0xFFFFFFFF) {
1819 vbto = 7;
1820 } else if (vmeArb->globalTimeoutTimer > 1024) {
1821 return -EINVAL;
1822 } else if (vmeArb->globalTimeoutTimer == 0) {
1823 vbto = 0;
1824 } else {
1825 vbto = 1;
1826 while ((16 * (1 << (vbto - 1))) < vmeArb->globalTimeoutTimer)
1827 vbto += 1;
1828 }
1829 temp_ctl |= (vbto << 28);
1830
1831 if (vmeArb->arbiterMode == VME_PRIORITY_MODE)
1832 temp_ctl |= 1 << 26;
1833
1834 if (vmeArb->arbiterTimeoutFlag)
1835 temp_ctl |= 2 << 24;
1836
1837 iowrite32(temp_ctl, ca91cx42_bridge->base + MISC_CTL);
1838 return 0;
1839}
1840
1841int ca91cx42_get_arbiter(vmeArbiterCfg_t *vmeArb)
1842{
1843 int temp_ctl = 0;
1844 int vbto = 0;
1845
1846 temp_ctl = ioread32(ca91cx42_bridge->base + MISC_CTL);
1847
1848 vbto = (temp_ctl >> 28) & 0xF;
1849 if (vbto != 0)
1850 vmeArb->globalTimeoutTimer = (16 * (1 << (vbto - 1)));
1851
1852 if (temp_ctl & (1 << 26))
1853 vmeArb->arbiterMode = VME_PRIORITY_MODE;
1854 else
1855 vmeArb->arbiterMode = VME_R_ROBIN_MODE;
1856
1857 if (temp_ctl & (3 << 24))
1858 vmeArb->arbiterTimeoutFlag = 1;
1859
1860 return 0;
1861}
1862
1863int ca91cx42_set_requestor(vmeRequesterCfg_t *vmeReq)
1864{
1865 int temp_ctl = 0;
1866
1867 temp_ctl = ioread32(ca91cx42_bridge->base + MAST_CTL);
1868 temp_ctl &= 0xFF0FFFFF;
1869
1870 if (vmeReq->releaseMode == 1)
1871 temp_ctl |= (1 << 20);
1872
1873 if (vmeReq->fairMode == 1)
1874 temp_ctl |= (1 << 21);
1875
1876 temp_ctl |= (vmeReq->requestLevel << 22);
1877
1878 iowrite32(temp_ctl, ca91cx42_bridge->base + MAST_CTL);
1879 return 0;
1880}
1881
1882int ca91cx42_get_requestor(vmeRequesterCfg_t *vmeReq)
1883{
1884 int temp_ctl = 0;
1885
1886 temp_ctl = ioread32(ca91cx42_bridge->base + MAST_CTL);
1887
1888 if (temp_ctl & (1 << 20))
1889 vmeReq->releaseMode = 1;
1890
1891 if (temp_ctl & (1 << 21))
1892 vmeReq->fairMode = 1;
1893
1894 vmeReq->requestLevel = (temp_ctl & 0xC00000) >> 22;
1895
1896 return 0;
1897}
1898
1899
1900#endif