/*
 * ibm_ocp_mal.c
 *
 * Armin Kuster <akuster@mvista.com>
 * June, 2002
 *
 * Copyright 2002 MontaVista Software Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/ocp.h>

#include "ibm_emac_mal.h"

/* Locking: Should we share a lock with the client?  The client could provide
 * a lock pointer (optionally) in the commac structure... I don't think this
 * is really necessary, though.
 */

/* This lock protects the commac list.  On today's UP implementations, it's
 * really only used as IRQ protection in mal_{register,unregister}_commac().
 */
static DEFINE_RWLOCK(mal_list_lock);

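/*
 * Attach a client (commac) to this MAL and claim its TX/RX channels.
 * Returns -EBUSY if any requested channel is already owned by another
 * commac.
 */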
int mal_register_commac(struct ibm_ocp_mal *mal, struct mal_commac *commac)
{
	unsigned long flags;

	write_lock_irqsave(&mal_list_lock, flags);

	/* Don't let multiple commacs claim the same channel */
	if ((mal->tx_chan_mask & commac->tx_chan_mask) ||
	    (mal->rx_chan_mask & commac->rx_chan_mask)) {
		write_unlock_irqrestore(&mal_list_lock, flags);
		return -EBUSY;
	}

	mal->tx_chan_mask |= commac->tx_chan_mask;
	mal->rx_chan_mask |= commac->rx_chan_mask;

	list_add(&commac->list, &mal->commac);

	write_unlock_irqrestore(&mal_list_lock, flags);

	return 0;
}

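/*
 * Detach a client from this MAL and release the TX/RX channels it
 * claimed in mal_register_commac().
 */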
int mal_unregister_commac(struct ibm_ocp_mal *mal, struct mal_commac *commac)
{
	unsigned long flags;

	write_lock_irqsave(&mal_list_lock, flags);

	mal->tx_chan_mask &= ~commac->tx_chan_mask;
	mal->rx_chan_mask &= ~commac->rx_chan_mask;

	list_del_init(&commac->list);

	write_unlock_irqrestore(&mal_list_lock, flags);

	return 0;
}

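/*
 * Program the receive buffer size register (MALRCBSn) for one channel.
 * Only channels whose RCBS DCR exists on this chip are accepted; any
 * other channel number returns -EINVAL.
 */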
int mal_set_rcbs(struct ibm_ocp_mal *mal, int channel, unsigned long size)
{
	switch (channel) {
	case 0:
		set_mal_dcrn(mal, DCRN_MALRCBS0, size);
		break;
#ifdef DCRN_MALRCBS1
	case 1:
		set_mal_dcrn(mal, DCRN_MALRCBS1, size);
		break;
#endif
#ifdef DCRN_MALRCBS2
	case 2:
		set_mal_dcrn(mal, DCRN_MALRCBS2, size);
		break;
#endif
#ifdef DCRN_MALRCBS3
	case 3:
		set_mal_dcrn(mal, DCRN_MALRCBS3, size);
		break;
#endif
	default:
		return -EINVAL;
	}

	return 0;
}

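/*
 * System error interrupt: read MALESR, report the error, and write the
 * value back to clear the latched status.
 */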
static irqreturn_t mal_serr(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct ibm_ocp_mal *mal = dev_instance;
	unsigned long mal_error;

	/*
	 * This SERR applies to one of the devices on the MAL; here we charge
	 * it against the first EMAC registered for the MAL.
	 */

	mal_error = get_mal_dcrn(mal, DCRN_MALESR);

	printk(KERN_ERR "%s: System Error (MALESR=%lx)\n",
	       "MAL" /* FIXME: get the name right */, mal_error);

	/* FIXME: decipher error */
	/* FIXME: distribute to commacs, if possible */

	/* Clear the error status register */
	set_mal_dcrn(mal, DCRN_MALESR, mal_error);

	return IRQ_HANDLED;
}

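/*
 * TX end-of-buffer interrupt: acknowledge MALTXEOBISR, then hand each
 * set channel bit to the commac that owns it.
 */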
static irqreturn_t mal_txeob(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct ibm_ocp_mal *mal = dev_instance;
	struct list_head *l;
	unsigned long isr;

	isr = get_mal_dcrn(mal, DCRN_MALTXEOBISR);
	set_mal_dcrn(mal, DCRN_MALTXEOBISR, isr);

	read_lock(&mal_list_lock);
	list_for_each(l, &mal->commac) {
		struct mal_commac *mc = list_entry(l, struct mal_commac, list);

		if (isr & mc->tx_chan_mask) {
			mc->ops->txeob(mc->dev, isr & mc->tx_chan_mask);
		}
	}
	read_unlock(&mal_list_lock);

	return IRQ_HANDLED;
}

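/*
 * RX end-of-buffer interrupt: acknowledge MALRXEOBISR, then hand each
 * set channel bit to the commac that owns it.
 */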
static irqreturn_t mal_rxeob(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct ibm_ocp_mal *mal = dev_instance;
	struct list_head *l;
	unsigned long isr;

	isr = get_mal_dcrn(mal, DCRN_MALRXEOBISR);
	set_mal_dcrn(mal, DCRN_MALRXEOBISR, isr);

	read_lock(&mal_list_lock);
	list_for_each(l, &mal->commac) {
		struct mal_commac *mc = list_entry(l, struct mal_commac, list);

		if (isr & mc->rx_chan_mask) {
			mc->ops->rxeob(mc->dev, isr & mc->rx_chan_mask);
		}
	}
	read_unlock(&mal_list_lock);

	return IRQ_HANDLED;
}

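/*
 * TX descriptor error interrupt: report MALTXDEIR and dispatch each set
 * channel bit to the owning commac's txde() handler.
 */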
static irqreturn_t mal_txde(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct ibm_ocp_mal *mal = dev_instance;
	struct list_head *l;
	unsigned long deir;

	deir = get_mal_dcrn(mal, DCRN_MALTXDEIR);

	/* FIXME: print which MAL correctly */
	printk(KERN_WARNING "%s: Tx descriptor error (MALTXDEIR=%lx)\n",
	       "MAL", deir);

	read_lock(&mal_list_lock);
	list_for_each(l, &mal->commac) {
		struct mal_commac *mc = list_entry(l, struct mal_commac, list);

		if (deir & mc->tx_chan_mask) {
			mc->ops->txde(mc->dev, deir & mc->tx_chan_mask);
		}
	}
	read_unlock(&mal_list_lock);

	return IRQ_HANDLED;
}

/*
 * This interrupt should be very rare at best.  It occurs when the
 * hardware encounters a receive descriptor whose empty bit is not set.
 * The recovery mechanism is to traverse the descriptors, handle any
 * that are marked to be handled, and reinitialize each along the way.
 * At that point the driver will be restarted.
 */
static irqreturn_t mal_rxde(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct ibm_ocp_mal *mal = dev_instance;
	struct list_head *l;
	unsigned long deir;

	deir = get_mal_dcrn(mal, DCRN_MALRXDEIR);

	/*
	 * This check really is needed; the zero case was encountered in
	 * stress testing.
	 */
	if (deir == 0)
		return IRQ_HANDLED;

	/* FIXME: print which MAL correctly */
	printk(KERN_WARNING "%s: Rx descriptor error (MALRXDEIR=%lx)\n",
	       "MAL", deir);

	read_lock(&mal_list_lock);
	list_for_each(l, &mal->commac) {
		struct mal_commac *mc = list_entry(l, struct mal_commac, list);

		if (deir & mc->rx_chan_mask) {
			mc->ops->rxde(mc->dev, deir & mc->rx_chan_mask);
		}
	}
	read_unlock(&mal_list_lock);

	return IRQ_HANDLED;
}

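/*
 * Probe one MAL instance: allocate and zero the driver state, reset and
 * configure the MAL, point every TX/RX channel at its 4k-aligned
 * descriptor table, hook up the five MAL interrupts, and enable them.
 */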
static int __init mal_probe(struct ocp_device *ocpdev)
{
	struct ibm_ocp_mal *mal = NULL;
	struct ocp_func_mal_data *maldata;
	int err = 0;

	maldata = (struct ocp_func_mal_data *)ocpdev->def->additions;
	if (maldata == NULL) {
		printk(KERN_ERR "mal%d: Missing additional data!\n",
		       ocpdev->def->index);
		return -ENODEV;
	}

	mal = kmalloc(sizeof(struct ibm_ocp_mal), GFP_KERNEL);
	if (mal == NULL) {
		printk(KERN_ERR
		       "mal%d: Out of memory allocating MAL structure!\n",
		       ocpdev->def->index);
		return -ENOMEM;
	}
	memset(mal, 0, sizeof(*mal));

	switch (ocpdev->def->index) {
	case 0:
		mal->dcrbase = DCRN_MAL_BASE;
		break;
#ifdef DCRN_MAL1_BASE
	case 1:
		mal->dcrbase = DCRN_MAL1_BASE;
		break;
#endif
	default:
		BUG();
	}

	/**************************/

	INIT_LIST_HEAD(&mal->commac);

	set_mal_dcrn(mal, DCRN_MALRXCARR, 0xFFFFFFFF);
	set_mal_dcrn(mal, DCRN_MALTXCARR, 0xFFFFFFFF);

	set_mal_dcrn(mal, DCRN_MALCR, MALCR_MMSR);	/* 384 */
	/* FIXME: Add delay */

	/* Set the MAL configuration register */
	set_mal_dcrn(mal, DCRN_MALCR,
		     MALCR_PLBB | MALCR_OPBBL | MALCR_LEA |
		     MALCR_PLBLT_DEFAULT);
	/* It would be nice to allocate buffers separately for each
	 * channel, but we can't because the channels share the upper
	 * 13 bits of address lines.  Each channel's buffer must also
	 * be 4k aligned, so we allocate 4k for each channel.  This is
	 * inefficient; FIXME: do better, if possible */
	mal->tx_virt_addr = dma_alloc_coherent(&ocpdev->dev,
					       MAL_DT_ALIGN *
					       maldata->num_tx_chans,
					       &mal->tx_phys_addr, GFP_KERNEL);
	if (mal->tx_virt_addr == NULL) {
		printk(KERN_ERR
		       "mal%d: Out of memory allocating MAL descriptors!\n",
		       ocpdev->def->index);
		err = -ENOMEM;
		goto fail;
	}

	/* God, oh, god, I hate DCRs */
	set_mal_dcrn(mal, DCRN_MALTXCTP0R, mal->tx_phys_addr);
#ifdef DCRN_MALTXCTP1R
	if (maldata->num_tx_chans > 1)
		set_mal_dcrn(mal, DCRN_MALTXCTP1R,
			     mal->tx_phys_addr + MAL_DT_ALIGN);
#endif /* DCRN_MALTXCTP1R */
#ifdef DCRN_MALTXCTP2R
	if (maldata->num_tx_chans > 2)
		set_mal_dcrn(mal, DCRN_MALTXCTP2R,
			     mal->tx_phys_addr + 2 * MAL_DT_ALIGN);
#endif /* DCRN_MALTXCTP2R */
#ifdef DCRN_MALTXCTP3R
	if (maldata->num_tx_chans > 3)
		set_mal_dcrn(mal, DCRN_MALTXCTP3R,
			     mal->tx_phys_addr + 3 * MAL_DT_ALIGN);
#endif /* DCRN_MALTXCTP3R */
#ifdef DCRN_MALTXCTP4R
	if (maldata->num_tx_chans > 4)
		set_mal_dcrn(mal, DCRN_MALTXCTP4R,
			     mal->tx_phys_addr + 4 * MAL_DT_ALIGN);
#endif /* DCRN_MALTXCTP4R */
#ifdef DCRN_MALTXCTP5R
	if (maldata->num_tx_chans > 5)
		set_mal_dcrn(mal, DCRN_MALTXCTP5R,
			     mal->tx_phys_addr + 5 * MAL_DT_ALIGN);
#endif /* DCRN_MALTXCTP5R */
#ifdef DCRN_MALTXCTP6R
	if (maldata->num_tx_chans > 6)
		set_mal_dcrn(mal, DCRN_MALTXCTP6R,
			     mal->tx_phys_addr + 6 * MAL_DT_ALIGN);
#endif /* DCRN_MALTXCTP6R */
#ifdef DCRN_MALTXCTP7R
	if (maldata->num_tx_chans > 7)
		set_mal_dcrn(mal, DCRN_MALTXCTP7R,
			     mal->tx_phys_addr + 7 * MAL_DT_ALIGN);
#endif /* DCRN_MALTXCTP7R */

	mal->rx_virt_addr = dma_alloc_coherent(&ocpdev->dev,
					       MAL_DT_ALIGN *
					       maldata->num_rx_chans,
					       &mal->rx_phys_addr, GFP_KERNEL);
	if (mal->rx_virt_addr == NULL) {
		printk(KERN_ERR
		       "mal%d: Out of memory allocating MAL descriptors!\n",
		       ocpdev->def->index);
		err = -ENOMEM;
		goto fail_free_tx;
	}

	set_mal_dcrn(mal, DCRN_MALRXCTP0R, mal->rx_phys_addr);
#ifdef DCRN_MALRXCTP1R
	if (maldata->num_rx_chans > 1)
		set_mal_dcrn(mal, DCRN_MALRXCTP1R,
			     mal->rx_phys_addr + MAL_DT_ALIGN);
#endif /* DCRN_MALRXCTP1R */
#ifdef DCRN_MALRXCTP2R
	if (maldata->num_rx_chans > 2)
		set_mal_dcrn(mal, DCRN_MALRXCTP2R,
			     mal->rx_phys_addr + 2 * MAL_DT_ALIGN);
#endif /* DCRN_MALRXCTP2R */
#ifdef DCRN_MALRXCTP3R
	if (maldata->num_rx_chans > 3)
		set_mal_dcrn(mal, DCRN_MALRXCTP3R,
			     mal->rx_phys_addr + 3 * MAL_DT_ALIGN);
#endif /* DCRN_MALRXCTP3R */

	err = request_irq(maldata->serr_irq, mal_serr, 0, "MAL SERR", mal);
	if (err)
		goto fail_free_rx;
	err = request_irq(maldata->txde_irq, mal_txde, 0, "MAL TX DE", mal);
	if (err)
		goto fail_serr;
	err = request_irq(maldata->txeob_irq, mal_txeob, 0, "MAL TX EOB", mal);
	if (err)
		goto fail_txde;
	err = request_irq(maldata->rxde_irq, mal_rxde, 0, "MAL RX DE", mal);
	if (err)
		goto fail_txeob;
	err = request_irq(maldata->rxeob_irq, mal_rxeob, 0, "MAL RX EOB", mal);
	if (err)
		goto fail_rxde;

	set_mal_dcrn(mal, DCRN_MALIER,
		     MALIER_DE | MALIER_NE | MALIER_TE |
		     MALIER_OPBE | MALIER_PLBE);

	/* Advertise me to the rest of the world */
	ocp_set_drvdata(ocpdev, mal);

	printk(KERN_INFO "mal%d: Initialized, %d tx channels, %d rx channels\n",
	       ocpdev->def->index, maldata->num_tx_chans,
	       maldata->num_rx_chans);

	return 0;

	/* Unwind everything acquired so far, in reverse order */
      fail_rxde:
	free_irq(maldata->rxde_irq, mal);
      fail_txeob:
	free_irq(maldata->txeob_irq, mal);
      fail_txde:
	free_irq(maldata->txde_irq, mal);
      fail_serr:
	free_irq(maldata->serr_irq, mal);
      fail_free_rx:
	dma_free_coherent(&ocpdev->dev, MAL_DT_ALIGN * maldata->num_rx_chans,
			  mal->rx_virt_addr, mal->rx_phys_addr);
      fail_free_tx:
	dma_free_coherent(&ocpdev->dev, MAL_DT_ALIGN * maldata->num_tx_chans,
			  mal->tx_virt_addr, mal->tx_phys_addr);
      fail:
	kfree(mal);
	return err;
}

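/*
 * Tear down one MAL instance: release the interrupts and free the
 * descriptor memory and driver state allocated in mal_probe().
 */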
static void __exit mal_remove(struct ocp_device *ocpdev)
{
	struct ibm_ocp_mal *mal = ocp_get_drvdata(ocpdev);
	struct ocp_func_mal_data *maldata = ocpdev->def->additions;

	BUG_ON(!maldata);

	ocp_set_drvdata(ocpdev, NULL);

	/* FIXME: shut down the MAL, deal with dependency with emac */
	free_irq(maldata->serr_irq, mal);
	free_irq(maldata->txde_irq, mal);
	free_irq(maldata->txeob_irq, mal);
	free_irq(maldata->rxde_irq, mal);
	free_irq(maldata->rxeob_irq, mal);

	if (mal->tx_virt_addr)
		dma_free_coherent(&ocpdev->dev,
				  MAL_DT_ALIGN * maldata->num_tx_chans,
				  mal->tx_virt_addr, mal->tx_phys_addr);

	if (mal->rx_virt_addr)
		dma_free_coherent(&ocpdev->dev,
				  MAL_DT_ALIGN * maldata->num_rx_chans,
				  mal->rx_virt_addr, mal->rx_phys_addr);

	kfree(mal);
}

/* Structure for a device driver */
static struct ocp_device_id mal_ids[] = {
	{ .vendor = OCP_ANY_ID, .function = OCP_FUNC_MAL },
	{ .vendor = OCP_VENDOR_INVALID }
};

static struct ocp_driver mal_driver = {
	.name = "mal",
	.id_table = mal_ids,

	.probe = mal_probe,
	.remove = mal_remove,
};

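/*
 * Module init/exit: register the MAL OCP driver.  If registration
 * reports an error, unregister to drop any partial state and fail the
 * module load with -ENODEV.
 */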
static int __init init_mals(void)
{
	int rc;

	rc = ocp_register_driver(&mal_driver);
	if (rc < 0) {
		ocp_unregister_driver(&mal_driver);
		return -ENODEV;
	}

	return 0;
}

static void __exit exit_mals(void)
{
	ocp_unregister_driver(&mal_driver);
}

module_init(init_mals);
module_exit(exit_mals);