blob: 8de81ecd3ba6d6f02fcf47f9f75a36b2d5c0646b [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * ohci1394.c - driver for OHCI 1394 boards
3 * Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
4 * Gord Peters <GordPeters@smarttech.com>
5 * 2001 Ben Collins <bcollins@debian.org>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software Foundation,
19 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 */
21
22/*
23 * Things known to be working:
24 * . Async Request Transmit
25 * . Async Response Receive
26 * . Async Request Receive
27 * . Async Response Transmit
28 * . Iso Receive
29 * . DMA mmap for iso receive
30 * . Config ROM generation
31 *
32 * Things implemented, but still in test phase:
33 * . Iso Transmit
34 * . Async Stream Packets Transmit (Receive done via Iso interface)
35 *
36 * Things not implemented:
37 * . DMA error recovery
38 *
39 * Known bugs:
40 * . devctl BUS_RESET arg confusion (reset type or root holdoff?)
41 * added LONG_RESET_ROOT and SHORT_RESET_ROOT for root holdoff --kk
42 */
43
44/*
45 * Acknowledgments:
46 *
47 * Adam J Richter <adam@yggdrasil.com>
48 * . Use of pci_class to find device
49 *
50 * Emilie Chung <emilie.chung@axis.com>
51 * . Tip on Async Request Filter
52 *
53 * Pascal Drolet <pascal.drolet@informission.ca>
 * . Various tips for optimization and functionalities
55 *
56 * Robert Ficklin <rficklin@westengineering.com>
57 * . Loop in irq_handler
58 *
59 * James Goodwin <jamesg@Filanet.com>
60 * . Various tips on initialization, self-id reception, etc.
61 *
62 * Albrecht Dress <ad@mpifr-bonn.mpg.de>
63 * . Apple PowerBook detection
64 *
65 * Daniel Kobras <daniel.kobras@student.uni-tuebingen.de>
66 * . Reset the board properly before leaving + misc cleanups
67 *
68 * Leon van Stuivenberg <leonvs@iae.nl>
69 * . Bug fixes
70 *
71 * Ben Collins <bcollins@debian.org>
72 * . Working big-endian support
 * . Updated to 2.4.x module scheme (PCI as well)
74 * . Config ROM generation
75 *
76 * Manfred Weihs <weihs@ict.tuwien.ac.at>
77 * . Reworked code for initiating bus resets
78 * (long, short, with or without hold-off)
79 *
80 * Nandu Santhi <contactnandu@users.sourceforge.net>
81 * . Added support for nVidia nForce2 onboard Firewire chipset
82 *
83 */
84
Linus Torvalds1da177e2005-04-16 15:20:36 -070085#include <linux/kernel.h>
86#include <linux/list.h>
87#include <linux/slab.h>
88#include <linux/interrupt.h>
89#include <linux/wait.h>
90#include <linux/errno.h>
91#include <linux/module.h>
92#include <linux/moduleparam.h>
93#include <linux/pci.h>
94#include <linux/fs.h>
95#include <linux/poll.h>
96#include <asm/byteorder.h>
97#include <asm/atomic.h>
98#include <asm/uaccess.h>
99#include <linux/delay.h>
100#include <linux/spinlock.h>
101
102#include <asm/pgtable.h>
103#include <asm/page.h>
104#include <asm/irq.h>
105#include <linux/sched.h>
106#include <linux/types.h>
107#include <linux/vmalloc.h>
108#include <linux/init.h>
109
110#ifdef CONFIG_PPC_PMAC
111#include <asm/machdep.h>
112#include <asm/pmac_feature.h>
113#include <asm/prom.h>
114#include <asm/pci-bridge.h>
115#endif
116
117#include "csr1212.h"
118#include "ieee1394.h"
119#include "ieee1394_types.h"
120#include "hosts.h"
121#include "dma.h"
122#include "iso.h"
123#include "ieee1394_core.h"
124#include "highlevel.h"
125#include "ohci1394.h"
126
127#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
128#define OHCI1394_DEBUG
129#endif
130
131#ifdef DBGMSG
132#undef DBGMSG
133#endif
134
135#ifdef OHCI1394_DEBUG
136#define DBGMSG(fmt, args...) \
137printk(KERN_INFO "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
138#else
139#define DBGMSG(fmt, args...)
140#endif
141
142#ifdef CONFIG_IEEE1394_OHCI_DMA_DEBUG
143#define OHCI_DMA_ALLOC(fmt, args...) \
144 HPSB_ERR("%s(%s)alloc(%d): "fmt, OHCI1394_DRIVER_NAME, __FUNCTION__, \
145 ++global_outstanding_dmas, ## args)
146#define OHCI_DMA_FREE(fmt, args...) \
147 HPSB_ERR("%s(%s)free(%d): "fmt, OHCI1394_DRIVER_NAME, __FUNCTION__, \
148 --global_outstanding_dmas, ## args)
149static int global_outstanding_dmas = 0;
150#else
151#define OHCI_DMA_ALLOC(fmt, args...)
152#define OHCI_DMA_FREE(fmt, args...)
153#endif
154
155/* print general (card independent) information */
156#define PRINT_G(level, fmt, args...) \
157printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)
158
159/* print card specific information */
160#define PRINT(level, fmt, args...) \
161printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
162
Linus Torvalds1da177e2005-04-16 15:20:36 -0700163/* Module Parameters */
164static int phys_dma = 1;
Ben Collinsfa9b7392006-06-12 18:13:42 -0400165module_param(phys_dma, int, 0444);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700166MODULE_PARM_DESC(phys_dma, "Enable physical dma (default = 1).");
167
168static void dma_trm_tasklet(unsigned long data);
169static void dma_trm_reset(struct dma_trm_ctx *d);
170
171static int alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
172 enum context_type type, int ctx, int num_desc,
173 int buf_size, int split_buf_size, int context_base);
174static void stop_dma_rcv_ctx(struct dma_rcv_ctx *d);
175static void free_dma_rcv_ctx(struct dma_rcv_ctx *d);
176
177static int alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
178 enum context_type type, int ctx, int num_desc,
179 int context_base);
180
181static void ohci1394_pci_remove(struct pci_dev *pdev);
182
183#ifndef __LITTLE_ENDIAN
184static unsigned hdr_sizes[] =
185{
186 3, /* TCODE_WRITEQ */
187 4, /* TCODE_WRITEB */
188 3, /* TCODE_WRITE_RESPONSE */
189 0, /* ??? */
190 3, /* TCODE_READQ */
191 4, /* TCODE_READB */
192 3, /* TCODE_READQ_RESPONSE */
193 4, /* TCODE_READB_RESPONSE */
194 1, /* TCODE_CYCLE_START (???) */
195 4, /* TCODE_LOCK_REQUEST */
196 2, /* TCODE_ISO_DATA */
197 4, /* TCODE_LOCK_RESPONSE */
198};
199
200/* Swap headers */
201static inline void packet_swab(quadlet_t *data, int tcode)
202{
203 size_t size = hdr_sizes[tcode];
204
205 if (tcode > TCODE_LOCK_RESPONSE || hdr_sizes[tcode] == 0)
206 return;
207
208 while (size--)
209 data[size] = swab32(data[size]);
210}
211#else
212/* Don't waste cycles on same sex byte swaps */
213#define packet_swab(w,x)
214#endif /* !LITTLE_ENDIAN */
215
216/***********************************
217 * IEEE-1394 functionality section *
218 ***********************************/
219
/* Read one 8-bit PHY register through the OHCI PhyControl register.
 * Busy-waits (up to OHCI_LOOP_COUNT ms) for the read to complete;
 * on timeout it logs an error but still returns whatever rdData
 * happens to contain.  Serialized by ohci->phy_reg_lock. */
static u8 get_phy_reg(struct ti_ohci *ohci, u8 addr)
{
	int i;
	unsigned long flags;
	quadlet_t r;

	spin_lock_irqsave (&ohci->phy_reg_lock, flags);

	/* 0x00008000 = rdReg bit; regAddr goes into bits 8..11 */
	reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | 0x00008000);

	/* Poll for rdDone (bit 31), sleeping 1 ms between attempts */
	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		if (reg_read(ohci, OHCI1394_PhyControl) & 0x80000000)
			break;

		mdelay(1);
	}

	r = reg_read(ohci, OHCI1394_PhyControl);

	if (i >= OHCI_LOOP_COUNT)
		PRINT (KERN_ERR, "Get PHY Reg timeout [0x%08x/0x%08x/%d]",
		       r, r & 0x80000000, i);

	spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);

	/* rdData is in bits 16..23 of PhyControl */
	return (r & 0x00ff0000) >> 16;
}
247
/* Write one 8-bit PHY register through the OHCI PhyControl register.
 * Busy-waits (up to OHCI_LOOP_COUNT ms) for the controller to clear
 * the wrReg bit, logging an error on timeout.  Serialized by
 * ohci->phy_reg_lock. */
static void set_phy_reg(struct ti_ohci *ohci, u8 addr, u8 data)
{
	int i;
	unsigned long flags;
	u32 r = 0;

	spin_lock_irqsave (&ohci->phy_reg_lock, flags);

	/* 0x00004000 = wrReg bit; regAddr in bits 8..11, wrData in 0..7 */
	reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | data | 0x00004000);

	/* Poll until the hardware clears wrReg (write accepted) */
	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		r = reg_read(ohci, OHCI1394_PhyControl);
		if (!(r & 0x00004000))
			break;

		mdelay(1);
	}

	if (i == OHCI_LOOP_COUNT)
		PRINT (KERN_ERR, "Set PHY Reg timeout [0x%08x/0x%08x/%d]",
		       r, r & 0x00004000, i);

	spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);

	return;
}
274
275/* Or's our value into the current value */
276static void set_phy_reg_mask(struct ti_ohci *ohci, u8 addr, u8 data)
277{
278 u8 old;
279
280 old = get_phy_reg (ohci, addr);
281 old |= data;
282 set_phy_reg (ohci, addr, old);
283
284 return;
285}
286
/* Validate and parse the self-ID buffer that the controller DMA'd
 * after a bus reset, handing each consistent self-ID packet to the
 * ieee1394 core via hpsb_selfid_received().
 * @phyid:  our own physical node ID (used only for a debug message)
 * @isroot: whether we are root (currently unused here)
 * On reception errors, forces another bus reset through PHY register 1,
 * giving up after OHCI1394_MAX_SELF_ID_ERRORS consecutive failures. */
static void handle_selfid(struct ti_ohci *ohci, struct hpsb_host *host,
				int phyid, int isroot)
{
	quadlet_t *q = ohci->selfid_buf_cpu;
	quadlet_t self_id_count=reg_read(ohci, OHCI1394_SelfIDCount);
	size_t size;
	quadlet_t q0, q1;

	/* Check status of self-id reception */

	if (ohci->selfid_swap)
		q0 = le32_to_cpu(q[0]);
	else
		q0 = q[0];

	/* Bit 31 of SelfIDCount is the error flag; the generation field
	 * (bits 16..23) must match the generation stamped into the first
	 * buffer quadlet, or the buffer raced with another bus reset. */
	if ((self_id_count & 0x80000000) ||
	    ((self_id_count & 0x00FF0000) != (q0 & 0x00FF0000))) {
		PRINT(KERN_ERR,
		      "Error in reception of SelfID packets [0x%08x/0x%08x] (count: %d)",
		      self_id_count, q0, ohci->self_id_errors);

		/* Tip by James Goodwin <jamesg@Filanet.com>:
		 * We had an error, generate another bus reset in response. */
		if (ohci->self_id_errors<OHCI1394_MAX_SELF_ID_ERRORS) {
			set_phy_reg_mask (ohci, 1, 0x40);
			ohci->self_id_errors++;
		} else {
			PRINT(KERN_ERR,
			      "Too many errors on SelfID error reception, giving up!");
		}
		return;
	}

	/* SelfID Ok, reset error counter. */
	ohci->self_id_errors = 0;

	/* Payload size in quadlets, minus the leading count quadlet */
	size = ((self_id_count & 0x00001FFC) >> 2) - 1;
	q++;

	/* Each self-ID packet occupies two quadlets: the data followed by
	 * its bitwise complement, which we use as a consistency check. */
	while (size > 0) {
		if (ohci->selfid_swap) {
			q0 = le32_to_cpu(q[0]);
			q1 = le32_to_cpu(q[1]);
		} else {
			q0 = q[0];
			q1 = q[1];
		}

		if (q0 == ~q1) {
			DBGMSG ("SelfID packet 0x%x received", q0);
			hpsb_selfid_received(host, cpu_to_be32(q0));
			/* Bits 24..29 of a self-ID packet hold the phy ID */
			if (((q0 & 0x3f000000) >> 24) == phyid)
				DBGMSG ("SelfID for this node is 0x%08x", q0);
		} else {
			PRINT(KERN_ERR,
			      "SelfID is inconsistent [0x%08x/0x%08x]", q0, q1);
		}
		q += 2;
		size -= 2;
	}

	DBGMSG("SelfID complete");

	return;
}
352
/* Issue a controller soft reset: set HCControl.softReset and poll
 * (up to OHCI_LOOP_COUNT ms) until the hardware clears it again.
 * A timeout is silently tolerated — only the completion is logged. */
static void ohci_soft_reset(struct ti_ohci *ohci) {
	int i;

	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);

	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		/* softReset self-clears when the reset has completed */
		if (!(reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_softReset))
			break;
		mdelay(1);
	}
	DBGMSG ("Soft reset finished");
}
365
366
/* Generate the dma receive prgs and start the context */
static void initialize_dma_rcv_ctx(struct dma_rcv_ctx *d, int generate_irq)
{
	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
	int i;

	/* Make sure the hardware context is stopped before reprogramming */
	ohci1394_stop_context(ohci, d->ctrlClear, NULL);

	/* Build one INPUT_MORE descriptor per receive buffer, chained in
	 * a ring via their branchAddress fields. */
	for (i=0; i<d->num_desc; i++) {
		u32 c;

		c = DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE | DMA_CTL_BRANCH;
		if (generate_irq)
			c |= DMA_CTL_IRQ;

		d->prg_cpu[i]->control = cpu_to_le32(c | d->buf_size);

		/* End of descriptor list? */
		if (i + 1 < d->num_desc) {
			/* low bit of branchAddress is the Z value (1 here) */
			d->prg_cpu[i]->branchAddress =
				cpu_to_le32((d->prg_bus[i+1] & 0xfffffff0) | 0x1);
		} else {
			/* Last descriptor branches back to the first with
			 * Z=0 — presumably so the context stalls there until
			 * software patches the branch; confirm against the
			 * OHCI spec's context program rules. */
			d->prg_cpu[i]->branchAddress =
				cpu_to_le32((d->prg_bus[0] & 0xfffffff0));
		}

		d->prg_cpu[i]->address = cpu_to_le32(d->buf_bus[i]);
		d->prg_cpu[i]->status = cpu_to_le32(d->buf_size);
	}

	d->buf_ind = 0;
	d->buf_offset = 0;

	if (d->type == DMA_CTX_ISO) {
		/* Clear contextControl */
		reg_write(ohci, d->ctrlClear, 0xffffffff);

		/* Set bufferFill, isochHeader, multichannel for IR context */
		reg_write(ohci, d->ctrlSet, 0xd0000000);

		/* Set the context match register to match on all tags */
		reg_write(ohci, d->ctxtMatch, 0xf0000000);

		/* Clear the multi channel mask high and low registers */
		reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, 0xffffffff);

		/* Set up isoRecvIntMask to generate interrupts */
		reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << d->ctx);
	}

	/* Tell the controller where the first AR program is */
	reg_write(ohci, d->cmdPtr, d->prg_bus[0] | 0x1);

	/* Run context */
	reg_write(ohci, d->ctrlSet, 0x00008000);

	DBGMSG("Receive DMA ctx=%d initialized", d->ctx);
}
426
427/* Initialize the dma transmit context */
428static void initialize_dma_trm_ctx(struct dma_trm_ctx *d)
429{
430 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
431
432 /* Stop the context */
433 ohci1394_stop_context(ohci, d->ctrlClear, NULL);
434
435 d->prg_ind = 0;
436 d->sent_ind = 0;
437 d->free_prgs = d->num_desc;
438 d->branchAddrPtr = NULL;
439 INIT_LIST_HEAD(&d->fifo_list);
440 INIT_LIST_HEAD(&d->pending_list);
441
442 if (d->type == DMA_CTX_ISO) {
443 /* enable interrupts */
444 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << d->ctx);
445 }
446
447 DBGMSG("Transmit DMA ctx=%d initialized", d->ctx);
448}
449
450/* Count the number of available iso contexts */
451static int get_nb_iso_ctx(struct ti_ohci *ohci, int reg)
452{
453 int i,ctx=0;
454 u32 tmp;
455
456 reg_write(ohci, reg, 0xffffffff);
457 tmp = reg_read(ohci, reg);
458
459 DBGMSG("Iso contexts reg: %08x implemented: %08x", reg, tmp);
460
461 /* Count the number of contexts */
462 for (i=0; i<32; i++) {
463 if (tmp & 1) ctx++;
464 tmp >>= 1;
465 }
466 return ctx;
467}
468
/* Global initialization */
/* Bring the controller to an operational state: program bus options,
 * PHY contention bits, DMA contexts, interrupt masks, and finally
 * enable the link.  Called once per adapter during probe/resume.
 * The sequence of register writes below is order-sensitive. */
static void ohci_initialize(struct ti_ohci *ohci)
{
	char irq_buf[16];
	quadlet_t buf;
	int num_ports, i;

	spin_lock_init(&ohci->phy_reg_lock);

	/* Put some defaults to these undefined bus options */
	buf = reg_read(ohci, OHCI1394_BusOptions);
	buf |= 0x60000000; /* Enable CMC and ISC */
	if (hpsb_disable_irm)
		buf &= ~0x80000000;
	else
		buf |= 0x80000000; /* Enable IRMC */
	buf &= ~0x00ff0000; /* XXX: Set cyc_clk_acc to zero for now */
	buf &= ~0x18000000; /* Disable PMC and BMC */
	reg_write(ohci, OHCI1394_BusOptions, buf);

	/* Set the bus number */
	reg_write(ohci, OHCI1394_NodeID, 0x0000ffc0);

	/* Enable posted writes */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_postedWriteEnable);

	/* Clear link control register */
	reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);

	/* Enable cycle timer and cycle master and set the IRM
	 * contender bit in our self ID packets if appropriate. */
	reg_write(ohci, OHCI1394_LinkControlSet,
		  OHCI1394_LinkControl_CycleTimerEnable |
		  OHCI1394_LinkControl_CycleMaster);
	i = get_phy_reg(ohci, 4) | PHY_04_LCTRL;
	if (hpsb_disable_irm)
		i &= ~PHY_04_CONTENDER;
	else
		i |= PHY_04_CONTENDER;
	set_phy_reg(ohci, 4, i);

	/* Set up self-id dma buffer */
	reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->selfid_buf_bus);

	/* enable self-id and phys */
	reg_write(ohci, OHCI1394_LinkControlSet, OHCI1394_LinkControl_RcvSelfID |
		  OHCI1394_LinkControl_RcvPhyPkt);

	/* Set the Config ROM mapping register */
	reg_write(ohci, OHCI1394_ConfigROMmap, ohci->csr_config_rom_bus);

	/* Now get our max packet size: 2^(max_rec+1) bytes, max_rec
	 * being bits 12..15 of BusOptions */
	ohci->max_packet_size =
		1<<(((reg_read(ohci, OHCI1394_BusOptions)>>12)&0xf)+1);

	/* Don't accept phy packets into AR request context */
	reg_write(ohci, OHCI1394_LinkControlClear, 0x00000400);

	/* Clear the interrupt mask */
	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);

	/* Clear the interrupt mask */
	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);

	/* Initialize AR dma */
	initialize_dma_rcv_ctx(&ohci->ar_req_context, 0);
	initialize_dma_rcv_ctx(&ohci->ar_resp_context, 0);

	/* Initialize AT dma */
	initialize_dma_trm_ctx(&ohci->at_req_context);
	initialize_dma_trm_ctx(&ohci->at_resp_context);

	/* Initialize IR Legacy DMA channel mask */
	ohci->ir_legacy_channels = 0;

	/* Accept AR requests from all nodes */
	reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);

	/* Set the address range of the physical response unit.
	 * Most controllers do not implement it as a writable register though.
	 * They will keep a hardwired offset of 0x00010000 and show 0x0 as
	 * register content.
	 * To actually enable physical responses is the job of our interrupt
	 * handler which programs the physical request filter. */
	reg_write(ohci, OHCI1394_PhyUpperBound,
		  OHCI1394_PHYS_UPPER_BOUND_PROGRAMMED >> 16);

	DBGMSG("physUpperBoundOffset=%08x",
	       reg_read(ohci, OHCI1394_PhyUpperBound));

	/* Specify AT retries */
	reg_write(ohci, OHCI1394_ATRetries,
		  OHCI1394_MAX_AT_REQ_RETRIES |
		  (OHCI1394_MAX_AT_RESP_RETRIES<<4) |
		  (OHCI1394_MAX_PHYS_RESP_RETRIES<<8));

	/* We don't want hardware swapping */
	reg_write(ohci, OHCI1394_HCControlClear, OHCI1394_HCControl_noByteSwap);

	/* Enable interrupts */
	reg_write(ohci, OHCI1394_IntMaskSet,
		  OHCI1394_unrecoverableError |
		  OHCI1394_masterIntEnable |
		  OHCI1394_busReset |
		  OHCI1394_selfIDComplete |
		  OHCI1394_RSPkt |
		  OHCI1394_RQPkt |
		  OHCI1394_respTxComplete |
		  OHCI1394_reqTxComplete |
		  OHCI1394_isochRx |
		  OHCI1394_isochTx |
		  OHCI1394_postedWriteErr |
		  OHCI1394_cycleTooLong |
		  OHCI1394_cycleInconsistent);

	/* Enable link */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_linkEnable);

	/* Version register: BCD-ish major/minor decoded in the PRINT below */
	buf = reg_read(ohci, OHCI1394_Version);
	sprintf (irq_buf, "%d", ohci->dev->irq);
	PRINT(KERN_INFO, "OHCI-1394 %d.%d (PCI): IRQ=[%s] "
	      "MMIO=[%llx-%llx] Max Packet=[%d] IR/IT contexts=[%d/%d]",
	      ((((buf) >> 16) & 0xf) + (((buf) >> 20) & 0xf) * 10),
	      ((((buf) >> 4) & 0xf) + ((buf) & 0xf) * 10), irq_buf,
	      (unsigned long long)pci_resource_start(ohci->dev, 0),
	      (unsigned long long)pci_resource_start(ohci->dev, 0) + OHCI1394_REGISTER_SIZE - 1,
	      ohci->max_packet_size,
	      ohci->nb_iso_rcv_ctx, ohci->nb_iso_xmit_ctx);

	/* Check all of our ports to make sure that if anything is
	 * connected, we enable that port. */
	num_ports = get_phy_reg(ohci, 2) & 0xf;
	for (i = 0; i < num_ports; i++) {
		unsigned int status;

		/* PHY register 7 selects the port, register 8 reports it */
		set_phy_reg(ohci, 7, i);
		status = get_phy_reg(ohci, 8);

		if (status & 0x20)
			set_phy_reg(ohci, 8, status & ~1);
	}

	/* Serial EEPROM Sanity check. */
	if ((ohci->max_packet_size < 512) ||
	    (ohci->max_packet_size > 4096)) {
		/* Serial EEPROM contents are suspect, set a sane max packet
		 * size and print the raw contents for bug reports if verbose
		 * debug is enabled. */
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
		int i;	/* shadows the outer i; local to this dump block */
#endif

		PRINT(KERN_DEBUG, "Serial EEPROM has suspicious values, "
		      "attempting to setting max_packet_size to 512 bytes");
		reg_write(ohci, OHCI1394_BusOptions,
			  (reg_read(ohci, OHCI1394_BusOptions) & 0xf007) | 0x8002);
		ohci->max_packet_size = 512;
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
		PRINT(KERN_DEBUG, "    EEPROM Present: %d",
		      (reg_read(ohci, OHCI1394_Version) >> 24) & 0x1);
		reg_write(ohci, OHCI1394_GUID_ROM, 0x80000000);

		/* Wait (bounded) for the GUID ROM address reset to finish */
		for (i = 0;
		     ((i < 1000) &&
		      (reg_read(ohci, OHCI1394_GUID_ROM) & 0x80000000)); i++)
			udelay(10);

		for (i = 0; i < 0x20; i++) {
			reg_write(ohci, OHCI1394_GUID_ROM, 0x02000000);
			PRINT(KERN_DEBUG, "    EEPROM %02x: %02x", i,
			      (reg_read(ohci, OHCI1394_GUID_ROM) >> 16) & 0xff);
		}
#endif
	}
}
646
/*
 * Insert a packet in the DMA fifo and generate the DMA prg
 * FIXME: rewrite the program in order to accept packets crossing
 *        page boundaries.
 *        check also that a single dma descriptor doesn't cross a
 *        page boundary.
 */
/* Builds the OUTPUT_MORE/OUTPUT_LAST descriptor pair (or a single
 * immediate descriptor for quadlet-only packets) for @packet in
 * transmit context @d, links it into the running DMA program via
 * branchAddrPtr, and queues the packet on d->fifo_list.
 * Called with d->lock held (see dma_trm_flush), and only when
 * d->free_prgs > 0. */
static void insert_packet(struct ti_ohci *ohci,
			  struct dma_trm_ctx *d, struct hpsb_packet *packet)
{
	u32 cycleTimer;
	int idx = d->prg_ind;

	DBGMSG("Inserting packet for node " NODE_BUS_FMT
	       ", tlabel=%d, tcode=0x%x, speed=%d",
	       NODE_BUS_ARGS(ohci->host, packet->node_id), packet->tlabel,
	       packet->tcode, packet->speed_code);

	d->prg_cpu[idx]->begin.address = 0;
	d->prg_cpu[idx]->begin.branchAddress = 0;

	if (d->type == DMA_CTX_ASYNC_RESP) {
		/*
		 * For response packets, we need to put a timeout value in
		 * the 16 lower bits of the status... let's try 1 sec timeout
		 */
		cycleTimer = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
		d->prg_cpu[idx]->begin.status = cpu_to_le32(
			(((((cycleTimer>>25)&0x7)+1)&0x7)<<13) |
			((cycleTimer&0x01fff000)>>12));

		DBGMSG("cycleTimer: %08x timeStamp: %08x",
		       cycleTimer, d->prg_cpu[idx]->begin.status);
	} else 
		d->prg_cpu[idx]->begin.status = 0;

	if ( (packet->type == hpsb_async) || (packet->type == hpsb_raw) ) {

		if (packet->type == hpsb_raw) {
			/* PHY packet: tcode 0xE plus the two raw quadlets */
			d->prg_cpu[idx]->data[0] = cpu_to_le32(OHCI1394_TCODE_PHY<<4);
			d->prg_cpu[idx]->data[1] = cpu_to_le32(packet->header[0]);
			d->prg_cpu[idx]->data[2] = cpu_to_le32(packet->header[1]);
		} else {
			/* First quadlet: speed code plus low half of header[0] */
			d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
				(packet->header[0] & 0xFFFF);

			if (packet->tcode == TCODE_ISO_DATA) {
				/* Sending an async stream packet */
				d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
			} else {
				/* Sending a normal async request or response */
				d->prg_cpu[idx]->data[1] =
					(packet->header[1] & 0xFFFF) |
					(packet->header[0] & 0xFFFF0000);
				d->prg_cpu[idx]->data[2] = packet->header[2];
				d->prg_cpu[idx]->data[3] = packet->header[3];
			}
			/* no-op on little-endian builds */
			packet_swab(d->prg_cpu[idx]->data, packet->tcode);
		}

		if (packet->data_size) { /* block transmit */
			/* Immediate header descriptor: 0x8 = two header
			 * quadlets for stream packets, 0x10 = four otherwise */
			if (packet->tcode == TCODE_STREAM_DATA){
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_MORE |
						    DMA_CTL_IMMEDIATE | 0x8);
			} else {
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_MORE |
						    DMA_CTL_IMMEDIATE | 0x10);
			}
			d->prg_cpu[idx]->end.control =
				cpu_to_le32(DMA_CTL_OUTPUT_LAST |
					    DMA_CTL_IRQ |
					    DMA_CTL_BRANCH |
					    packet->data_size);
			/*
			 * Check that the packet data buffer
			 * does not cross a page boundary.
			 *
			 * XXX Fix this some day. eth1394 seems to trigger
			 * it, but ignoring it doesn't seem to cause a
			 * problem.
			 */
#if 0
			if (cross_bound((unsigned long)packet->data,
					packet->data_size)>0) {
				/* FIXME: do something about it */
				PRINT(KERN_ERR,
				      "%s: packet data addr: %p size %Zd bytes "
				      "cross page boundary", __FUNCTION__,
				      packet->data, packet->data_size);
			}
#endif
			/* Map the payload for device reads; unmapped by the
			 * transmit completion path elsewhere in this file */
			d->prg_cpu[idx]->end.address = cpu_to_le32(
				pci_map_single(ohci->dev, packet->data,
					       packet->data_size,
					       PCI_DMA_TODEVICE));
			OHCI_DMA_ALLOC("single, block transmit packet");

			d->prg_cpu[idx]->end.branchAddress = 0;
			d->prg_cpu[idx]->end.status = 0;
			/* Patch the previous program's branch to point here
			 * (Z=3: immediate + end descriptor program) */
			if (d->branchAddrPtr)
				*(d->branchAddrPtr) =
					cpu_to_le32(d->prg_bus[idx] | 0x3);
			d->branchAddrPtr =
				&(d->prg_cpu[idx]->end.branchAddress);
		} else { /* quadlet transmit */
			/* raw packets carry one extra quadlet of header */
			if (packet->type == hpsb_raw)
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_LAST |
						    DMA_CTL_IMMEDIATE |
						    DMA_CTL_IRQ |
						    DMA_CTL_BRANCH |
						    (packet->header_size + 4));
			else
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_LAST |
						    DMA_CTL_IMMEDIATE |
						    DMA_CTL_IRQ |
						    DMA_CTL_BRANCH |
						    packet->header_size);

			/* Single-descriptor program: Z=2 */
			if (d->branchAddrPtr)
				*(d->branchAddrPtr) =
					cpu_to_le32(d->prg_bus[idx] | 0x2);
			d->branchAddrPtr =
				&(d->prg_cpu[idx]->begin.branchAddress);
		}

	} else { /* iso packet */
		d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
			(packet->header[0] & 0xFFFF);
		d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
		packet_swab(d->prg_cpu[idx]->data, packet->tcode);

		/* Immediate header (two quadlets) + payload descriptor */
		d->prg_cpu[idx]->begin.control =
			cpu_to_le32(DMA_CTL_OUTPUT_MORE |
				    DMA_CTL_IMMEDIATE | 0x8);
		d->prg_cpu[idx]->end.control =
			cpu_to_le32(DMA_CTL_OUTPUT_LAST |
				    DMA_CTL_UPDATE |
				    DMA_CTL_IRQ |
				    DMA_CTL_BRANCH |
				    packet->data_size);
		d->prg_cpu[idx]->end.address = cpu_to_le32(
				pci_map_single(ohci->dev, packet->data,
					       packet->data_size, PCI_DMA_TODEVICE));
		OHCI_DMA_ALLOC("single, iso transmit packet");

		d->prg_cpu[idx]->end.branchAddress = 0;
		d->prg_cpu[idx]->end.status = 0;
		DBGMSG("Iso xmit context info: header[%08x %08x]\n"
		       "                       begin=%08x %08x %08x %08x\n"
		       "                             %08x %08x %08x %08x\n"
		       "                       end  =%08x %08x %08x %08x",
		       d->prg_cpu[idx]->data[0], d->prg_cpu[idx]->data[1],
		       d->prg_cpu[idx]->begin.control,
		       d->prg_cpu[idx]->begin.address,
		       d->prg_cpu[idx]->begin.branchAddress,
		       d->prg_cpu[idx]->begin.status,
		       d->prg_cpu[idx]->data[0],
		       d->prg_cpu[idx]->data[1],
		       d->prg_cpu[idx]->data[2],
		       d->prg_cpu[idx]->data[3],
		       d->prg_cpu[idx]->end.control,
		       d->prg_cpu[idx]->end.address,
		       d->prg_cpu[idx]->end.branchAddress,
		       d->prg_cpu[idx]->end.status);
		if (d->branchAddrPtr)
			*(d->branchAddrPtr) = cpu_to_le32(d->prg_bus[idx] | 0x3);
		d->branchAddrPtr = &(d->prg_cpu[idx]->end.branchAddress);
	}
	d->free_prgs--;

	/* queue the packet in the appropriate context queue */
	list_add_tail(&packet->driver_list, &d->fifo_list);
	d->prg_ind = (d->prg_ind + 1) % d->num_desc;
}
825
/*
 * This function fills the FIFO with the (eventual) pending packets
 * and runs or wakes up the DMA prg if necessary.
 *
 * The function MUST be called with the d->lock held.
 */
static void dma_trm_flush(struct ti_ohci *ohci, struct dma_trm_ctx *d)
{
	struct hpsb_packet *packet, *ptmp;
	int idx = d->prg_ind;	/* descriptor slot of the first packet we add */
	int z = 0;		/* Z value of the first program: 0 = none added */

	/* insert the packets into the dma fifo */
	list_for_each_entry_safe(packet, ptmp, &d->pending_list, driver_list) {
		if (!d->free_prgs)
			break;

		/* For the first packet only: Z=3 when a payload descriptor
		 * follows the immediate header, Z=2 for header-only */
		if (!z)
			z = (packet->data_size) ? 3 : 2;

		/* Insert the packet */
		list_del_init(&packet->driver_list);
		insert_packet(ohci, d, packet);
	}

	/* Nothing must have been done, either no free_prgs or no packets */
	if (z == 0)
		return;

	/* Is the context running ? (should be unless it is
	   the first packet to be sent in this context) */
	if (!(reg_read(ohci, d->ctrlSet) & 0x8000)) {
		u32 nodeId = reg_read(ohci, OHCI1394_NodeID);

		DBGMSG("Starting transmit DMA ctx=%d",d->ctx);
		/* Point the context at the first new program (addr | Z) */
		reg_write(ohci, d->cmdPtr, d->prg_bus[idx] | z);

		/* Check that the node id is valid, and not 63 */
		if (!(nodeId & 0x80000000) || (nodeId & 0x3f) == 63)
			PRINT(KERN_ERR, "Running dma failed because Node ID is not valid");
		else
			reg_write(ohci, d->ctrlSet, 0x8000);	/* run */
	} else {
		/* Wake up the dma context if necessary */
		if (!(reg_read(ohci, d->ctrlSet) & 0x400))
			DBGMSG("Waking transmit DMA ctx=%d",d->ctx);

		/* do this always, to avoid race condition */
		reg_write(ohci, d->ctrlSet, 0x1000);	/* wake */
	}

	return;
}
880
/* Transmission of an async or iso packet */
/* hpsb_host hook: select the transmit DMA context for @packet
 * (AT request, AT response, or the lazily-allocated legacy IT
 * context), queue it, and kick the context.
 * Returns 0 on success, -EOVERFLOW when the payload exceeds the
 * controller's max packet size, -EINVAL/-ENOMEM on legacy IT
 * context setup failure. */
static int ohci_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
{
	struct ti_ohci *ohci = host->hostdata;
	struct dma_trm_ctx *d;
	unsigned long flags;

	if (packet->data_size > ohci->max_packet_size) {
		PRINT(KERN_ERR,
		      "Transmit packet size %Zd is too big",
		      packet->data_size);
		return -EOVERFLOW;
	}

	/* Decide whether we have an iso, a request, or a response packet */
	if (packet->type == hpsb_raw)
		d = &ohci->at_req_context;
	else if ((packet->tcode == TCODE_ISO_DATA) && (packet->type == hpsb_iso)) {
		/* The legacy IT DMA context is initialized on first
		 * use. However, the alloc cannot be run from
		 * interrupt context, so we bail out if that is the
		 * case. I don't see anyone sending ISO packets from
		 * interrupt context anyway... */

		if (ohci->it_legacy_context.ohci == NULL) {
			if (in_interrupt()) {
				PRINT(KERN_ERR,
				      "legacy IT context cannot be initialized during interrupt");
				return -EINVAL;
			}

			if (alloc_dma_trm_ctx(ohci, &ohci->it_legacy_context,
					      DMA_CTX_ISO, 0, IT_NUM_DESC,
					      OHCI1394_IsoXmitContextBase) < 0) {
				PRINT(KERN_ERR,
				      "error initializing legacy IT context");
				return -ENOMEM;
			}

			initialize_dma_trm_ctx(&ohci->it_legacy_context);
		}

		d = &ohci->it_legacy_context;
	} else if ((packet->tcode & 0x02) && (packet->tcode != TCODE_ISO_DATA))
		/* tcode bit 1 set (and not iso) means a response tcode */
		d = &ohci->at_resp_context;
	else
		d = &ohci->at_req_context;

	spin_lock_irqsave(&d->lock,flags);

	list_add_tail(&packet->driver_list, &d->pending_list);

	/* flush requires d->lock held — see dma_trm_flush */
	dma_trm_flush(ohci, d);

	spin_unlock_irqrestore(&d->lock,flags);

	return 0;
}
939
940static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
941{
942 struct ti_ohci *ohci = host->hostdata;
943 int retval = 0;
944 unsigned long flags;
945 int phy_reg;
946
947 switch (cmd) {
948 case RESET_BUS:
949 switch (arg) {
950 case SHORT_RESET:
951 phy_reg = get_phy_reg(ohci, 5);
952 phy_reg |= 0x40;
953 set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
954 break;
955 case LONG_RESET:
956 phy_reg = get_phy_reg(ohci, 1);
957 phy_reg |= 0x40;
958 set_phy_reg(ohci, 1, phy_reg); /* set IBR */
959 break;
960 case SHORT_RESET_NO_FORCE_ROOT:
961 phy_reg = get_phy_reg(ohci, 1);
962 if (phy_reg & 0x80) {
963 phy_reg &= ~0x80;
964 set_phy_reg(ohci, 1, phy_reg); /* clear RHB */
965 }
966
967 phy_reg = get_phy_reg(ohci, 5);
968 phy_reg |= 0x40;
969 set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
970 break;
971 case LONG_RESET_NO_FORCE_ROOT:
972 phy_reg = get_phy_reg(ohci, 1);
973 phy_reg &= ~0x80;
974 phy_reg |= 0x40;
975 set_phy_reg(ohci, 1, phy_reg); /* clear RHB, set IBR */
976 break;
977 case SHORT_RESET_FORCE_ROOT:
978 phy_reg = get_phy_reg(ohci, 1);
979 if (!(phy_reg & 0x80)) {
980 phy_reg |= 0x80;
981 set_phy_reg(ohci, 1, phy_reg); /* set RHB */
982 }
983
984 phy_reg = get_phy_reg(ohci, 5);
985 phy_reg |= 0x40;
986 set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
987 break;
988 case LONG_RESET_FORCE_ROOT:
989 phy_reg = get_phy_reg(ohci, 1);
990 phy_reg |= 0xc0;
991 set_phy_reg(ohci, 1, phy_reg); /* set RHB and IBR */
992 break;
993 default:
994 retval = -1;
995 }
996 break;
997
998 case GET_CYCLE_COUNTER:
999 retval = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
1000 break;
1001
1002 case SET_CYCLE_COUNTER:
1003 reg_write(ohci, OHCI1394_IsochronousCycleTimer, arg);
1004 break;
1005
1006 case SET_BUS_ID:
1007 PRINT(KERN_ERR, "devctl command SET_BUS_ID err");
1008 break;
1009
1010 case ACT_CYCLE_MASTER:
1011 if (arg) {
1012 /* check if we are root and other nodes are present */
1013 u32 nodeId = reg_read(ohci, OHCI1394_NodeID);
1014 if ((nodeId & (1<<30)) && (nodeId & 0x3f)) {
1015 /*
1016 * enable cycleTimer, cycleMaster
1017 */
1018 DBGMSG("Cycle master enabled");
1019 reg_write(ohci, OHCI1394_LinkControlSet,
1020 OHCI1394_LinkControl_CycleTimerEnable |
1021 OHCI1394_LinkControl_CycleMaster);
1022 }
1023 } else {
1024 /* disable cycleTimer, cycleMaster, cycleSource */
1025 reg_write(ohci, OHCI1394_LinkControlClear,
1026 OHCI1394_LinkControl_CycleTimerEnable |
1027 OHCI1394_LinkControl_CycleMaster |
1028 OHCI1394_LinkControl_CycleSource);
1029 }
1030 break;
1031
1032 case CANCEL_REQUESTS:
1033 DBGMSG("Cancel request received");
1034 dma_trm_reset(&ohci->at_req_context);
1035 dma_trm_reset(&ohci->at_resp_context);
1036 break;
1037
1038 case ISO_LISTEN_CHANNEL:
1039 {
1040 u64 mask;
Jody McIntyree4ec0f22005-04-21 14:09:42 -07001041 struct dma_rcv_ctx *d = &ohci->ir_legacy_context;
1042 int ir_legacy_active;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001043
1044 if (arg<0 || arg>63) {
1045 PRINT(KERN_ERR,
1046 "%s: IS0 listen channel %d is out of range",
1047 __FUNCTION__, arg);
1048 return -EFAULT;
1049 }
1050
1051 mask = (u64)0x1<<arg;
1052
1053 spin_lock_irqsave(&ohci->IR_channel_lock, flags);
1054
1055 if (ohci->ISO_channel_usage & mask) {
1056 PRINT(KERN_ERR,
1057 "%s: IS0 listen channel %d is already used",
1058 __FUNCTION__, arg);
1059 spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1060 return -EFAULT;
1061 }
1062
Jody McIntyree4ec0f22005-04-21 14:09:42 -07001063 ir_legacy_active = ohci->ir_legacy_channels;
1064
Linus Torvalds1da177e2005-04-16 15:20:36 -07001065 ohci->ISO_channel_usage |= mask;
1066 ohci->ir_legacy_channels |= mask;
1067
Jody McIntyree4ec0f22005-04-21 14:09:42 -07001068 spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1069
1070 if (!ir_legacy_active) {
1071 if (ohci1394_register_iso_tasklet(ohci,
1072 &ohci->ir_legacy_tasklet) < 0) {
1073 PRINT(KERN_ERR, "No IR DMA context available");
1074 return -EBUSY;
1075 }
1076
1077 /* the IR context can be assigned to any DMA context
1078 * by ohci1394_register_iso_tasklet */
1079 d->ctx = ohci->ir_legacy_tasklet.context;
1080 d->ctrlSet = OHCI1394_IsoRcvContextControlSet +
1081 32*d->ctx;
1082 d->ctrlClear = OHCI1394_IsoRcvContextControlClear +
1083 32*d->ctx;
1084 d->cmdPtr = OHCI1394_IsoRcvCommandPtr + 32*d->ctx;
1085 d->ctxtMatch = OHCI1394_IsoRcvContextMatch + 32*d->ctx;
1086
1087 initialize_dma_rcv_ctx(&ohci->ir_legacy_context, 1);
1088
Olaf Hering98848fa2005-07-14 00:33:45 -07001089 if (printk_ratelimit())
Jody McIntyre32e7a042005-09-30 11:59:19 -07001090 DBGMSG("IR legacy activated");
Jody McIntyree4ec0f22005-04-21 14:09:42 -07001091 }
1092
1093 spin_lock_irqsave(&ohci->IR_channel_lock, flags);
1094
Linus Torvalds1da177e2005-04-16 15:20:36 -07001095 if (arg>31)
1096 reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet,
1097 1<<(arg-32));
1098 else
1099 reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet,
1100 1<<arg);
1101
1102 spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1103 DBGMSG("Listening enabled on channel %d", arg);
1104 break;
1105 }
1106 case ISO_UNLISTEN_CHANNEL:
1107 {
1108 u64 mask;
1109
1110 if (arg<0 || arg>63) {
1111 PRINT(KERN_ERR,
1112 "%s: IS0 unlisten channel %d is out of range",
1113 __FUNCTION__, arg);
1114 return -EFAULT;
1115 }
1116
1117 mask = (u64)0x1<<arg;
1118
1119 spin_lock_irqsave(&ohci->IR_channel_lock, flags);
1120
1121 if (!(ohci->ISO_channel_usage & mask)) {
1122 PRINT(KERN_ERR,
1123 "%s: IS0 unlisten channel %d is not used",
1124 __FUNCTION__, arg);
1125 spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1126 return -EFAULT;
1127 }
1128
1129 ohci->ISO_channel_usage &= ~mask;
1130 ohci->ir_legacy_channels &= ~mask;
1131
1132 if (arg>31)
1133 reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear,
1134 1<<(arg-32));
1135 else
1136 reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear,
1137 1<<arg);
1138
1139 spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1140 DBGMSG("Listening disabled on channel %d", arg);
Jody McIntyree4ec0f22005-04-21 14:09:42 -07001141
1142 if (ohci->ir_legacy_channels == 0) {
1143 stop_dma_rcv_ctx(&ohci->ir_legacy_context);
1144 DBGMSG("ISO legacy receive context stopped");
1145 }
1146
Linus Torvalds1da177e2005-04-16 15:20:36 -07001147 break;
1148 }
1149 default:
1150 PRINT_G(KERN_ERR, "ohci_devctl cmd %d not implemented yet",
1151 cmd);
1152 break;
1153 }
1154 return retval;
1155}
1156
1157/***********************************
1158 * rawiso ISO reception *
1159 ***********************************/
1160
1161/*
1162 We use either buffer-fill or packet-per-buffer DMA mode. The DMA
1163 buffer is split into "blocks" (regions described by one DMA
1164 descriptor). Each block must be one page or less in size, and
1165 must not cross a page boundary.
1166
1167 There is one little wrinkle with buffer-fill mode: a packet that
1168 starts in the final block may wrap around into the first block. But
1169 the user API expects all packets to be contiguous. Our solution is
1170 to keep the very last page of the DMA buffer in reserve - if a
1171 packet spans the gap, we copy its tail into this page.
1172*/
1173
/* per-context state for rawiso reception */
struct ohci_iso_recv {
	struct ti_ohci *ohci;

	struct ohci1394_iso_tasklet task;
	int task_active;	/* nonzero once the tasklet is registered */

	/* receive strategy; multichannel reception always uses buffer-fill */
	enum { BUFFER_FILL_MODE = 0,
	       PACKET_PER_BUFFER_MODE = 1 } dma_mode;

	/* memory and PCI mapping for the DMA descriptors */
	struct dma_prog_region prog;
	struct dma_cmd *block; /* = (struct dma_cmd*) prog.kvirt */

	/* how many DMA blocks fit in the buffer */
	unsigned int nblocks;

	/* stride of DMA blocks (bytes per block) */
	unsigned int buf_stride;

	/* number of blocks to batch between interrupts */
	int block_irq_interval;

	/* block that DMA will finish next */
	int block_dma;

	/* (buffer-fill only) block that the reader will release next */
	int block_reader;

	/* (buffer-fill only) bytes of buffer the reader has released,
	   less than one block */
	int released_bytes;

	/* (buffer-fill only) buffer offset at which the next packet will appear */
	int dma_offset;

	/* OHCI DMA context control register offsets (IsoRcv base + 32*ctx) */
	u32 ContextControlSet;
	u32 ContextControlClear;
	u32 CommandPtr;
	u32 ContextMatch;
};
1215
1216static void ohci_iso_recv_task(unsigned long data);
1217static void ohci_iso_recv_stop(struct hpsb_iso *iso);
1218static void ohci_iso_recv_shutdown(struct hpsb_iso *iso);
1219static int ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync);
1220static void ohci_iso_recv_program(struct hpsb_iso *iso);
1221
/*
 * Allocate and set up a rawiso receive context: choose the DMA mode,
 * size the block layout, allocate the descriptor program, register the
 * iso tasklet (which assigns a hardware context), and write the DMA
 * program.  Returns 0 on success, -ENOMEM/-EBUSY on failure; on any
 * failure all partial state is torn down via ohci_iso_recv_shutdown().
 */
static int ohci_iso_recv_init(struct hpsb_iso *iso)
{
	struct ti_ohci *ohci = iso->host->hostdata;
	struct ohci_iso_recv *recv;
	int ctx;
	int ret = -ENOMEM;

	recv = kmalloc(sizeof(*recv), SLAB_KERNEL);
	if (!recv)
		return -ENOMEM;

	/* stash hostdata early so ohci_iso_recv_shutdown() can run on the
	   error path */
	iso->hostdata = recv;
	recv->ohci = ohci;
	recv->task_active = 0;
	dma_prog_region_init(&recv->prog);
	recv->block = NULL;

	/* use buffer-fill mode, unless irq_interval is 1
	   (note: multichannel requires buffer-fill) */

	if (((iso->irq_interval == 1 && iso->dma_mode == HPSB_ISO_DMA_OLD_ABI) ||
	     iso->dma_mode == HPSB_ISO_DMA_PACKET_PER_BUFFER) && iso->channel != -1) {
		recv->dma_mode = PACKET_PER_BUFFER_MODE;
	} else {
		recv->dma_mode = BUFFER_FILL_MODE;
	}

	/* set nblocks, buf_stride, block_irq_interval */

	if (recv->dma_mode == BUFFER_FILL_MODE) {
		recv->buf_stride = PAGE_SIZE;

		/* one block per page of data in the DMA buffer, minus the final guard page */
		/* NOTE(review): nblocks is unsigned - if buf_size < PAGE_SIZE
		   this subtraction wraps to a huge value and slips past the
		   '< 3' check below; the dma_prog_region_alloc() further down
		   will then fail, but an explicit buf_size pre-check would be
		   cleaner - confirm callers always pass >= 4 pages */
		recv->nblocks = iso->buf_size/PAGE_SIZE - 1;
		if (recv->nblocks < 3) {
			DBGMSG("ohci_iso_recv_init: DMA buffer too small");
			goto err;
		}

		/* iso->irq_interval is in packets - translate that to blocks */
		if (iso->irq_interval == 1)
			recv->block_irq_interval = 1;
		else
			recv->block_irq_interval = iso->irq_interval *
						((recv->nblocks+1)/iso->buf_packets);
		/* clamp to between 1 and nblocks/4 so interrupts stay
		   reasonably spaced */
		if (recv->block_irq_interval*4 > recv->nblocks)
			recv->block_irq_interval = recv->nblocks/4;
		if (recv->block_irq_interval < 1)
			recv->block_irq_interval = 1;

	} else {
		int max_packet_size;

		recv->nblocks = iso->buf_packets;
		recv->block_irq_interval = iso->irq_interval;
		if (recv->block_irq_interval * 4 > iso->buf_packets)
			recv->block_irq_interval = iso->buf_packets / 4;
		if (recv->block_irq_interval < 1)
			recv->block_irq_interval = 1;

		/* choose a buffer stride */
		/* must be a power of 2, and <= PAGE_SIZE */

		max_packet_size = iso->buf_size / iso->buf_packets;

		/* smallest power of two >= max_packet_size (min 8) */
		for (recv->buf_stride = 8; recv->buf_stride < max_packet_size;
		    recv->buf_stride *= 2);

		if (recv->buf_stride*iso->buf_packets > iso->buf_size ||
		   recv->buf_stride > PAGE_SIZE) {
			/* this shouldn't happen, but anyway... */
			DBGMSG("ohci_iso_recv_init: problem choosing a buffer stride");
			goto err;
		}
	}

	recv->block_reader = 0;
	recv->released_bytes = 0;
	recv->block_dma = 0;
	recv->dma_offset = 0;

	/* size of DMA program = one descriptor per block */
	if (dma_prog_region_alloc(&recv->prog,
				 sizeof(struct dma_cmd) * recv->nblocks,
				 recv->ohci->dev))
		goto err;

	recv->block = (struct dma_cmd*) recv->prog.kvirt;

	ohci1394_init_iso_tasklet(&recv->task,
				  iso->channel == -1 ? OHCI_ISO_MULTICHANNEL_RECEIVE :
				                       OHCI_ISO_RECEIVE,
				  ohci_iso_recv_task, (unsigned long) iso);

	/* registration assigns a free hardware IR context, or fails if
	   none is available */
	if (ohci1394_register_iso_tasklet(recv->ohci, &recv->task) < 0) {
		ret = -EBUSY;
		goto err;
	}

	recv->task_active = 1;

	/* recv context registers are spaced 32 bytes apart */
	ctx = recv->task.context;
	recv->ContextControlSet = OHCI1394_IsoRcvContextControlSet + 32 * ctx;
	recv->ContextControlClear = OHCI1394_IsoRcvContextControlClear + 32 * ctx;
	recv->CommandPtr = OHCI1394_IsoRcvCommandPtr + 32 * ctx;
	recv->ContextMatch = OHCI1394_IsoRcvContextMatch + 32 * ctx;

	if (iso->channel == -1) {
		/* clear multi-channel selection mask */
		reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, 0xFFFFFFFF);
		reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, 0xFFFFFFFF);
	}

	/* write the DMA program */
	ohci_iso_recv_program(iso);

	DBGMSG("ohci_iso_recv_init: %s mode, DMA buffer is %lu pages"
	       " (%u bytes), using %u blocks, buf_stride %u, block_irq_interval %d",
	       recv->dma_mode == BUFFER_FILL_MODE ?
	       "buffer-fill" : "packet-per-buffer",
	       iso->buf_size/PAGE_SIZE, iso->buf_size,
	       recv->nblocks, recv->buf_stride, recv->block_irq_interval);

	return 0;

err:
	ohci_iso_recv_shutdown(iso);
	return ret;
}
1352
1353static void ohci_iso_recv_stop(struct hpsb_iso *iso)
1354{
1355 struct ohci_iso_recv *recv = iso->hostdata;
1356
1357 /* disable interrupts */
1358 reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << recv->task.context);
1359
1360 /* halt DMA */
1361 ohci1394_stop_context(recv->ohci, recv->ContextControlClear, NULL);
1362}
1363
1364static void ohci_iso_recv_shutdown(struct hpsb_iso *iso)
1365{
1366 struct ohci_iso_recv *recv = iso->hostdata;
1367
1368 if (recv->task_active) {
1369 ohci_iso_recv_stop(iso);
1370 ohci1394_unregister_iso_tasklet(recv->ohci, &recv->task);
1371 recv->task_active = 0;
1372 }
1373
1374 dma_prog_region_free(&recv->prog);
1375 kfree(recv);
1376 iso->hostdata = NULL;
1377}
1378
/* set up a "gapped" ring buffer DMA program: one descriptor per block,
 * each chained to the next via its branch address; the final descriptor
 * is left unlinked so the ring is "open" until blocks are released back
 * to the hardware by ohci_iso_recv_release_block() */
static void ohci_iso_recv_program(struct hpsb_iso *iso)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	int blk;

	/* address of 'branch' field in previous DMA descriptor */
	u32 *prev_branch = NULL;

	for (blk = 0; blk < recv->nblocks; blk++) {
		u32 control;

		/* the DMA descriptor */
		struct dma_cmd *cmd = &recv->block[blk];

		/* offset of the DMA descriptor relative to the DMA prog buffer */
		unsigned long prog_offset = blk * sizeof(struct dma_cmd);

		/* offset of this packet's data within the DMA buffer */
		unsigned long buf_offset = blk * recv->buf_stride;

		if (recv->dma_mode == BUFFER_FILL_MODE) {
			control = 2 << 28; /* INPUT_MORE */
		} else {
			control = 3 << 28; /* INPUT_LAST */
		}

		control |= 8 << 24; /* s = 1, update xferStatus and resCount */

		/* interrupt on last block, and at intervals */
		if (blk == recv->nblocks-1 || (blk % recv->block_irq_interval) == 0) {
			control |= 3 << 20; /* want interrupt */
		}

		control |= 3 << 18; /* enable branch to address */
		control |= recv->buf_stride; /* reqCount = bytes in this block */

		/* descriptors are little-endian per OHCI, hence cpu_to_le32 */
		cmd->control = cpu_to_le32(control);
		cmd->address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, buf_offset));
		cmd->branchAddress = 0; /* filled in on next loop */
		cmd->status = cpu_to_le32(recv->buf_stride);

		/* link the previous descriptor to this one */
		if (prev_branch) {
			/* low bit is Z=1 (one descriptor at the target) */
			*prev_branch = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog, prog_offset) | 1);
		}

		prev_branch = &cmd->branchAddress;
	}

	/* the final descriptor's branch address and Z should be left at 0 */
}
1431
1432/* listen or unlisten to a specific channel (multi-channel mode only) */
1433static void ohci_iso_recv_change_channel(struct hpsb_iso *iso, unsigned char channel, int listen)
1434{
1435 struct ohci_iso_recv *recv = iso->hostdata;
1436 int reg, i;
1437
1438 if (channel < 32) {
1439 reg = listen ? OHCI1394_IRMultiChanMaskLoSet : OHCI1394_IRMultiChanMaskLoClear;
1440 i = channel;
1441 } else {
1442 reg = listen ? OHCI1394_IRMultiChanMaskHiSet : OHCI1394_IRMultiChanMaskHiClear;
1443 i = channel - 32;
1444 }
1445
1446 reg_write(recv->ohci, reg, (1 << i));
1447
1448 /* issue a dummy read to force all PCI writes to be posted immediately */
1449 mb();
1450 reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
1451}
1452
1453static void ohci_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask)
1454{
1455 struct ohci_iso_recv *recv = iso->hostdata;
1456 int i;
1457
1458 for (i = 0; i < 64; i++) {
1459 if (mask & (1ULL << i)) {
1460 if (i < 32)
1461 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoSet, (1 << i));
1462 else
1463 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiSet, (1 << (i-32)));
1464 } else {
1465 if (i < 32)
1466 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, (1 << i));
1467 else
1468 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, (1 << (i-32)));
1469 }
1470 }
1471
1472 /* issue a dummy read to force all PCI writes to be posted immediately */
1473 mb();
1474 reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
1475}
1476
/*
 * Program and start the IR context.
 *
 * cycle    - bus cycle to begin reception on, or -1 for "now"
 * tag_mask - which iso tag values to accept (bits 0-3)
 * sync     - sy field to wait for before storing packets, or -1 for none
 *
 * Returns 0 on success, -1 if the context refused to enter the RUN state.
 */
static int ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	struct ti_ohci *ohci = recv->ohci;
	u32 command, contextMatch;

	/* wipe all context-control bits before reprogramming */
	reg_write(recv->ohci, recv->ContextControlClear, 0xFFFFFFFF);
	wmb();

	/* always keep ISO headers */
	command = (1 << 30);

	/* bufferFill bit selects buffer-fill vs packet-per-buffer */
	if (recv->dma_mode == BUFFER_FILL_MODE)
		command |= (1 << 31);

	reg_write(recv->ohci, recv->ContextControlSet, command);

	/* match on specified tags */
	contextMatch = tag_mask << 28;

	if (iso->channel == -1) {
		/* enable multichannel reception */
		reg_write(recv->ohci, recv->ContextControlSet, (1 << 28));
	} else {
		/* listen on channel */
		contextMatch |= iso->channel;
	}

	if (cycle != -1) {
		u32 seconds;

		/* enable cycleMatch */
		reg_write(recv->ohci, recv->ContextControlSet, (1 << 29));

		/* set starting cycle */
		cycle &= 0x1FFF;

		/* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
		   just snarf them from the current time */
		seconds = reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer) >> 25;

		/* advance one second to give some extra time for DMA to start */
		seconds += 1;

		cycle |= (seconds & 3) << 13;

		contextMatch |= cycle << 12;
	}

	if (sync != -1) {
		/* set sync flag on first DMA descriptor */
		struct dma_cmd *cmd = &recv->block[recv->block_dma];
		cmd->control |= cpu_to_le32(DMA_CTL_WAIT);

		/* match sync field */
		contextMatch |= (sync&0xf)<<8;
	}

	reg_write(recv->ohci, recv->ContextMatch, contextMatch);

	/* address of first descriptor block */
	command = dma_prog_region_offset_to_bus(&recv->prog,
						recv->block_dma * sizeof(struct dma_cmd));
	command |= 1; /* Z=1 */

	reg_write(recv->ohci, recv->CommandPtr, command);

	/* enable interrupts */
	reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskSet, 1 << recv->task.context);

	wmb();

	/* run */
	reg_write(recv->ohci, recv->ContextControlSet, 0x8000);

	/* issue a dummy read of the cycle timer register to force
	   all PCI writes to be posted immediately */
	mb();
	reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);

	/* check RUN */
	if (!(reg_read(recv->ohci, recv->ContextControlSet) & 0x8000)) {
		PRINT(KERN_ERR,
		      "Error starting IR DMA (ContextControl 0x%08x)\n",
		      reg_read(recv->ohci, recv->ContextControlSet));
		return -1;
	}

	return 0;
}
1567
1568static void ohci_iso_recv_release_block(struct ohci_iso_recv *recv, int block)
1569{
1570 /* re-use the DMA descriptor for the block */
1571 /* by linking the previous descriptor to it */
1572
1573 int next_i = block;
1574 int prev_i = (next_i == 0) ? (recv->nblocks - 1) : (next_i - 1);
1575
1576 struct dma_cmd *next = &recv->block[next_i];
1577 struct dma_cmd *prev = &recv->block[prev_i];
Ben Collins1934b8b2005-07-09 20:01:23 -04001578
1579 /* ignore out-of-range requests */
1580 if ((block < 0) || (block > recv->nblocks))
1581 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001582
1583 /* 'next' becomes the new end of the DMA chain,
1584 so disable branch and enable interrupt */
1585 next->branchAddress = 0;
1586 next->control |= cpu_to_le32(3 << 20);
1587 next->status = cpu_to_le32(recv->buf_stride);
1588
1589 /* link prev to next */
1590 prev->branchAddress = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog,
1591 sizeof(struct dma_cmd) * next_i)
1592 | 1); /* Z=1 */
1593
1594 /* disable interrupt on previous DMA descriptor, except at intervals */
1595 if ((prev_i % recv->block_irq_interval) == 0) {
1596 prev->control |= cpu_to_le32(3 << 20); /* enable interrupt */
1597 } else {
1598 prev->control &= cpu_to_le32(~(3<<20)); /* disable interrupt */
1599 }
1600 wmb();
1601
1602 /* wake up DMA in case it fell asleep */
1603 reg_write(recv->ohci, recv->ContextControlSet, (1 << 12));
1604}
1605
1606static void ohci_iso_recv_bufferfill_release(struct ohci_iso_recv *recv,
1607 struct hpsb_iso_packet_info *info)
1608{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001609 /* release the memory where the packet was */
Ben Collins1934b8b2005-07-09 20:01:23 -04001610 recv->released_bytes += info->total_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001611
1612 /* have we released enough memory for one block? */
1613 while (recv->released_bytes > recv->buf_stride) {
1614 ohci_iso_recv_release_block(recv, recv->block_reader);
1615 recv->block_reader = (recv->block_reader + 1) % recv->nblocks;
1616 recv->released_bytes -= recv->buf_stride;
1617 }
1618}
1619
1620static inline void ohci_iso_recv_release(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
1621{
1622 struct ohci_iso_recv *recv = iso->hostdata;
1623 if (recv->dma_mode == BUFFER_FILL_MODE) {
1624 ohci_iso_recv_bufferfill_release(recv, info);
1625 } else {
1626 ohci_iso_recv_release_block(recv, info - iso->infos);
1627 }
1628}
1629
/* parse all packets from blocks that have been fully received */
/* Walks the raw receive buffer from recv->dma_offset up to (but not
 * into) the block the hardware is currently writing, extracting each
 * packet's header fields and handing the packet to the iso core.
 * Handles payloads that wrap from the last block into the first by
 * copying the tail into the guard page kept at the end of the buffer. */
static void ohci_iso_recv_bufferfill_parse(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
{
	int wake = 0;
	int runaway = 0;
	struct ti_ohci *ohci = recv->ohci;

	while (1) {
		/* we expect the next parsable packet to begin at recv->dma_offset */
		/* note: packet layout is as shown in section 10.6.1.1 of the OHCI spec */

		unsigned int offset;
		unsigned short len, cycle, total_len;
		unsigned char channel, tag, sy;

		unsigned char *p = iso->data_buf.kvirt;

		unsigned int this_block = recv->dma_offset/recv->buf_stride;

		/* don't loop indefinitely */
		if (runaway++ > 100000) {
			atomic_inc(&iso->overflows);
			PRINT(KERN_ERR,
			      "IR DMA error - Runaway during buffer parsing!\n");
			break;
		}

		/* stop parsing once we arrive at block_dma (i.e. don't get ahead of DMA) */
		if (this_block == recv->block_dma)
			break;

		wake = 1;

		/* parse data length, tag, channel, and sy */

		/* note: we keep our own local copies of 'len' and 'offset'
		   so the user can't mess with them by poking in the mmap area */

		len = p[recv->dma_offset+2] | (p[recv->dma_offset+3] << 8);

		/* NOTE(review): a bogus length is reported but parsing still
		   continues with it, so dma_offset may be advanced past valid
		   data - confirm whether resynchronization is intended to
		   happen via the runaway/overflow path */
		if (len > 4096) {
			PRINT(KERN_ERR,
			      "IR DMA error - bogus 'len' value %u\n", len);
		}

		channel = p[recv->dma_offset+1] & 0x3F;
		tag = p[recv->dma_offset+1] >> 6;
		sy = p[recv->dma_offset+0] & 0xF;

		/* advance to data payload (skip the 4-byte iso header) */
		recv->dma_offset += 4;

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		/* dma_offset now points to the first byte of the data payload */
		offset = recv->dma_offset;

		/* advance to xferStatus/timeStamp */
		recv->dma_offset += len;

		total_len = len + 8; /* 8 bytes header+trailer in OHCI packet */
		/* payload is padded to 4 bytes */
		if (len % 4) {
			recv->dma_offset += 4 - (len%4);
			total_len += 4 - (len%4);
		}

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			/* uh oh, the packet data wraps from the last
			   to the first DMA block - make the packet
			   contiguous by copying its "tail" into the
			   guard page */

			int guard_off = recv->buf_stride*recv->nblocks;
			int tail_len = len - (guard_off - offset);

			if (tail_len > 0 && tail_len < recv->buf_stride) {
				memcpy(iso->data_buf.kvirt + guard_off,
				       iso->data_buf.kvirt,
				       tail_len);
			}

			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		/* parse timestamp (low 13 bits of the trailer quadlet) */
		cycle = p[recv->dma_offset+0] | (p[recv->dma_offset+1]<<8);
		cycle &= 0x1FFF;

		/* advance to next packet (skip the 4-byte trailer) */
		recv->dma_offset += 4;

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		hpsb_iso_packet_received(iso, offset, len, total_len, cycle, channel, tag, sy);
	}

	if (wake)
		hpsb_iso_wake(iso);
}
1737
1738static void ohci_iso_recv_bufferfill_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
1739{
1740 int loop;
1741 struct ti_ohci *ohci = recv->ohci;
1742
1743 /* loop over all blocks */
1744 for (loop = 0; loop < recv->nblocks; loop++) {
1745
1746 /* check block_dma to see if it's done */
1747 struct dma_cmd *im = &recv->block[recv->block_dma];
1748
1749 /* check the DMA descriptor for new writes to xferStatus */
1750 u16 xferstatus = le32_to_cpu(im->status) >> 16;
1751
1752 /* rescount is the number of bytes *remaining to be written* in the block */
1753 u16 rescount = le32_to_cpu(im->status) & 0xFFFF;
1754
1755 unsigned char event = xferstatus & 0x1F;
1756
1757 if (!event) {
1758 /* nothing has happened to this block yet */
1759 break;
1760 }
1761
1762 if (event != 0x11) {
1763 atomic_inc(&iso->overflows);
1764 PRINT(KERN_ERR,
1765 "IR DMA error - OHCI error code 0x%02x\n", event);
1766 }
1767
1768 if (rescount != 0) {
1769 /* the card is still writing to this block;
1770 we can't touch it until it's done */
1771 break;
1772 }
1773
1774 /* OK, the block is finished... */
1775
1776 /* sync our view of the block */
1777 dma_region_sync_for_cpu(&iso->data_buf, recv->block_dma*recv->buf_stride, recv->buf_stride);
1778
1779 /* reset the DMA descriptor */
1780 im->status = recv->buf_stride;
1781
1782 /* advance block_dma */
1783 recv->block_dma = (recv->block_dma + 1) % recv->nblocks;
1784
1785 if ((recv->block_dma+1) % recv->nblocks == recv->block_reader) {
1786 atomic_inc(&iso->overflows);
1787 DBGMSG("ISO reception overflow - "
1788 "ran out of DMA blocks");
1789 }
1790 }
1791
1792 /* parse any packets that have arrived */
1793 ohci_iso_recv_bufferfill_parse(iso, recv);
1794}
1795
1796static void ohci_iso_recv_packetperbuf_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
1797{
1798 int count;
1799 int wake = 0;
1800 struct ti_ohci *ohci = recv->ohci;
1801
1802 /* loop over the entire buffer */
1803 for (count = 0; count < recv->nblocks; count++) {
1804 u32 packet_len = 0;
1805
1806 /* pointer to the DMA descriptor */
1807 struct dma_cmd *il = ((struct dma_cmd*) recv->prog.kvirt) + iso->pkt_dma;
1808
1809 /* check the DMA descriptor for new writes to xferStatus */
1810 u16 xferstatus = le32_to_cpu(il->status) >> 16;
1811 u16 rescount = le32_to_cpu(il->status) & 0xFFFF;
1812
1813 unsigned char event = xferstatus & 0x1F;
1814
1815 if (!event) {
1816 /* this packet hasn't come in yet; we are done for now */
1817 goto out;
1818 }
1819
1820 if (event == 0x11) {
1821 /* packet received successfully! */
1822
1823 /* rescount is the number of bytes *remaining* in the packet buffer,
1824 after the packet was written */
1825 packet_len = recv->buf_stride - rescount;
1826
1827 } else if (event == 0x02) {
1828 PRINT(KERN_ERR, "IR DMA error - packet too long for buffer\n");
1829 } else if (event) {
1830 PRINT(KERN_ERR, "IR DMA error - OHCI error code 0x%02x\n", event);
1831 }
1832
1833 /* sync our view of the buffer */
1834 dma_region_sync_for_cpu(&iso->data_buf, iso->pkt_dma * recv->buf_stride, recv->buf_stride);
1835
1836 /* record the per-packet info */
1837 {
1838 /* iso header is 8 bytes ahead of the data payload */
1839 unsigned char *hdr;
1840
1841 unsigned int offset;
1842 unsigned short cycle;
1843 unsigned char channel, tag, sy;
1844
1845 offset = iso->pkt_dma * recv->buf_stride;
1846 hdr = iso->data_buf.kvirt + offset;
1847
1848 /* skip iso header */
1849 offset += 8;
1850 packet_len -= 8;
1851
1852 cycle = (hdr[0] | (hdr[1] << 8)) & 0x1FFF;
1853 channel = hdr[5] & 0x3F;
1854 tag = hdr[5] >> 6;
1855 sy = hdr[4] & 0xF;
1856
Ben Collins1934b8b2005-07-09 20:01:23 -04001857 hpsb_iso_packet_received(iso, offset, packet_len,
1858 recv->buf_stride, cycle, channel, tag, sy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001859 }
1860
1861 /* reset the DMA descriptor */
1862 il->status = recv->buf_stride;
1863
1864 wake = 1;
1865 recv->block_dma = iso->pkt_dma;
1866 }
1867
1868out:
1869 if (wake)
1870 hpsb_iso_wake(iso);
1871}
1872
1873static void ohci_iso_recv_task(unsigned long data)
1874{
1875 struct hpsb_iso *iso = (struct hpsb_iso*) data;
1876 struct ohci_iso_recv *recv = iso->hostdata;
1877
1878 if (recv->dma_mode == BUFFER_FILL_MODE)
1879 ohci_iso_recv_bufferfill_task(iso, recv);
1880 else
1881 ohci_iso_recv_packetperbuf_task(iso, recv);
1882}
1883
1884/***********************************
1885 * rawiso ISO transmission *
1886 ***********************************/
1887
/* per-context state for rawiso transmission */
struct ohci_iso_xmit {
	struct ti_ohci *ohci;
	struct dma_prog_region prog;	/* DMA program (array of iso_xmit_cmd) */
	struct ohci1394_iso_tasklet task;
	int task_active;	/* nonzero once the tasklet is registered */

	/* OHCI IT context register offsets (IsoXmit base + 16*ctx) */
	u32 ContextControlSet;
	u32 ContextControlClear;
	u32 CommandPtr;
};
1898
1899/* transmission DMA program:
1900 one OUTPUT_MORE_IMMEDIATE for the IT header
1901 one OUTPUT_LAST for the buffer data */
1902
struct iso_xmit_cmd {
	struct dma_cmd output_more_immediate;	/* OUTPUT_MORE_IMMEDIATE descriptor */
	u8 iso_hdr[8];				/* immediate data: the IT packet header */
	u32 unused[2];				/* remainder of the immediate-data area */
	struct dma_cmd output_last;		/* OUTPUT_LAST pointing at the payload */
};
1909
1910static int ohci_iso_xmit_init(struct hpsb_iso *iso);
1911static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle);
1912static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso);
1913static void ohci_iso_xmit_task(unsigned long data);
1914
/*
 * Allocate and set up a rawiso transmit context: allocate the DMA
 * program (one iso_xmit_cmd per queueable packet) and register the iso
 * tasklet, which assigns a hardware IT context.  Returns 0 on success,
 * -ENOMEM/-EBUSY on failure; partial state is torn down via
 * ohci_iso_xmit_shutdown() on the error path.
 */
static int ohci_iso_xmit_init(struct hpsb_iso *iso)
{
	struct ohci_iso_xmit *xmit;
	unsigned int prog_size;
	int ctx;
	int ret = -ENOMEM;

	xmit = kmalloc(sizeof(*xmit), SLAB_KERNEL);
	if (!xmit)
		return -ENOMEM;

	/* stash hostdata early so the shutdown path can run on error */
	iso->hostdata = xmit;
	xmit->ohci = iso->host->hostdata;
	xmit->task_active = 0;

	dma_prog_region_init(&xmit->prog);

	prog_size = sizeof(struct iso_xmit_cmd) * iso->buf_packets;

	if (dma_prog_region_alloc(&xmit->prog, prog_size, xmit->ohci->dev))
		goto err;

	ohci1394_init_iso_tasklet(&xmit->task, OHCI_ISO_TRANSMIT,
				  ohci_iso_xmit_task, (unsigned long) iso);

	/* registration assigns a free hardware IT context, or fails if
	   none is available */
	if (ohci1394_register_iso_tasklet(xmit->ohci, &xmit->task) < 0) {
		ret = -EBUSY;
		goto err;
	}

	xmit->task_active = 1;

	/* xmit context registers are spaced 16 bytes apart */
	ctx = xmit->task.context;
	xmit->ContextControlSet = OHCI1394_IsoXmitContextControlSet + 16 * ctx;
	xmit->ContextControlClear = OHCI1394_IsoXmitContextControlClear + 16 * ctx;
	xmit->CommandPtr = OHCI1394_IsoXmitCommandPtr + 16 * ctx;

	return 0;

err:
	ohci_iso_xmit_shutdown(iso);
	return ret;
}
1959
1960static void ohci_iso_xmit_stop(struct hpsb_iso *iso)
1961{
1962 struct ohci_iso_xmit *xmit = iso->hostdata;
1963 struct ti_ohci *ohci = xmit->ohci;
1964
1965 /* disable interrupts */
1966 reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskClear, 1 << xmit->task.context);
1967
1968 /* halt DMA */
1969 if (ohci1394_stop_context(xmit->ohci, xmit->ContextControlClear, NULL)) {
1970 /* XXX the DMA context will lock up if you try to send too much data! */
1971 PRINT(KERN_ERR,
1972 "you probably exceeded the OHCI card's bandwidth limit - "
1973 "reload the module and reduce xmit bandwidth");
1974 }
1975}
1976
1977static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso)
1978{
1979 struct ohci_iso_xmit *xmit = iso->hostdata;
1980
1981 if (xmit->task_active) {
1982 ohci_iso_xmit_stop(iso);
1983 ohci1394_unregister_iso_tasklet(xmit->ohci, &xmit->task);
1984 xmit->task_active = 0;
1985 }
1986
1987 dma_prog_region_free(&xmit->prog);
1988 kfree(xmit);
1989 iso->hostdata = NULL;
1990}
1991
/*
 * Transmit tasklet body: starting at iso->pkt_dma, reap every packet
 * descriptor whose xferStatus the controller has written, report each
 * completion (with its timestamp) to the iso core, and wake any writer
 * waiting for buffer space.
 */
static void ohci_iso_xmit_task(unsigned long data)
{
	struct hpsb_iso *iso = (struct hpsb_iso*) data;
	struct ohci_iso_xmit *xmit = iso->hostdata;
	struct ti_ohci *ohci = xmit->ohci;
	int wake = 0;
	int count;

	/* check the whole buffer if necessary, starting at pkt_dma */
	for (count = 0; count < iso->buf_packets; count++) {
		int cycle;

		/* DMA descriptor */
		struct iso_xmit_cmd *cmd = dma_region_i(&xmit->prog, struct iso_xmit_cmd, iso->pkt_dma);

		/* check for new writes to xferStatus */
		u16 xferstatus = le32_to_cpu(cmd->output_last.status) >> 16;
		u8 event = xferstatus & 0x1F;

		if (!event) {
			/* packet hasn't been sent yet; we are done for now */
			break;
		}

		/* 0x11 is the success event code; anything else is an error */
		if (event != 0x11)
			PRINT(KERN_ERR,
			      "IT DMA error - OHCI error code 0x%02x\n", event);

		/* at least one packet went out, so wake up the writer */
		wake = 1;

		/* parse cycle (timestamp written into the low 13 status bits) */
		cycle = le32_to_cpu(cmd->output_last.status) & 0x1FFF;

		/* tell the subsystem the packet has gone out */
		hpsb_iso_packet_sent(iso, cycle, event != 0x11);

		/* reset the DMA descriptor for next time */
		cmd->output_last.status = 0;
	}

	if (wake)
		hpsb_iso_wake(iso);
}
2036
/* Append one isochronous packet to the transmit DMA chain.
 *
 * Builds an OUTPUT_MORE_IMMEDIATE descriptor (carrying the iso header)
 * plus an OUTPUT_LAST descriptor (carrying the payload address) at slot
 * iso->first_packet, then links the previous ring slot's branchAddress
 * to it and pokes the context's wake bit so the controller notices.
 *
 * Returns 0 on success, -EINVAL if the payload would cross a page
 * boundary (a single OUTPUT_LAST cannot describe that).
 */
static int ohci_iso_xmit_queue(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
{
	struct ohci_iso_xmit *xmit = iso->hostdata;
	struct ti_ohci *ohci = xmit->ohci;

	int next_i, prev_i;
	struct iso_xmit_cmd *next, *prev;

	unsigned int offset;
	unsigned short len;
	unsigned char tag, sy;

	/* check that the packet doesn't cross a page boundary
	   (we could allow this if we added OUTPUT_MORE descriptor support) */
	if (cross_bound(info->offset, info->len)) {
		PRINT(KERN_ERR,
		      "rawiso xmit: packet %u crosses a page boundary",
		      iso->first_packet);
		return -EINVAL;
	}

	offset = info->offset;
	len = info->len;
	tag = info->tag;
	sy = info->sy;

	/* sync up the card's view of the buffer */
	dma_region_sync_for_device(&iso->data_buf, offset, len);

	/* append first_packet to the DMA chain */
	/* by linking the previous descriptor to it */
	/* (next will become the new end of the DMA chain) */

	next_i = iso->first_packet;
	prev_i = (next_i == 0) ? (iso->buf_packets - 1) : (next_i - 1);

	next = dma_region_i(&xmit->prog, struct iso_xmit_cmd, next_i);
	prev = dma_region_i(&xmit->prog, struct iso_xmit_cmd, prev_i);

	/* set up the OUTPUT_MORE_IMMEDIATE descriptor
	 * (0x02000008 = OUTPUT_MORE + immediate, reqCount = 8 header bytes) */
	memset(next, 0, sizeof(struct iso_xmit_cmd));
	next->output_more_immediate.control = cpu_to_le32(0x02000008);

	/* ISO packet header is embedded in the OUTPUT_MORE_IMMEDIATE */

	/* tcode = 0xA, and sy */
	next->iso_hdr[0] = 0xA0 | (sy & 0xF);

	/* tag and channel number */
	next->iso_hdr[1] = (tag << 6) | (iso->channel & 0x3F);

	/* transmission speed */
	next->iso_hdr[2] = iso->speed & 0x7;

	/* payload size (little-endian halfword split across two bytes) */
	next->iso_hdr[6] = len & 0xFF;
	next->iso_hdr[7] = len >> 8;

	/* set up the OUTPUT_LAST */
	next->output_last.control = cpu_to_le32(1 << 28);
	next->output_last.control |= cpu_to_le32(1 << 27); /* update timeStamp */
	next->output_last.control |= cpu_to_le32(3 << 20); /* want interrupt */
	next->output_last.control |= cpu_to_le32(3 << 18); /* enable branch */
	next->output_last.control |= cpu_to_le32(len);

	/* payload bus address */
	next->output_last.address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, offset));

	/* leave branchAddress at zero for now (this slot is the ring tail) */

	/* re-write the previous DMA descriptor to chain to this one */

	/* set prev branch address to point to next (Z=3) */
	prev->output_last.branchAddress = cpu_to_le32(
		dma_prog_region_offset_to_bus(&xmit->prog, sizeof(struct iso_xmit_cmd) * next_i) | 3);

	/* disable interrupt, unless required by the IRQ interval */
	if (prev_i % iso->irq_interval) {
		prev->output_last.control &= cpu_to_le32(~(3 << 20)); /* no interrupt */
	} else {
		prev->output_last.control |= cpu_to_le32(3 << 20); /* enable interrupt */
	}

	/* descriptors must be visible to the device before the wake below */
	wmb();

	/* wake DMA in case it is sleeping */
	reg_write(xmit->ohci, xmit->ContextControlSet, 1 << 12);

	/* issue a dummy read of the cycle timer to force all PCI
	   writes to be posted immediately */
	mb();
	reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer);

	return 0;
}
2132
/* Start the isochronous transmit DMA context.
 *
 * 'cycle' is the 1394 cycle number (mod 8000) on which transmission
 * should begin, or -1 to start immediately.  Returns 0 on success,
 * -1 if the context's RUN bit did not come up within 100 us.
 */
static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle)
{
	struct ohci_iso_xmit *xmit = iso->hostdata;
	struct ti_ohci *ohci = xmit->ohci;

	/* clear out the control register */
	reg_write(xmit->ohci, xmit->ContextControlClear, 0xFFFFFFFF);
	wmb();

	/* address and length of first descriptor block (Z=3) */
	reg_write(xmit->ohci, xmit->CommandPtr,
		  dma_prog_region_offset_to_bus(&xmit->prog, iso->pkt_dma * sizeof(struct iso_xmit_cmd)) | 3);

	/* cycle match */
	if (cycle != -1) {
		u32 start = cycle & 0x1FFF;

		/* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
		   just snarf them from the current time */
		u32 seconds = reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer) >> 25;

		/* advance one second to give some extra time for DMA to start */
		seconds += 1;

		start |= (seconds & 3) << 13;

		/* cycleMatchEnable (bit 31) + 15-bit cycleMatch value */
		reg_write(xmit->ohci, xmit->ContextControlSet, 0x80000000 | (start << 16));
	}

	/* enable interrupts */
	reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskSet, 1 << xmit->task.context);

	/* run (set the context's RUN bit) */
	reg_write(xmit->ohci, xmit->ContextControlSet, 0x8000);
	mb();

	/* wait 100 usec to give the card time to go active */
	udelay(100);

	/* check the RUN bit */
	if (!(reg_read(xmit->ohci, xmit->ContextControlSet) & 0x8000)) {
		PRINT(KERN_ERR, "Error starting IT DMA (ContextControl 0x%08x)\n",
		      reg_read(xmit->ohci, xmit->ContextControlSet));
		return -1;
	}

	return 0;
}
2181
2182static int ohci_isoctl(struct hpsb_iso *iso, enum isoctl_cmd cmd, unsigned long arg)
2183{
2184
2185 switch(cmd) {
2186 case XMIT_INIT:
2187 return ohci_iso_xmit_init(iso);
2188 case XMIT_START:
2189 return ohci_iso_xmit_start(iso, arg);
2190 case XMIT_STOP:
2191 ohci_iso_xmit_stop(iso);
2192 return 0;
2193 case XMIT_QUEUE:
2194 return ohci_iso_xmit_queue(iso, (struct hpsb_iso_packet_info*) arg);
2195 case XMIT_SHUTDOWN:
2196 ohci_iso_xmit_shutdown(iso);
2197 return 0;
2198
2199 case RECV_INIT:
2200 return ohci_iso_recv_init(iso);
2201 case RECV_START: {
2202 int *args = (int*) arg;
2203 return ohci_iso_recv_start(iso, args[0], args[1], args[2]);
2204 }
2205 case RECV_STOP:
2206 ohci_iso_recv_stop(iso);
2207 return 0;
2208 case RECV_RELEASE:
2209 ohci_iso_recv_release(iso, (struct hpsb_iso_packet_info*) arg);
2210 return 0;
2211 case RECV_FLUSH:
2212 ohci_iso_recv_task((unsigned long) iso);
2213 return 0;
2214 case RECV_SHUTDOWN:
2215 ohci_iso_recv_shutdown(iso);
2216 return 0;
2217 case RECV_LISTEN_CHANNEL:
2218 ohci_iso_recv_change_channel(iso, arg, 1);
2219 return 0;
2220 case RECV_UNLISTEN_CHANNEL:
2221 ohci_iso_recv_change_channel(iso, arg, 0);
2222 return 0;
2223 case RECV_SET_CHANNEL_MASK:
2224 ohci_iso_recv_set_channel_mask(iso, *((u64*) arg));
2225 return 0;
2226
2227 default:
2228 PRINT_G(KERN_ERR, "ohci_isoctl cmd %d not implemented yet",
2229 cmd);
2230 break;
2231 }
2232 return -EINVAL;
2233}
2234
2235/***************************************
2236 * IEEE-1394 functionality section END *
2237 ***************************************/
2238
2239
2240/********************************************************
2241 * Global stuff (interrupt handler, init/shutdown code) *
2242 ********************************************************/
2243
/* Reset an asynchronous transmit context after a stop/error.
 *
 * Halts the context, then (under d->lock) moves every queued packet from
 * the fifo and pending lists onto a private list and resets the program
 * ring state.  The subsystem completion callbacks are invoked only after
 * the lock is dropped, with ACKX_ABORTED, so callers may requeue freely.
 */
static void dma_trm_reset(struct dma_trm_ctx *d)
{
	unsigned long flags;
	LIST_HEAD(packet_list);
	struct ti_ohci *ohci = d->ohci;
	struct hpsb_packet *packet, *ptmp;

	ohci1394_stop_context(ohci, d->ctrlClear, NULL);

	/* Lock the context, reset it and release it. Move the packets
	 * that were pending in the context to packet_list and free
	 * them after releasing the lock. */

	spin_lock_irqsave(&d->lock, flags);

	list_splice(&d->fifo_list, &packet_list);
	list_splice(&d->pending_list, &packet_list);
	INIT_LIST_HEAD(&d->fifo_list);
	INIT_LIST_HEAD(&d->pending_list);

	/* ring is empty again: no branch to patch, all programs free */
	d->branchAddrPtr = NULL;
	d->sent_ind = d->prg_ind;
	d->free_prgs = d->num_desc;

	spin_unlock_irqrestore(&d->lock, flags);

	if (list_empty(&packet_list))
		return;

	PRINT(KERN_INFO, "AT dma reset ctx=%d, aborting transmission", d->ctx);

	/* Now process subsystem callbacks for the packets from this
	 * context. */
	list_for_each_entry_safe(packet, ptmp, &packet_list, driver_list) {
		list_del_init(&packet->driver_list);
		hpsb_packet_sent(ohci->host, packet, ACKX_ABORTED);
	}
}
2282
2283static void ohci_schedule_iso_tasklets(struct ti_ohci *ohci,
2284 quadlet_t rx_event,
2285 quadlet_t tx_event)
2286{
2287 struct ohci1394_iso_tasklet *t;
2288 unsigned long mask;
Andy Wingo4a9949d2005-10-19 21:23:46 -07002289 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002290
Andy Wingo4a9949d2005-10-19 21:23:46 -07002291 spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002292
2293 list_for_each_entry(t, &ohci->iso_tasklet_list, link) {
2294 mask = 1 << t->context;
2295
2296 if (t->type == OHCI_ISO_TRANSMIT && tx_event & mask)
2297 tasklet_schedule(&t->tasklet);
2298 else if (rx_event & mask)
2299 tasklet_schedule(&t->tasklet);
2300 }
2301
Andy Wingo4a9949d2005-10-19 21:23:46 -07002302 spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002303}
2304
/* Top-half interrupt handler for the OHCI-1394 controller.
 *
 * Reads and acknowledges IntEvent, then handles each asserted event bit
 * in turn, clearing it from the local 'event' copy as it goes; anything
 * left over at the end is reported as unhandled.  The busReset bit is
 * deliberately NOT acked here - it is acked in the selfIDComplete path,
 * with busReset interrupts masked in between, to avoid an interrupt storm
 * during the self-ID phase.
 */
static irqreturn_t ohci_irq_handler(int irq, void *dev_id,
                             struct pt_regs *regs_are_unused)
{
	quadlet_t event, node_id;
	struct ti_ohci *ohci = (struct ti_ohci *)dev_id;
	struct hpsb_host *host = ohci->host;
	int phyid = -1, isroot = 0;
	unsigned long flags;

	/* Read and clear the interrupt event register.  Don't clear
	 * the busReset event, though. This is done when we get the
	 * selfIDComplete interrupt. */
	spin_lock_irqsave(&ohci->event_lock, flags);
	event = reg_read(ohci, OHCI1394_IntEventClear);
	reg_write(ohci, OHCI1394_IntEventClear, event & ~OHCI1394_busReset);
	spin_unlock_irqrestore(&ohci->event_lock, flags);

	if (!event)
		return IRQ_NONE;

	/* If event is ~(u32)0 cardbus card was ejected.  In this case
	 * we just return, and clean up in the ohci1394_pci_remove
	 * function. */
	if (event == ~(u32) 0) {
		DBGMSG("Device removed.");
		return IRQ_NONE;
	}

	DBGMSG("IntEvent: %08x", event);

	if (event & OHCI1394_unrecoverableError) {
		int ctx;
		PRINT(KERN_ERR, "Unrecoverable error!");

		/* 0x800 is the 'dead' bit in each ContextControl register;
		 * dump every context that died so the log shows the culprit */
		if (reg_read(ohci, OHCI1394_AsReqTrContextControlSet) & 0x800)
			PRINT(KERN_ERR, "Async Req Tx Context died: "
				"ctrl[%08x] cmdptr[%08x]",
				reg_read(ohci, OHCI1394_AsReqTrContextControlSet),
				reg_read(ohci, OHCI1394_AsReqTrCommandPtr));

		if (reg_read(ohci, OHCI1394_AsRspTrContextControlSet) & 0x800)
			PRINT(KERN_ERR, "Async Rsp Tx Context died: "
				"ctrl[%08x] cmdptr[%08x]",
				reg_read(ohci, OHCI1394_AsRspTrContextControlSet),
				reg_read(ohci, OHCI1394_AsRspTrCommandPtr));

		if (reg_read(ohci, OHCI1394_AsReqRcvContextControlSet) & 0x800)
			PRINT(KERN_ERR, "Async Req Rcv Context died: "
				"ctrl[%08x] cmdptr[%08x]",
				reg_read(ohci, OHCI1394_AsReqRcvContextControlSet),
				reg_read(ohci, OHCI1394_AsReqRcvCommandPtr));

		if (reg_read(ohci, OHCI1394_AsRspRcvContextControlSet) & 0x800)
			PRINT(KERN_ERR, "Async Rsp Rcv Context died: "
				"ctrl[%08x] cmdptr[%08x]",
				reg_read(ohci, OHCI1394_AsRspRcvContextControlSet),
				reg_read(ohci, OHCI1394_AsRspRcvCommandPtr));

		/* IT contexts are spaced 16 bytes apart, IR contexts 32 */
		for (ctx = 0; ctx < ohci->nb_iso_xmit_ctx; ctx++) {
			if (reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)) & 0x800)
				PRINT(KERN_ERR, "Iso Xmit %d Context died: "
					"ctrl[%08x] cmdptr[%08x]", ctx,
					reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)),
					reg_read(ohci, OHCI1394_IsoXmitCommandPtr + (16 * ctx)));
		}

		for (ctx = 0; ctx < ohci->nb_iso_rcv_ctx; ctx++) {
			if (reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)) & 0x800)
				PRINT(KERN_ERR, "Iso Recv %d Context died: "
					"ctrl[%08x] cmdptr[%08x] match[%08x]", ctx,
					reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)),
					reg_read(ohci, OHCI1394_IsoRcvCommandPtr + (32 * ctx)),
					reg_read(ohci, OHCI1394_IsoRcvContextMatch + (32 * ctx)));
		}

		event &= ~OHCI1394_unrecoverableError;
	}
	if (event & OHCI1394_postedWriteErr) {
		PRINT(KERN_ERR, "physical posted write error");
		/* no recovery strategy yet, had to involve protocol drivers */
	}
	if (event & OHCI1394_cycleTooLong) {
		if(printk_ratelimit())
			PRINT(KERN_WARNING, "isochronous cycle too long");
		else
			DBGMSG("OHCI1394_cycleTooLong");
		/* the controller drops cycleMaster on this event;
		 * re-enable it so isochronous traffic can continue */
		reg_write(ohci, OHCI1394_LinkControlSet,
			  OHCI1394_LinkControl_CycleMaster);
		event &= ~OHCI1394_cycleTooLong;
	}
	if (event & OHCI1394_cycleInconsistent) {
		/* We subscribe to the cycleInconsistent event only to
		 * clear the corresponding event bit... otherwise,
		 * isochronous cycleMatch DMA won't work. */
		DBGMSG("OHCI1394_cycleInconsistent");
		event &= ~OHCI1394_cycleInconsistent;
	}
	if (event & OHCI1394_busReset) {
		/* The busReset event bit can't be cleared during the
		 * selfID phase, so we disable busReset interrupts, to
		 * avoid burying the cpu in interrupt requests. */
		spin_lock_irqsave(&ohci->event_lock, flags);
		reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_busReset);

		if (ohci->check_busreset) {
			int loop_count = 0;

			udelay(10);

			while (reg_read(ohci, OHCI1394_IntEventSet) & OHCI1394_busReset) {
				reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);

				spin_unlock_irqrestore(&ohci->event_lock, flags);
				udelay(10);
				spin_lock_irqsave(&ohci->event_lock, flags);

				/* The loop counter check is to prevent the driver
				 * from remaining in this state forever. For the
				 * initial bus reset, the loop continues for ever
				 * and the system hangs, until some device is plugged-in
				 * or out manually into a port! The forced reset seems
				 * to solve this problem. This mainly effects nForce2. */
				if (loop_count > 10000) {
					ohci_devctl(host, RESET_BUS, LONG_RESET);
					DBGMSG("Detected bus-reset loop. Forced a bus reset!");
					loop_count = 0;
				}

				loop_count++;
			}
		}
		spin_unlock_irqrestore(&ohci->event_lock, flags);
		if (!host->in_bus_reset) {
			DBGMSG("irq_handler: Bus reset requested");

			/* Subsystem call */
			hpsb_bus_reset(ohci->host);
		}
		event &= ~OHCI1394_busReset;
	}
	if (event & OHCI1394_reqTxComplete) {
		struct dma_trm_ctx *d = &ohci->at_req_context;
		DBGMSG("Got reqTxComplete interrupt "
		       "status=0x%08X", reg_read(ohci, d->ctrlSet));
		if (reg_read(ohci, d->ctrlSet) & 0x800)
			ohci1394_stop_context(ohci, d->ctrlClear,
					      "reqTxComplete");
		else
			/* NOTE: the AT-request context runs its tasklet
			 * directly here rather than deferring it */
			dma_trm_tasklet((unsigned long)d);
			//tasklet_schedule(&d->task);
		event &= ~OHCI1394_reqTxComplete;
	}
	if (event & OHCI1394_respTxComplete) {
		struct dma_trm_ctx *d = &ohci->at_resp_context;
		DBGMSG("Got respTxComplete interrupt "
		       "status=0x%08X", reg_read(ohci, d->ctrlSet));
		if (reg_read(ohci, d->ctrlSet) & 0x800)
			ohci1394_stop_context(ohci, d->ctrlClear,
					      "respTxComplete");
		else
			tasklet_schedule(&d->task);
		event &= ~OHCI1394_respTxComplete;
	}
	if (event & OHCI1394_RQPkt) {
		struct dma_rcv_ctx *d = &ohci->ar_req_context;
		DBGMSG("Got RQPkt interrupt status=0x%08X",
		       reg_read(ohci, d->ctrlSet));
		if (reg_read(ohci, d->ctrlSet) & 0x800)
			ohci1394_stop_context(ohci, d->ctrlClear, "RQPkt");
		else
			tasklet_schedule(&d->task);
		event &= ~OHCI1394_RQPkt;
	}
	if (event & OHCI1394_RSPkt) {
		struct dma_rcv_ctx *d = &ohci->ar_resp_context;
		DBGMSG("Got RSPkt interrupt status=0x%08X",
		       reg_read(ohci, d->ctrlSet));
		if (reg_read(ohci, d->ctrlSet) & 0x800)
			ohci1394_stop_context(ohci, d->ctrlClear, "RSPkt");
		else
			tasklet_schedule(&d->task);
		event &= ~OHCI1394_RSPkt;
	}
	if (event & OHCI1394_isochRx) {
		quadlet_t rx_event;

		rx_event = reg_read(ohci, OHCI1394_IsoRecvIntEventSet);
		reg_write(ohci, OHCI1394_IsoRecvIntEventClear, rx_event);
		ohci_schedule_iso_tasklets(ohci, rx_event, 0);
		event &= ~OHCI1394_isochRx;
	}
	if (event & OHCI1394_isochTx) {
		quadlet_t tx_event;

		tx_event = reg_read(ohci, OHCI1394_IsoXmitIntEventSet);
		reg_write(ohci, OHCI1394_IsoXmitIntEventClear, tx_event);
		ohci_schedule_iso_tasklets(ohci, 0, tx_event);
		event &= ~OHCI1394_isochTx;
	}
	if (event & OHCI1394_selfIDComplete) {
		if (host->in_bus_reset) {
			node_id = reg_read(ohci, OHCI1394_NodeID);

			/* bit 31 of NodeID is idValid */
			if (!(node_id & 0x80000000)) {
				PRINT(KERN_ERR,
				      "SelfID received, but NodeID invalid "
				      "(probably new bus reset occurred): %08X",
				      node_id);
				goto selfid_not_valid;
			}

			phyid =  node_id & 0x0000003f;
			isroot = (node_id & 0x40000000) != 0;

			DBGMSG("SelfID interrupt received "
			       "(phyid %d, %s)", phyid,
			       (isroot ? "root" : "not root"));

			handle_selfid(ohci, host, phyid, isroot);

			/* Clear the bus reset event and re-enable the
			 * busReset interrupt. */
			spin_lock_irqsave(&ohci->event_lock, flags);
			reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
			reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
			spin_unlock_irqrestore(&ohci->event_lock, flags);

			/* Turn on phys dma reception.
			 *
			 * TODO: Enable some sort of filtering management.
			 */
			if (phys_dma) {
				reg_write(ohci, OHCI1394_PhyReqFilterHiSet,
					  0xffffffff);
				reg_write(ohci, OHCI1394_PhyReqFilterLoSet,
					  0xffffffff);
			}

			DBGMSG("PhyReqFilter=%08x%08x",
			       reg_read(ohci, OHCI1394_PhyReqFilterHiSet),
			       reg_read(ohci, OHCI1394_PhyReqFilterLoSet));

			hpsb_selfid_complete(host, phyid, isroot);
		} else
			PRINT(KERN_ERR,
			      "SelfID received outside of bus reset sequence");

selfid_not_valid:
		event &= ~OHCI1394_selfIDComplete;
	}

	/* Make sure we handle everything, just in case we accidentally
	 * enabled an interrupt that we didn't write a handler for. */
	if (event)
		PRINT(KERN_ERR, "Unhandled interrupt(s) 0x%08x",
		      event);

	return IRQ_HANDLED;
}
2564
2565/* Put the buffer back into the dma context */
2566static void insert_dma_buffer(struct dma_rcv_ctx *d, int idx)
2567{
2568 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2569 DBGMSG("Inserting dma buf ctx=%d idx=%d", d->ctx, idx);
2570
2571 d->prg_cpu[idx]->status = cpu_to_le32(d->buf_size);
2572 d->prg_cpu[idx]->branchAddress &= le32_to_cpu(0xfffffff0);
2573 idx = (idx + d->num_desc - 1 ) % d->num_desc;
2574 d->prg_cpu[idx]->branchAddress |= le32_to_cpu(0x00000001);
2575
2576 /* To avoid a race, ensure 1394 interface hardware sees the inserted
2577 * context program descriptors before it sees the wakeup bit set. */
2578 wmb();
2579
2580 /* wake up the dma context if necessary */
2581 if (!(reg_read(ohci, d->ctrlSet) & 0x400)) {
2582 PRINT(KERN_INFO,
2583 "Waking dma ctx=%d ... processing is probably too slow",
2584 d->ctx);
2585 }
2586
2587 /* do this always, to avoid race condition */
2588 reg_write(ohci, d->ctrlSet, 0x1000);
2589}
2590
/* Conditionally byte-swap a little-endian quadlet read from a receive
 * buffer: controllers flagged no_swap_incoming already deliver host-order
 * data.  Arguments are fully parenthesized so the macro expands safely
 * when callers pass compound expressions (the original left both 'data'
 * and 'noswap' bare, an operator-precedence hazard). */
#define cond_le32_to_cpu(data, noswap) \
	((noswap) ? (data) : le32_to_cpu(data))
2593
/* Fixed packet length in bytes (header + CRC) indexed by IEEE-1394
 * transaction code, used by packet_length().  0 means the tcode carries a
 * variable-length data block (actual length is read from the packet
 * header); -1 marks tcodes that must not appear in an async receive
 * context and is treated as an error by the caller. */
static const int TCODE_SIZE[16] = {20, 0, 16, -1, 16, 20, 20, 0,
				   -1, 0, -1, 0, -1, -1, 16, -1};
2596
/*
 * Determine the length of a packet in the buffer
 * Optimization suggested by Pascal Drolet <pascal.drolet@informission.ca>
 *
 * For async contexts the length comes from TCODE_SIZE, falling back to
 * the data_length field in the packet header for block tcodes (entry 0);
 * that header quadlet may already have wrapped into the next descriptor's
 * buffer, which the (idx + 1) % num_desc lookup handles.  For iso
 * contexts (buffer-fill mode) the length is in the first quadlet.
 * Returns a negative value for tcodes that should never be received.
 */
static __inline__ int packet_length(struct dma_rcv_ctx *d, int idx, quadlet_t *buf_ptr,
			 int offset, unsigned char tcode, int noswap)
{
	int length = -1;

	if (d->type == DMA_CTX_ASYNC_REQ || d->type == DMA_CTX_ASYNC_RESP) {
		length = TCODE_SIZE[tcode];
		if (length == 0) {
			/* block packet: data_length lives in quadlet 3 of the
			 * header, possibly split across the buffer boundary */
			if (offset + 12 >= d->buf_size) {
				length = (cond_le32_to_cpu(d->buf_cpu[(idx + 1) % d->num_desc]
						[3 - ((d->buf_size - offset) >> 2)], noswap) >> 16);
			} else {
				length = (cond_le32_to_cpu(buf_ptr[3], noswap) >> 16);
			}
			length += 20;	/* add header (16) + CRC (4) */
		}
	} else if (d->type == DMA_CTX_ISO) {
		/* Assumption: buffer fill mode with header/trailer */
		length = (cond_le32_to_cpu(buf_ptr[0], noswap) >> 16) + 8;
	}

	/* round up to quadlet alignment */
	if (length > 0 && length % 4)
		length += 4 - (length % 4);

	return length;
}
2627
/* Tasklet that processes dma receive buffers.
 *
 * Walks the receive buffer ring from (d->buf_ind, d->buf_offset),
 * reassembling each packet into the d->spb bounce buffer - copying
 * across descriptor boundaries when a packet is split - re-inserting
 * drained buffers into the DMA program, and handing completed packets
 * to hpsb_packet_received().  Runs entirely under d->lock.  On a
 * malformed length or oversized split packet the context is stopped
 * and the ring position saved for a later retry.
 */
static void dma_rcv_tasklet (unsigned long data)
{
	struct dma_rcv_ctx *d = (struct dma_rcv_ctx*)data;
	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
	unsigned int split_left, idx, offset, rescount;
	unsigned char tcode;
	int length, bytes_left, ack;
	unsigned long flags;
	quadlet_t *buf_ptr;
	char *split_ptr;
	char msg[256];

	spin_lock_irqsave(&d->lock, flags);

	idx = d->buf_ind;
	offset = d->buf_offset;
	buf_ptr = d->buf_cpu[idx] + offset/4;

	/* rescount = bytes the controller has NOT yet filled in this buffer */
	rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;
	bytes_left = d->buf_size - rescount - offset;

	while (bytes_left > 0) {
		tcode = (cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming) >> 4) & 0xf;

		/* packet_length() will return < 4 for an error */
		length = packet_length(d, idx, buf_ptr, offset, tcode, ohci->no_swap_incoming);

		if (length < 4) { /* something is wrong */
			sprintf(msg,"Unexpected tcode 0x%x(0x%08x) in AR ctx=%d, length=%d",
				tcode, cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming),
				d->ctx, length);
			ohci1394_stop_context(ohci, d->ctrlClear, msg);
			spin_unlock_irqrestore(&d->lock, flags);
			return;
		}

		/* The first case is where we have a packet that crosses
		 * over more than one descriptor. The next case is where
		 * it's all in the first descriptor. */
		if ((offset + length) > d->buf_size) {
			DBGMSG("Split packet rcv'd");
			if (length > d->split_buf_size) {
				ohci1394_stop_context(ohci, d->ctrlClear,
					     "Split packet size exceeded");
				d->buf_ind = idx;
				d->buf_offset = offset;
				spin_unlock_irqrestore(&d->lock, flags);
				return;
			}

			if (le32_to_cpu(d->prg_cpu[(idx+1)%d->num_desc]->status)
			    == d->buf_size) {
				/* Other part of packet not written yet.
				 * this should never happen I think
				 * anyway we'll get it on the next call. */
				PRINT(KERN_INFO,
				      "Got only half a packet!");
				d->buf_ind = idx;
				d->buf_offset = offset;
				spin_unlock_irqrestore(&d->lock, flags);
				return;
			}

			/* copy the tail of this buffer, then whole buffers,
			 * then the head of the last one, into d->spb */
			split_left = length;
			split_ptr = (char *)d->spb;
			memcpy(split_ptr,buf_ptr,d->buf_size-offset);
			split_left -= d->buf_size-offset;
			split_ptr += d->buf_size-offset;
			insert_dma_buffer(d, idx);
			idx = (idx+1) % d->num_desc;
			buf_ptr = d->buf_cpu[idx];
			offset=0;

			while (split_left >= d->buf_size) {
				memcpy(split_ptr,buf_ptr,d->buf_size);
				split_ptr += d->buf_size;
				split_left -= d->buf_size;
				insert_dma_buffer(d, idx);
				idx = (idx+1) % d->num_desc;
				buf_ptr = d->buf_cpu[idx];
			}

			if (split_left > 0) {
				memcpy(split_ptr, buf_ptr, split_left);
				offset = split_left;
				buf_ptr += offset/4;
			}
		} else {
			DBGMSG("Single packet rcv'd");
			memcpy(d->spb, buf_ptr, length);
			offset += length;
			buf_ptr += length/4;
			if (offset==d->buf_size) {
				insert_dma_buffer(d, idx);
				idx = (idx+1) % d->num_desc;
				buf_ptr = d->buf_cpu[idx];
				offset=0;
			}
		}

		/* We get one phy packet to the async descriptor for each
		 * bus reset. We always ignore it. */
		if (tcode != OHCI1394_TCODE_PHY) {
			if (!ohci->no_swap_incoming)
				packet_swab(d->spb, tcode);
			DBGMSG("Packet received from node"
				" %d ack=0x%02X spd=%d tcode=0x%X"
				" length=%d ctx=%d tlabel=%d",
				(d->spb[1]>>16)&0x3f,
				(cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f,
				(cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>21)&0x3,
				tcode, length, d->ctx,
				(cond_le32_to_cpu(d->spb[0], ohci->no_swap_incoming)>>10)&0x3f);

			/* ack code lives in the trailer quadlet appended by
			 * the controller; 0x11 == ack_complete */
			ack = (((cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f)
				== 0x11) ? 1 : 0;

			/* length-4: strip the controller's status trailer */
			hpsb_packet_received(ohci->host, d->spb,
					     length-4, ack);
		}
#ifdef OHCI1394_DEBUG
		else
			PRINT (KERN_DEBUG, "Got phy packet ctx=%d ... discarded",
			       d->ctx);
#endif

		rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;

		bytes_left = d->buf_size - rescount - offset;

	}

	d->buf_ind = idx;
	d->buf_offset = offset;

	spin_unlock_irqrestore(&d->lock, flags);
}
2766
/* Bottom half that processes sent packets.
 *
 * Retires packets from the head of d->fifo_list in order: reads the
 * controller-written xferStatus from the corresponding program
 * descriptor (end.status for data packets, begin.status otherwise),
 * stops at the first packet not yet sent (status == 0), maps the OHCI
 * event code to a hpsb ACK/ACKX code, completes the packet via
 * hpsb_packet_sent(), unmaps its payload, and frees the program slot.
 * Finishes by flushing any newly pending packets into the context.
 * Runs under d->lock.
 */
static void dma_trm_tasklet (unsigned long data)
{
	struct dma_trm_ctx *d = (struct dma_trm_ctx*)data;
	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
	struct hpsb_packet *packet, *ptmp;
	unsigned long flags;
	u32 status, ack;
        size_t datasize;

	spin_lock_irqsave(&d->lock, flags);

	list_for_each_entry_safe(packet, ptmp, &d->fifo_list, driver_list) {
		datasize = packet->data_size;
		if (datasize && packet->type != hpsb_raw)
			status = le32_to_cpu(
				d->prg_cpu[d->sent_ind]->end.status) >> 16;
		else
			status = le32_to_cpu(
				d->prg_cpu[d->sent_ind]->begin.status) >> 16;

		if (status == 0)
			/* this packet hasn't been sent yet*/
			break;

#ifdef OHCI1394_DEBUG
		if (datasize)
			if (((le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf) == 0xa)
				DBGMSG("Stream packet sent to channel %d tcode=0x%X "
				       "ack=0x%X spd=%d dataLength=%d ctx=%d",
				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>8)&0x3f,
				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
				       status&0x1f, (status>>5)&0x3,
				       le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16,
				       d->ctx);
			else
				DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
				       "%d ack=0x%X spd=%d dataLength=%d ctx=%d",
				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16)&0x3f,
				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>10)&0x3f,
				       status&0x1f, (status>>5)&0x3,
				       le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3])>>16,
				       d->ctx);
		else
			DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
			       "%d ack=0x%X spd=%d data=0x%08X ctx=%d",
                                (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])
                                        >>16)&0x3f,
                                (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
                                        >>4)&0xf,
                                (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
                                        >>10)&0x3f,
                                status&0x1f, (status>>5)&0x3,
                                le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3]),
                                d->ctx);
#endif

		/* bit 4 set => a real 1394 ack code in the low nibble;
		 * clear => an OHCI evt_* error code in the low 5 bits */
		if (status & 0x10) {
			ack = status & 0xf;
		} else {
			switch (status & 0x1f) {
			case EVT_NO_STATUS: /* that should never happen */
			case EVT_RESERVED_A: /* that should never happen */
			case EVT_LONG_PACKET: /* that should never happen */
				PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_MISSING_ACK:
				ack = ACKX_TIMEOUT;
				break;
			case EVT_UNDERRUN:
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_OVERRUN: /* that should never happen */
				PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_DESCRIPTOR_READ:
			case EVT_DATA_READ:
			case EVT_DATA_WRITE:
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_BUS_RESET: /* that should never happen */
				PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_TIMEOUT:
				ack = ACKX_TIMEOUT;
				break;
			case EVT_TCODE_ERR:
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_RESERVED_B: /* that should never happen */
			case EVT_RESERVED_C: /* that should never happen */
				PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_UNKNOWN:
			case EVT_FLUSHED:
				ack = ACKX_SEND_ERROR;
				break;
			default:
				PRINT(KERN_ERR, "Unhandled OHCI evt_* error 0x%x", status & 0x1f);
				ack = ACKX_SEND_ERROR;
				BUG();
			}
		}

		list_del_init(&packet->driver_list);
		hpsb_packet_sent(ohci->host, packet, ack);

		if (datasize) {
			/* NOTE(review): end.address is a __le32 descriptor
			 * field, so the conversion here should semantically be
			 * le32_to_cpu(); cpu_to_le32() yields the same bits
			 * (the swap is involutive) but reads backwards -
			 * worth fixing together with a sparse annotation pass. */
			pci_unmap_single(ohci->dev,
					 cpu_to_le32(d->prg_cpu[d->sent_ind]->end.address),
					 datasize, PCI_DMA_TODEVICE);
			OHCI_DMA_FREE("single Xmit data packet");
		}

		d->sent_ind = (d->sent_ind+1)%d->num_desc;
		d->free_prgs++;
	}

	dma_trm_flush(ohci, d);

	spin_unlock_irqrestore(&d->lock, flags);
}
2894
2895static void stop_dma_rcv_ctx(struct dma_rcv_ctx *d)
2896{
2897 if (d->ctrlClear) {
2898 ohci1394_stop_context(d->ohci, d->ctrlClear, NULL);
2899
2900 if (d->type == DMA_CTX_ISO) {
2901 /* disable interrupts */
2902 reg_write(d->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << d->ctx);
2903 ohci1394_unregister_iso_tasklet(d->ohci, &d->ohci->ir_legacy_tasklet);
2904 } else {
2905 tasklet_kill(&d->task);
2906 }
2907 }
2908}
2909
2910
/* Release all resources held by a receive DMA context: the consistent
 * data buffers, the program descriptors and their pci_pool, and the
 * split bounce buffer.  Safe to call on a partially-allocated context
 * (alloc_dma_rcv_ctx uses it as its error path); d->ohci == NULL marks
 * an already-freed context and makes the call a no-op. */
static void free_dma_rcv_ctx(struct dma_rcv_ctx *d)
{
	int i;
	struct ti_ohci *ohci = d->ohci;

	if (ohci == NULL)
		return;

	DBGMSG("Freeing dma_rcv_ctx %d", d->ctx);

	if (d->buf_cpu) {
		for (i=0; i<d->num_desc; i++)
			if (d->buf_cpu[i] && d->buf_bus[i]) {
				pci_free_consistent(
					ohci->dev, d->buf_size,
					d->buf_cpu[i], d->buf_bus[i]);
				OHCI_DMA_FREE("consistent dma_rcv buf[%d]", i);
			}
		kfree(d->buf_cpu);
		kfree(d->buf_bus);
	}
	if (d->prg_cpu) {
		for (i=0; i<d->num_desc; i++)
			if (d->prg_cpu[i] && d->prg_bus[i]) {
				pci_pool_free(d->prg_pool, d->prg_cpu[i], d->prg_bus[i]);
				OHCI_DMA_FREE("consistent dma_rcv prg[%d]", i);
			}
		pci_pool_destroy(d->prg_pool);
		OHCI_DMA_FREE("dma_rcv prg pool");
		kfree(d->prg_cpu);
		kfree(d->prg_bus);
	}
	kfree(d->spb);

	/* Mark this context as freed. */
	d->ohci = NULL;
}
2948
2949static int
2950alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
2951 enum context_type type, int ctx, int num_desc,
2952 int buf_size, int split_buf_size, int context_base)
2953{
2954 int i, len;
2955 static int num_allocs;
2956 static char pool_name[20];
2957
2958 d->ohci = ohci;
2959 d->type = type;
2960 d->ctx = ctx;
2961
2962 d->num_desc = num_desc;
2963 d->buf_size = buf_size;
2964 d->split_buf_size = split_buf_size;
2965
2966 d->ctrlSet = 0;
2967 d->ctrlClear = 0;
2968 d->cmdPtr = 0;
2969
Stefan Richter85511582005-11-07 06:31:45 -05002970 d->buf_cpu = kzalloc(d->num_desc * sizeof(*d->buf_cpu), GFP_ATOMIC);
2971 d->buf_bus = kzalloc(d->num_desc * sizeof(*d->buf_bus), GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002972
2973 if (d->buf_cpu == NULL || d->buf_bus == NULL) {
2974 PRINT(KERN_ERR, "Failed to allocate dma buffer");
2975 free_dma_rcv_ctx(d);
2976 return -ENOMEM;
2977 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002978
Stefan Richter85511582005-11-07 06:31:45 -05002979 d->prg_cpu = kzalloc(d->num_desc * sizeof(*d->prg_cpu), GFP_ATOMIC);
2980 d->prg_bus = kzalloc(d->num_desc * sizeof(*d->prg_bus), GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002981
2982 if (d->prg_cpu == NULL || d->prg_bus == NULL) {
2983 PRINT(KERN_ERR, "Failed to allocate dma prg");
2984 free_dma_rcv_ctx(d);
2985 return -ENOMEM;
2986 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002987
2988 d->spb = kmalloc(d->split_buf_size, GFP_ATOMIC);
2989
2990 if (d->spb == NULL) {
2991 PRINT(KERN_ERR, "Failed to allocate split buffer");
2992 free_dma_rcv_ctx(d);
2993 return -ENOMEM;
2994 }
2995
2996 len = sprintf(pool_name, "ohci1394_rcv_prg");
2997 sprintf(pool_name+len, "%d", num_allocs);
2998 d->prg_pool = pci_pool_create(pool_name, ohci->dev,
2999 sizeof(struct dma_cmd), 4, 0);
3000 if(d->prg_pool == NULL)
3001 {
3002 PRINT(KERN_ERR, "pci_pool_create failed for %s", pool_name);
3003 free_dma_rcv_ctx(d);
3004 return -ENOMEM;
3005 }
3006 num_allocs++;
3007
3008 OHCI_DMA_ALLOC("dma_rcv prg pool");
3009
3010 for (i=0; i<d->num_desc; i++) {
3011 d->buf_cpu[i] = pci_alloc_consistent(ohci->dev,
3012 d->buf_size,
3013 d->buf_bus+i);
3014 OHCI_DMA_ALLOC("consistent dma_rcv buf[%d]", i);
3015
3016 if (d->buf_cpu[i] != NULL) {
3017 memset(d->buf_cpu[i], 0, d->buf_size);
3018 } else {
3019 PRINT(KERN_ERR,
3020 "Failed to allocate dma buffer");
3021 free_dma_rcv_ctx(d);
3022 return -ENOMEM;
3023 }
3024
3025 d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, SLAB_KERNEL, d->prg_bus+i);
3026 OHCI_DMA_ALLOC("pool dma_rcv prg[%d]", i);
3027
3028 if (d->prg_cpu[i] != NULL) {
3029 memset(d->prg_cpu[i], 0, sizeof(struct dma_cmd));
3030 } else {
3031 PRINT(KERN_ERR,
3032 "Failed to allocate dma prg");
3033 free_dma_rcv_ctx(d);
3034 return -ENOMEM;
3035 }
3036 }
3037
3038 spin_lock_init(&d->lock);
3039
3040 if (type == DMA_CTX_ISO) {
3041 ohci1394_init_iso_tasklet(&ohci->ir_legacy_tasklet,
3042 OHCI_ISO_MULTICHANNEL_RECEIVE,
3043 dma_rcv_tasklet, (unsigned long) d);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003044 } else {
3045 d->ctrlSet = context_base + OHCI1394_ContextControlSet;
3046 d->ctrlClear = context_base + OHCI1394_ContextControlClear;
3047 d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
3048
3049 tasklet_init (&d->task, dma_rcv_tasklet, (unsigned long) d);
3050 }
3051
3052 return 0;
3053}
3054
3055static void free_dma_trm_ctx(struct dma_trm_ctx *d)
3056{
3057 int i;
3058 struct ti_ohci *ohci = d->ohci;
3059
3060 if (ohci == NULL)
3061 return;
3062
3063 DBGMSG("Freeing dma_trm_ctx %d", d->ctx);
3064
3065 if (d->prg_cpu) {
3066 for (i=0; i<d->num_desc; i++)
3067 if (d->prg_cpu[i] && d->prg_bus[i]) {
3068 pci_pool_free(d->prg_pool, d->prg_cpu[i], d->prg_bus[i]);
3069 OHCI_DMA_FREE("pool dma_trm prg[%d]", i);
3070 }
3071 pci_pool_destroy(d->prg_pool);
3072 OHCI_DMA_FREE("dma_trm prg pool");
3073 kfree(d->prg_cpu);
3074 kfree(d->prg_bus);
3075 }
3076
3077 /* Mark this context as freed. */
3078 d->ohci = NULL;
3079}
3080
3081static int
3082alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
3083 enum context_type type, int ctx, int num_desc,
3084 int context_base)
3085{
3086 int i, len;
3087 static char pool_name[20];
3088 static int num_allocs=0;
3089
3090 d->ohci = ohci;
3091 d->type = type;
3092 d->ctx = ctx;
3093 d->num_desc = num_desc;
3094 d->ctrlSet = 0;
3095 d->ctrlClear = 0;
3096 d->cmdPtr = 0;
3097
Stefan Richter85511582005-11-07 06:31:45 -05003098 d->prg_cpu = kzalloc(d->num_desc * sizeof(*d->prg_cpu), GFP_KERNEL);
3099 d->prg_bus = kzalloc(d->num_desc * sizeof(*d->prg_bus), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003100
3101 if (d->prg_cpu == NULL || d->prg_bus == NULL) {
3102 PRINT(KERN_ERR, "Failed to allocate at dma prg");
3103 free_dma_trm_ctx(d);
3104 return -ENOMEM;
3105 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003106
3107 len = sprintf(pool_name, "ohci1394_trm_prg");
3108 sprintf(pool_name+len, "%d", num_allocs);
3109 d->prg_pool = pci_pool_create(pool_name, ohci->dev,
3110 sizeof(struct at_dma_prg), 4, 0);
3111 if (d->prg_pool == NULL) {
3112 PRINT(KERN_ERR, "pci_pool_create failed for %s", pool_name);
3113 free_dma_trm_ctx(d);
3114 return -ENOMEM;
3115 }
3116 num_allocs++;
3117
3118 OHCI_DMA_ALLOC("dma_rcv prg pool");
3119
3120 for (i = 0; i < d->num_desc; i++) {
3121 d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, SLAB_KERNEL, d->prg_bus+i);
3122 OHCI_DMA_ALLOC("pool dma_trm prg[%d]", i);
3123
3124 if (d->prg_cpu[i] != NULL) {
3125 memset(d->prg_cpu[i], 0, sizeof(struct at_dma_prg));
3126 } else {
3127 PRINT(KERN_ERR,
3128 "Failed to allocate at dma prg");
3129 free_dma_trm_ctx(d);
3130 return -ENOMEM;
3131 }
3132 }
3133
3134 spin_lock_init(&d->lock);
3135
3136 /* initialize tasklet */
3137 if (type == DMA_CTX_ISO) {
3138 ohci1394_init_iso_tasklet(&ohci->it_legacy_tasklet, OHCI_ISO_TRANSMIT,
3139 dma_trm_tasklet, (unsigned long) d);
3140 if (ohci1394_register_iso_tasklet(ohci,
3141 &ohci->it_legacy_tasklet) < 0) {
3142 PRINT(KERN_ERR, "No IT DMA context available");
3143 free_dma_trm_ctx(d);
3144 return -EBUSY;
3145 }
3146
3147 /* IT can be assigned to any context by register_iso_tasklet */
3148 d->ctx = ohci->it_legacy_tasklet.context;
3149 d->ctrlSet = OHCI1394_IsoXmitContextControlSet + 16 * d->ctx;
3150 d->ctrlClear = OHCI1394_IsoXmitContextControlClear + 16 * d->ctx;
3151 d->cmdPtr = OHCI1394_IsoXmitCommandPtr + 16 * d->ctx;
3152 } else {
3153 d->ctrlSet = context_base + OHCI1394_ContextControlSet;
3154 d->ctrlClear = context_base + OHCI1394_ContextControlClear;
3155 d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
3156 tasklet_init (&d->task, dma_trm_tasklet, (unsigned long)d);
3157 }
3158
3159 return 0;
3160}
3161
/* hpsb_host_driver hook: load a new config ROM image into the controller.
 * Writes the ROM header and BusOptions registers, then copies the whole
 * image into the coherent buffer the controller reads the ROM from.
 * The image is big-endian quadlets, hence the be32_to_cpu() before the
 * CPU-endian register writes. */
static void ohci_set_hw_config_rom(struct hpsb_host *host, quadlet_t *config_rom)
{
	struct ti_ohci *ohci = host->hostdata;

	reg_write(ohci, OHCI1394_ConfigROMhdr, be32_to_cpu(config_rom[0]));
	reg_write(ohci, OHCI1394_BusOptions, be32_to_cpu(config_rom[2]));

	memcpy(ohci->csr_config_rom_cpu, config_rom, OHCI_CONFIG_ROM_LEN);
}
3171
3172
3173static quadlet_t ohci_hw_csr_reg(struct hpsb_host *host, int reg,
3174 quadlet_t data, quadlet_t compare)
3175{
3176 struct ti_ohci *ohci = host->hostdata;
3177 int i;
3178
3179 reg_write(ohci, OHCI1394_CSRData, data);
3180 reg_write(ohci, OHCI1394_CSRCompareData, compare);
3181 reg_write(ohci, OHCI1394_CSRControl, reg & 0x3);
3182
3183 for (i = 0; i < OHCI_LOOP_COUNT; i++) {
3184 if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
3185 break;
3186
3187 mdelay(1);
3188 }
3189
3190 return reg_read(ohci, OHCI1394_CSRData);
3191}
3192
/* Host-driver operations table handed to the ieee1394 core; these are
 * the entry points through which the stack drives this controller. */
static struct hpsb_host_driver ohci1394_driver = {
	.owner =		THIS_MODULE,
	.name =			OHCI1394_DRIVER_NAME,
	.set_hw_config_rom =	ohci_set_hw_config_rom,
	.transmit_packet =	ohci_transmit,
	.devctl =		ohci_devctl,
	.isoctl =		ohci_isoctl,
	.hw_csr_reg =		ohci_hw_csr_reg,
};
3202
Linus Torvalds1da177e2005-04-16 15:20:36 -07003203/***********************************
3204 * PCI Driver Interface functions *
3205 ***********************************/
3206
/* Error-exit helper for ohci1394_pci_probe(): log the message, undo all
 * completed init steps (ohci1394_pci_remove() switches on
 * ohci->init_state), and return @err from the probe function.
 * #undef'd at the end of the probe function. */
#define FAIL(err, fmt, args...)			\
do {						\
	PRINT_G(KERN_ERR, fmt , ## args);	\
	ohci1394_pci_remove(dev);		\
	return err;				\
} while (0)
3213
/* Bring up one OHCI-1394 controller: enable the PCI device, map its 2kb
 * register window, allocate the config-ROM and self-ID DMA buffers, set
 * up the four async DMA contexts (AR/AT request/response) and the legacy
 * IR context, hook the shared interrupt, and finally register the host
 * with the ieee1394 core.  ohci->init_state is advanced after each
 * successful step so the FAIL() unwind releases exactly what was
 * acquired so far. */
static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
					const struct pci_device_id *ent)
{
	struct hpsb_host *host;
	struct ti_ohci *ohci;	/* shortcut to currently handled device */
	resource_size_t ohci_base;

	if (pci_enable_device(dev))
		FAIL(-ENXIO, "Failed to enable OHCI hardware");
	pci_set_master(dev);

	host = hpsb_alloc_host(&ohci1394_driver, sizeof(struct ti_ohci), &dev->dev);
	if (!host) FAIL(-ENOMEM, "Failed to allocate host structure");

	ohci = host->hostdata;
	ohci->dev = dev;
	ohci->host = host;
	ohci->init_state = OHCI_INIT_ALLOC_HOST;
	host->pdev = dev;
	pci_set_drvdata(dev, ohci);

	/* We don't want hardware swapping */
	pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);

	/* Some oddball Apple controllers do not order the selfid
	 * properly, so we make up for it here. */
#ifndef __LITTLE_ENDIAN
	/* XXX: Need a better way to check this. I'm wondering if we can
	 * read the values of the OHCI1394_PCI_HCI_Control and the
	 * noByteSwapData registers to see if they were not cleared to
	 * zero. Should this work? Obviously it's not defined what these
	 * registers will read when they aren't supported. Bleh! */
	if (dev->vendor == PCI_VENDOR_ID_APPLE &&
	    dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) {
		ohci->no_swap_incoming = 1;
		ohci->selfid_swap = 0;
	} else
		ohci->selfid_swap = 1;
#endif


#ifndef PCI_DEVICE_ID_NVIDIA_NFORCE2_FW
#define PCI_DEVICE_ID_NVIDIA_NFORCE2_FW 0x006e
#endif

	/* These chipsets require a bit of extra care when checking after
	 * a busreset. */
	if ((dev->vendor == PCI_VENDOR_ID_APPLE &&
	     dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) ||
	    (dev->vendor == PCI_VENDOR_ID_NVIDIA &&
	     dev->device == PCI_DEVICE_ID_NVIDIA_NFORCE2_FW))
		ohci->check_busreset = 1;

	/* We hardwire the MMIO length, since some CardBus adaptors
	 * fail to report the right length. Anyway, the ohci spec
	 * clearly says it's 2kb, so this shouldn't be a problem. */
	ohci_base = pci_resource_start(dev, 0);
	if (pci_resource_len(dev, 0) < OHCI1394_REGISTER_SIZE)
		PRINT(KERN_WARNING, "PCI resource length of 0x%llx too small!",
		      (unsigned long long)pci_resource_len(dev, 0));

	/* Seems PCMCIA handles this internally. Not sure why. Seems
	 * pretty bogus to force a driver to special case this. */
#ifndef PCMCIA
	if (!request_mem_region (ohci_base, OHCI1394_REGISTER_SIZE, OHCI1394_DRIVER_NAME))
		FAIL(-ENOMEM, "MMIO resource (0x%llx - 0x%llx) unavailable",
			(unsigned long long)ohci_base,
			(unsigned long long)ohci_base + OHCI1394_REGISTER_SIZE);
#endif
	ohci->init_state = OHCI_INIT_HAVE_MEM_REGION;

	ohci->registers = ioremap(ohci_base, OHCI1394_REGISTER_SIZE);
	if (ohci->registers == NULL)
		FAIL(-ENXIO, "Failed to remap registers - card not accessible");
	ohci->init_state = OHCI_INIT_HAVE_IOMAPPING;
	DBGMSG("Remapped memory spaces reg 0x%p", ohci->registers);

	/* csr_config rom allocation */
	ohci->csr_config_rom_cpu =
		pci_alloc_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
				     &ohci->csr_config_rom_bus);
	OHCI_DMA_ALLOC("consistent csr_config_rom");
	if (ohci->csr_config_rom_cpu == NULL)
		FAIL(-ENOMEM, "Failed to allocate buffer config rom");
	ohci->init_state = OHCI_INIT_HAVE_CONFIG_ROM_BUFFER;

	/* self-id dma buffer allocation */
	ohci->selfid_buf_cpu =
		pci_alloc_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
				     &ohci->selfid_buf_bus);
	OHCI_DMA_ALLOC("consistent selfid_buf");

	if (ohci->selfid_buf_cpu == NULL)
		FAIL(-ENOMEM, "Failed to allocate DMA buffer for self-id packets");
	ohci->init_state = OHCI_INIT_HAVE_SELFID_BUFFER;

	if ((unsigned long)ohci->selfid_buf_cpu & 0x1fff)
		PRINT(KERN_INFO, "SelfID buffer %p is not aligned on "
		      "8Kb boundary... may cause problems on some CXD3222 chip",
		      ohci->selfid_buf_cpu);

	/* No self-id errors at startup */
	ohci->self_id_errors = 0;

	/* From here on, remove() frees whichever DMA contexts were
	 * actually allocated (free_dma_*_ctx() tolerates untouched ones,
	 * hence the __MAYBE state name). */
	ohci->init_state = OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE;
	/* AR DMA request context allocation */
	if (alloc_dma_rcv_ctx(ohci, &ohci->ar_req_context,
			      DMA_CTX_ASYNC_REQ, 0, AR_REQ_NUM_DESC,
			      AR_REQ_BUF_SIZE, AR_REQ_SPLIT_BUF_SIZE,
			      OHCI1394_AsReqRcvContextBase) < 0)
		FAIL(-ENOMEM, "Failed to allocate AR Req context");

	/* AR DMA response context allocation */
	if (alloc_dma_rcv_ctx(ohci, &ohci->ar_resp_context,
			      DMA_CTX_ASYNC_RESP, 0, AR_RESP_NUM_DESC,
			      AR_RESP_BUF_SIZE, AR_RESP_SPLIT_BUF_SIZE,
			      OHCI1394_AsRspRcvContextBase) < 0)
		FAIL(-ENOMEM, "Failed to allocate AR Resp context");

	/* AT DMA request context */
	if (alloc_dma_trm_ctx(ohci, &ohci->at_req_context,
			      DMA_CTX_ASYNC_REQ, 0, AT_REQ_NUM_DESC,
			      OHCI1394_AsReqTrContextBase) < 0)
		FAIL(-ENOMEM, "Failed to allocate AT Req context");

	/* AT DMA response context */
	if (alloc_dma_trm_ctx(ohci, &ohci->at_resp_context,
			      DMA_CTX_ASYNC_RESP, 1, AT_RESP_NUM_DESC,
			      OHCI1394_AsRspTrContextBase) < 0)
		FAIL(-ENOMEM, "Failed to allocate AT Resp context");

	/* Start off with a soft reset, to clear everything to a sane
	 * state. */
	ohci_soft_reset(ohci);

	/* Now enable LPS, which we need in order to start accessing
	 * most of the registers.  In fact, on some cards (ALI M5251),
	 * accessing registers in the SClk domain without LPS enabled
	 * will lock up the machine.  Wait 50msec to make sure we have
	 * full link enabled.  */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS);

	/* Disable and clear interrupts */
	reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);

	mdelay(50);

	/* Determine the number of available IR and IT contexts. */
	ohci->nb_iso_rcv_ctx =
		get_nb_iso_ctx(ohci, OHCI1394_IsoRecvIntMaskSet);
	ohci->nb_iso_xmit_ctx =
		get_nb_iso_ctx(ohci, OHCI1394_IsoXmitIntMaskSet);

	/* Set the usage bits for non-existent contexts so they can't
	 * be allocated */
	ohci->ir_ctx_usage = ~0 << ohci->nb_iso_rcv_ctx;
	ohci->it_ctx_usage = ~0 << ohci->nb_iso_xmit_ctx;

	INIT_LIST_HEAD(&ohci->iso_tasklet_list);
	spin_lock_init(&ohci->iso_tasklet_list_lock);
	ohci->ISO_channel_usage = 0;
	spin_lock_init(&ohci->IR_channel_lock);

	/* Allocate the IR DMA context right here so we don't have
	 * to do it in interrupt path - note that this doesn't
	 * waste much memory and avoids the jugglery required to
	 * allocate it in IRQ path. */
	if (alloc_dma_rcv_ctx(ohci, &ohci->ir_legacy_context,
			      DMA_CTX_ISO, 0, IR_NUM_DESC,
			      IR_BUF_SIZE, IR_SPLIT_BUF_SIZE,
			      OHCI1394_IsoRcvContextBase) < 0) {
		FAIL(-ENOMEM, "Cannot allocate IR Legacy DMA context");
	}

	/* We hopefully don't have to pre-allocate IT DMA like we did
	 * for IR DMA above. Allocate it on-demand and mark inactive. */
	ohci->it_legacy_context.ohci = NULL;
	spin_lock_init(&ohci->event_lock);

	/*
	 * interrupts are disabled, all right, but... due to SA_SHIRQ we
	 * might get called anyway.  We'll see no event, of course, but
	 * we need to get to that "no event", so enough should be initialized
	 * by that point.
	 */
	if (request_irq(dev->irq, ohci_irq_handler, SA_SHIRQ,
			 OHCI1394_DRIVER_NAME, ohci))
		FAIL(-ENOMEM, "Failed to allocate shared interrupt %d", dev->irq);

	ohci->init_state = OHCI_INIT_HAVE_IRQ;
	ohci_initialize(ohci);

	/* Set certain csr values */
	host->csr.guid_hi = reg_read(ohci, OHCI1394_GUIDHi);
	host->csr.guid_lo = reg_read(ohci, OHCI1394_GUIDLo);
	host->csr.cyc_clk_acc = 100;  /* how do we determine clk accuracy? */
	host->csr.max_rec = (reg_read(ohci, OHCI1394_BusOptions) >> 12) & 0xf;
	host->csr.lnk_spd = reg_read(ohci, OHCI1394_BusOptions) & 0x7;

	if (phys_dma) {
		host->low_addr_space =
			(u64) reg_read(ohci, OHCI1394_PhyUpperBound) << 16;
		if (!host->low_addr_space)
			host->low_addr_space = OHCI1394_PHYS_UPPER_BOUND_FIXED;
	}
	host->middle_addr_space = OHCI1394_MIDDLE_ADDRESS_SPACE;

	/* Tell the highlevel this host is ready */
	if (hpsb_add_host(host))
		FAIL(-ENOMEM, "Failed to register host with highlevel");

	ohci->init_state = OHCI_INIT_DONE;

	return 0;
#undef FAIL
}
3431
/* Teardown for both module removal and probe failure.  ohci->init_state
 * records how far ohci1394_pci_probe() got; the switch deliberately
 * falls through from the most-complete state downwards so each
 * completed init step is undone in reverse order. */
static void ohci1394_pci_remove(struct pci_dev *pdev)
{
	struct ti_ohci *ohci;
	struct device *dev;

	ohci = pci_get_drvdata(pdev);
	if (!ohci)
		return;

	/* Pin the host device so it survives until the final put below. */
	dev = get_device(&ohci->host->device);

	switch (ohci->init_state) {
	case OHCI_INIT_DONE:
		hpsb_remove_host(ohci->host);

		/* Clear out BUS Options */
		reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
		reg_write(ohci, OHCI1394_BusOptions,
			  (reg_read(ohci, OHCI1394_BusOptions) & 0x0000f007) |
			  0x00ff0000);
		memset(ohci->csr_config_rom_cpu, 0, OHCI_CONFIG_ROM_LEN);

		/* fall through */
	case OHCI_INIT_HAVE_IRQ:
		/* Clear interrupt registers */
		reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);

		/* Disable IRM Contender */
		set_phy_reg(ohci, 4, ~0xc0 & get_phy_reg(ohci, 4));

		/* Clear link control register */
		reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);

		/* Let all other nodes know to ignore us */
		ohci_devctl(ohci->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);

		/* Soft reset before we start - this disables
		 * interrupts and clears linkEnable and LPS. */
		ohci_soft_reset(ohci);
		free_irq(ohci->dev->irq, ohci);

		/* fall through */
	case OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE:
		/* The ohci_soft_reset() stops all DMA contexts, so we
		 * dont need to do this. */
		free_dma_rcv_ctx(&ohci->ar_req_context);
		free_dma_rcv_ctx(&ohci->ar_resp_context);
		free_dma_trm_ctx(&ohci->at_req_context);
		free_dma_trm_ctx(&ohci->at_resp_context);
		free_dma_rcv_ctx(&ohci->ir_legacy_context);
		free_dma_trm_ctx(&ohci->it_legacy_context);

		/* fall through */
	case OHCI_INIT_HAVE_SELFID_BUFFER:
		pci_free_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
				    ohci->selfid_buf_cpu,
				    ohci->selfid_buf_bus);
		OHCI_DMA_FREE("consistent selfid_buf");

		/* fall through */
	case OHCI_INIT_HAVE_CONFIG_ROM_BUFFER:
		pci_free_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
				    ohci->csr_config_rom_cpu,
				    ohci->csr_config_rom_bus);
		OHCI_DMA_FREE("consistent csr_config_rom");

		/* fall through */
	case OHCI_INIT_HAVE_IOMAPPING:
		iounmap(ohci->registers);

		/* fall through */
	case OHCI_INIT_HAVE_MEM_REGION:
#ifndef PCMCIA
		release_mem_region(pci_resource_start(ohci->dev, 0),
				   OHCI1394_REGISTER_SIZE);
#endif

#ifdef CONFIG_PPC_PMAC
	/* On UniNorth, power down the cable and turn off the chip
	 * clock when the module is removed to save power on
	 * laptops. Turning it back ON is done by the arch code when
	 * pci_enable_device() is called */
	{
		struct device_node* of_node;

		of_node = pci_device_to_OF_node(ohci->dev);
		if (of_node) {
			pmac_call_feature(PMAC_FTR_1394_ENABLE, of_node, 0, 0);
			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, of_node, 0, 0);
		}
	}
#endif /* CONFIG_PPC_PMAC */

		/* fall through */
	case OHCI_INIT_ALLOC_HOST:
		pci_set_drvdata(ohci->dev, NULL);
	}

	if (dev)
		put_device(dev);
}
3531
3532
/* PCI resume callback: re-enable the FireWire cell on PMac hardware,
 * restore the saved PCI config space, and re-enable the device.
 * NOTE(review): no controller re-initialization is done here, only
 * PCI-level state is restored. */
static int ohci1394_pci_resume (struct pci_dev *pdev)
{
#ifdef CONFIG_PPC_PMAC
	if (machine_is(powermac)) {
		struct device_node *of_node;

		/* Re-enable 1394 */
		of_node = pci_device_to_OF_node (pdev);
		if (of_node)
			pmac_call_feature (PMAC_FTR_1394_ENABLE, of_node, 0, 1);
	}
#endif /* CONFIG_PPC_PMAC */

	pci_restore_state(pdev);

	/* Propagate a failure to re-enable the device to the PM core
	 * instead of silently returning success (the return value used
	 * to be ignored). */
	return pci_enable_device(pdev);
}
3551
3552
3553static int ohci1394_pci_suspend (struct pci_dev *pdev, pm_message_t state)
3554{
Benjamin Herrenschmidt8c870932005-06-27 14:36:34 -07003555#ifdef CONFIG_PPC_PMAC
Benjamin Herrenschmidte8222502006-03-28 23:15:54 +11003556 if (machine_is(powermac)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003557 struct device_node *of_node;
3558
3559 /* Disable 1394 */
3560 of_node = pci_device_to_OF_node (pdev);
3561 if (of_node)
3562 pmac_call_feature(PMAC_FTR_1394_ENABLE, of_node, 0, 0);
3563 }
3564#endif
3565
Ben Collinsb21efb52006-06-12 18:15:03 -04003566 pci_save_state(pdev);
3567
Linus Torvalds1da177e2005-04-16 15:20:36 -07003568 return 0;
3569}
3570
3571
/* OHCI-1394 is identified by PCI class code, not by vendor:device pairs:
 * serial-bus/FireWire class with the OHCI programming interface (0x10).
 * The table therefore matches on class only and wildcards everything
 * else. */
#define PCI_CLASS_FIREWIRE_OHCI ((PCI_CLASS_SERIAL_FIREWIRE << 8) | 0x10)

static struct pci_device_id ohci1394_pci_tbl[] = {
	{
		.class = PCI_CLASS_FIREWIRE_OHCI,
		.class_mask = PCI_ANY_ID,
		.vendor = PCI_ANY_ID,
		.device = PCI_ANY_ID,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
	},
	{ 0, },
};

MODULE_DEVICE_TABLE(pci, ohci1394_pci_tbl);
3587
/* PCI driver glue: binds every OHCI-class FireWire controller found in
 * ohci1394_pci_tbl to the probe/remove/suspend/resume callbacks above. */
static struct pci_driver ohci1394_pci_driver = {
	.name =		OHCI1394_DRIVER_NAME,
	.id_table =	ohci1394_pci_tbl,
	.probe =	ohci1394_pci_probe,
	.remove =	ohci1394_pci_remove,
	.resume =	ohci1394_pci_resume,
	.suspend =	ohci1394_pci_suspend,
};
3596
Linus Torvalds1da177e2005-04-16 15:20:36 -07003597/***********************************
3598 * OHCI1394 Video Interface *
3599 ***********************************/
3600
3601/* essentially the only purpose of this code is to allow another
3602 module to hook into ohci's interrupt handler */
3603
3604int ohci1394_stop_context(struct ti_ohci *ohci, int reg, char *msg)
3605{
3606 int i=0;
3607
3608 /* stop the channel program if it's still running */
3609 reg_write(ohci, reg, 0x8000);
3610
3611 /* Wait until it effectively stops */
3612 while (reg_read(ohci, reg) & 0x400) {
3613 i++;
3614 if (i>5000) {
3615 PRINT(KERN_ERR,
3616 "Runaway loop while stopping context: %s...", msg ? msg : "");
3617 return 1;
3618 }
3619
3620 mb();
3621 udelay(10);
3622 }
3623 if (msg) PRINT(KERN_ERR, "%s: dma prg stopped", msg);
3624 return 0;
3625}
3626
3627void ohci1394_init_iso_tasklet(struct ohci1394_iso_tasklet *tasklet, int type,
3628 void (*func)(unsigned long), unsigned long data)
3629{
3630 tasklet_init(&tasklet->tasklet, func, data);
3631 tasklet->type = type;
3632 /* We init the tasklet->link field, so we can list_del() it
3633 * without worrying whether it was added to the list or not. */
3634 INIT_LIST_HEAD(&tasklet->link);
3635}
3636
3637int ohci1394_register_iso_tasklet(struct ti_ohci *ohci,
3638 struct ohci1394_iso_tasklet *tasklet)
3639{
3640 unsigned long flags, *usage;
3641 int n, i, r = -EBUSY;
3642
3643 if (tasklet->type == OHCI_ISO_TRANSMIT) {
3644 n = ohci->nb_iso_xmit_ctx;
3645 usage = &ohci->it_ctx_usage;
3646 }
3647 else {
3648 n = ohci->nb_iso_rcv_ctx;
3649 usage = &ohci->ir_ctx_usage;
3650
3651 /* only one receive context can be multichannel (OHCI sec 10.4.1) */
3652 if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE) {
3653 if (test_and_set_bit(0, &ohci->ir_multichannel_used)) {
3654 return r;
3655 }
3656 }
3657 }
3658
3659 spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
3660
3661 for (i = 0; i < n; i++)
3662 if (!test_and_set_bit(i, usage)) {
3663 tasklet->context = i;
3664 list_add_tail(&tasklet->link, &ohci->iso_tasklet_list);
3665 r = 0;
3666 break;
3667 }
3668
3669 spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
3670
3671 return r;
3672}
3673
3674void ohci1394_unregister_iso_tasklet(struct ti_ohci *ohci,
3675 struct ohci1394_iso_tasklet *tasklet)
3676{
3677 unsigned long flags;
3678
3679 tasklet_kill(&tasklet->tasklet);
3680
3681 spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
3682
3683 if (tasklet->type == OHCI_ISO_TRANSMIT)
3684 clear_bit(tasklet->context, &ohci->it_ctx_usage);
3685 else {
3686 clear_bit(tasklet->context, &ohci->ir_ctx_usage);
3687
3688 if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE) {
3689 clear_bit(0, &ohci->ir_multichannel_used);
3690 }
3691 }
3692
3693 list_del(&tasklet->link);
3694
3695 spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
3696}
3697
/* Exported so other modules can hook into this driver's context-stop
 * and iso tasklet machinery (see the comment at the top of this
 * section). */
EXPORT_SYMBOL(ohci1394_stop_context);
EXPORT_SYMBOL(ohci1394_init_iso_tasklet);
EXPORT_SYMBOL(ohci1394_register_iso_tasklet);
EXPORT_SYMBOL(ohci1394_unregister_iso_tasklet);
3702
Linus Torvalds1da177e2005-04-16 15:20:36 -07003703/***********************************
3704 * General module initialization *
3705 ***********************************/
3706
/* Module metadata. */
MODULE_AUTHOR("Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>");
MODULE_DESCRIPTION("Driver for PCI OHCI IEEE-1394 controllers");
MODULE_LICENSE("GPL");
3710
/* Module exit: unregister the PCI driver; the PCI core then invokes
 * ohci1394_pci_remove() for every bound device. */
static void __exit ohci1394_cleanup (void)
{
	pci_unregister_driver(&ohci1394_pci_driver);
}
3715
/* Module init: register the PCI driver; probing of individual
 * controllers happens via ohci1394_pci_probe(). */
static int __init ohci1394_init(void)
{
	return pci_register_driver(&ohci1394_pci_driver);
}
3720
3721module_init(ohci1394_init);
3722module_exit(ohci1394_cleanup);