/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <bcmdefs.h>
#include <bcmdevs.h>
#include <osl.h>
#include <bcmendian.h>
#include <hndsoc.h>
#include <bcmutils.h>
#include <siutils.h>

#include <sbhnddma.h>
#include <hnddma.h>

/* debug/trace */
#ifdef BCMDBG
#define	DMA_ERROR(args) \
	do { \
		if (!(*di->msg_level & 1)) \
			; \
		else \
			printf args; \
	} while (0)
#define	DMA_TRACE(args) \
	do { \
		if (!(*di->msg_level & 2)) \
			; \
		else \
			printf args; \
	} while (0)
#else
#define	DMA_ERROR(args)
#define	DMA_TRACE(args)
#endif				/* BCMDBG */

#define	DMA_NONE(args)

#define d32txregs	dregs.d32_u.txregs_32
#define d32rxregs	dregs.d32_u.rxregs_32
#define txd32		dregs.d32_u.txd_32
#define rxd32		dregs.d32_u.rxd_32

#define d64txregs	dregs.d64_u.txregs_64
#define d64rxregs	dregs.d64_u.rxregs_64
#define txd64		dregs.d64_u.txd_64
#define rxd64		dregs.d64_u.rxd_64
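
/*
 * The accessor macros above just shorten references into the dregs union
 * declared in dma_info below; e.g. di->d64txregs expands to
 * di->dregs.d64_u.txregs_64. Illustrative sketch only, both spellings
 * name the same register pointer:
 *
 *	dma64regs_t *regs = di->d64txregs;
 *	ASSERT(regs == di->dregs.d64_u.txregs_64);
 */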

/* default dma message level (if input msg_level pointer is null in dma_attach()) */
static uint dma_msg_level;

#define	MAXNAMEL	8	/* 8 char names */

#define	DI_INFO(dmah)	((dma_info_t *)dmah)

/* dma engine software state */
typedef struct dma_info {
	struct hnddma_pub hnddma;	/* exported structure, don't use hnddma_t,
					 * which could be const
					 */
	uint *msg_level;	/* message level pointer */
	char name[MAXNAMEL];	/* caller's name for diag msgs */

	void *osh;		/* os handle */
	si_t *sih;		/* sb handle */

	bool dma64;		/* this dma engine is operating in 64-bit mode */
	bool addrext;		/* this dma engine supports DmaExtendedAddrChanges */

	union {
		struct {
			dma32regs_t *txregs_32;	/* 32-bit dma tx engine registers */
			dma32regs_t *rxregs_32;	/* 32-bit dma rx engine registers */
			dma32dd_t *txd_32;	/* pointer to dma32 tx descriptor ring */
			dma32dd_t *rxd_32;	/* pointer to dma32 rx descriptor ring */
		} d32_u;
		struct {
			dma64regs_t *txregs_64;	/* 64-bit dma tx engine registers */
			dma64regs_t *rxregs_64;	/* 64-bit dma rx engine registers */
			dma64dd_t *txd_64;	/* pointer to dma64 tx descriptor ring */
			dma64dd_t *rxd_64;	/* pointer to dma64 rx descriptor ring */
		} d64_u;
	} dregs;

	u16 dmadesc_align;	/* alignment requirement for dma descriptors */

	u16 ntxd;		/* # tx descriptors tunable */
	u16 txin;		/* index of next descriptor to reclaim */
	u16 txout;		/* index of next descriptor to post */
	void **txp;		/* pointer to parallel array of pointers to packets */
	osldma_t *tx_dmah;	/* DMA TX descriptor ring handle */
	hnddma_seg_map_t *txp_dmah;	/* DMA MAP meta-data handle */
	dmaaddr_t txdpa;	/* Aligned physical address of descriptor ring */
	dmaaddr_t txdpaorig;	/* Original physical address of descriptor ring */
	u16 txdalign;		/* #bytes added to alloc'd mem to align txd */
	u32 txdalloc;		/* #bytes allocated for the ring */
	u32 xmtptrbase;		/* When using unaligned descriptors, the ptr register
				 * is not just an index, it needs all 13 bits to be
				 * an offset from the addr register.
				 */

	u16 nrxd;		/* # rx descriptors tunable */
	u16 rxin;		/* index of next descriptor to reclaim */
	u16 rxout;		/* index of next descriptor to post */
	void **rxp;		/* pointer to parallel array of pointers to packets */
	osldma_t *rx_dmah;	/* DMA RX descriptor ring handle */
	hnddma_seg_map_t *rxp_dmah;	/* DMA MAP meta-data handle */
	dmaaddr_t rxdpa;	/* Aligned physical address of descriptor ring */
	dmaaddr_t rxdpaorig;	/* Original physical address of descriptor ring */
	u16 rxdalign;		/* #bytes added to alloc'd mem to align rxd */
	u32 rxdalloc;		/* #bytes allocated for the ring */
	u32 rcvptrbase;		/* Base for ptr reg when using unaligned descriptors */

	/* tunables */
	unsigned int rxbufsize;	/* rx buffer size in bytes,
				 * not including the extra headroom
				 */
	uint rxextrahdrroom;	/* extra rx headroom, reserved to assist the upper stack,
				 * e.g. some rx pkt buffers will be bridged to the tx side
				 * without byte copying. The extra headroom needs to be
				 * large enough to fit txheader needs.
				 * Some dongle drivers may not need it.
				 */
	uint nrxpost;		/* # rx buffers to keep posted */
	unsigned int rxoffset;	/* rxcontrol offset */
	uint ddoffsetlow;	/* add to get dma address of descriptor ring, low 32 bits */
	uint ddoffsethigh;	/* high 32 bits */
	uint dataoffsetlow;	/* add to get dma address of data buffer, low 32 bits */
	uint dataoffsethigh;	/* high 32 bits */
	bool aligndesc_4k;	/* whether the descriptor base needs to be aligned */
} dma_info_t;

/*
 * If BCMDMA32 is defined, hnddma will support both 32-bit and 64-bit DMA engines.
 * Otherwise it will support only 64-bit.
 *
 * DMA32_ENAB indicates whether hnddma is compiled with support for 32-bit DMA engines.
 * DMA64_ENAB indicates whether hnddma is compiled with support for 64-bit DMA engines.
 *
 * DMA64_MODE indicates whether the current DMA engine is running as 64-bit.
 */
#ifdef BCMDMA32
#define	DMA32_ENAB(di)	1
#define	DMA64_ENAB(di)	1
#define	DMA64_MODE(di)	((di)->dma64)
#else				/* !BCMDMA32 */
#define	DMA32_ENAB(di)	0
#define	DMA64_ENAB(di)	1
#define	DMA64_MODE(di)	1
#endif				/* !BCMDMA32 */

/* DMA scatter-gather lists are supported. Note this is limited to the TX direction only */
#ifdef BCMDMASGLISTOSL
#define DMASGLIST_ENAB true
#else
#define DMASGLIST_ENAB false
#endif				/* BCMDMASGLISTOSL */

/* descriptor bumping macros */
#define	XXD(x, n)	((x) & ((n) - 1))	/* faster than %, but n must be power of 2 */
#define	TXD(x)		XXD((x), di->ntxd)
#define	RXD(x)		XXD((x), di->nrxd)
#define	NEXTTXD(i)	TXD((i) + 1)
#define	PREVTXD(i)	TXD((i) - 1)
#define	NEXTRXD(i)	RXD((i) + 1)
#define	PREVRXD(i)	RXD((i) - 1)

#define	NTXDACTIVE(h, t)	TXD((t) - (h))
#define	NRXDACTIVE(h, t)	RXD((t) - (h))
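
/*
 * Worked example (hypothetical values, for illustration only): with
 * di->ntxd == 64, the power-of-two mask makes indexes wrap, so
 * NEXTTXD(63) == 0; with txin == 60 and txout == 2 the ring holds
 * NTXDACTIVE(60, 2) == ((2 - 60) & 63) == 6 active descriptors.
 * The masked subtraction is wrap-safe even when the raw difference
 * is negative.
 */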

/* macros to convert between byte offsets and indexes */
#define	B2I(bytes, type)	((bytes) / sizeof(type))
#define	I2B(index, type)	((index) * sizeof(type))

#define	PCI32ADDR_HIGH		0xc0000000	/* address[31:30] */
#define	PCI32ADDR_HIGH_SHIFT	30	/* address[31:30] */

#define	PCI64ADDR_HIGH		0x80000000	/* address[63] */
#define	PCI64ADDR_HIGH_SHIFT	31	/* address[63] */
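
/*
 * Worked example (hypothetical address, for illustration only): for a
 * 32-bit physical address pa == 0xd0001000,
 *
 *	ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;	// 3
 *	pa &= ~PCI32ADDR_HIGH;					// 0x10001000
 *
 * moves address[31:30] into the AE field and clears them from the
 * address, which is exactly what the dd_upd routines below do when
 * address extension is in use.
 */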

/* Common prototypes */
static bool _dma_isaddrext(dma_info_t *di);
static bool _dma_descriptor_align(dma_info_t *di);
static bool _dma_alloc(dma_info_t *di, uint direction);
static void _dma_detach(dma_info_t *di);
static void _dma_ddtable_init(dma_info_t *di, uint direction, dmaaddr_t pa);
static void _dma_rxinit(dma_info_t *di);
static void *_dma_rx(dma_info_t *di);
static bool _dma_rxfill(dma_info_t *di);
static void _dma_rxreclaim(dma_info_t *di);
static void _dma_rxenable(dma_info_t *di);
static void *_dma_getnextrxp(dma_info_t *di, bool forceall);
static void _dma_rx_param_get(dma_info_t *di, u16 *rxoffset,
			      u16 *rxbufsize);

static void _dma_txblock(dma_info_t *di);
static void _dma_txunblock(dma_info_t *di);
static uint _dma_txactive(dma_info_t *di);
static uint _dma_rxactive(dma_info_t *di);
static uint _dma_txpending(dma_info_t *di);
static uint _dma_txcommitted(dma_info_t *di);

static void *_dma_peeknexttxp(dma_info_t *di);
static void *_dma_peeknextrxp(dma_info_t *di);
static unsigned long _dma_getvar(dma_info_t *di, const char *name);
static void _dma_counterreset(dma_info_t *di);
static void _dma_fifoloopbackenable(dma_info_t *di);
static uint _dma_ctrlflags(dma_info_t *di, uint mask, uint flags);
static u8 dma_align_sizetobits(uint size);
static void *dma_ringalloc(struct osl_info *osh, u32 boundary, uint size,
			   u16 *alignbits, uint *alloced,
			   dmaaddr_t *descpa, osldma_t **dmah);

/* Prototypes for 32-bit routines */
static bool dma32_alloc(dma_info_t *di, uint direction);
static bool dma32_txreset(dma_info_t *di);
static bool dma32_rxreset(dma_info_t *di);
static bool dma32_txsuspendedidle(dma_info_t *di);
static int dma32_txfast(dma_info_t *di, struct sk_buff *p0, bool commit);
static void *dma32_getnexttxp(dma_info_t *di, txd_range_t range);
static void *dma32_getnextrxp(dma_info_t *di, bool forceall);
static void dma32_txrotate(dma_info_t *di);
static bool dma32_rxidle(dma_info_t *di);
static void dma32_txinit(dma_info_t *di);
static bool dma32_txenabled(dma_info_t *di);
static void dma32_txsuspend(dma_info_t *di);
static void dma32_txresume(dma_info_t *di);
static bool dma32_txsuspended(dma_info_t *di);
static void dma32_txreclaim(dma_info_t *di, txd_range_t range);
static bool dma32_txstopped(dma_info_t *di);
static bool dma32_rxstopped(dma_info_t *di);
static bool dma32_rxenabled(dma_info_t *di);

static bool _dma32_addrext(struct osl_info *osh, dma32regs_t *dma32regs);

/* Prototypes for 64-bit routines */
static bool dma64_alloc(dma_info_t *di, uint direction);
static bool dma64_txreset(dma_info_t *di);
static bool dma64_rxreset(dma_info_t *di);
static bool dma64_txsuspendedidle(dma_info_t *di);
static int dma64_txfast(dma_info_t *di, struct sk_buff *p0, bool commit);
static int dma64_txunframed(dma_info_t *di, void *p0, uint len, bool commit);
static void *dma64_getpos(dma_info_t *di, bool direction);
static void *dma64_getnexttxp(dma_info_t *di, txd_range_t range);
static void *dma64_getnextrxp(dma_info_t *di, bool forceall);
static void dma64_txrotate(dma_info_t *di);

static bool dma64_rxidle(dma_info_t *di);
static void dma64_txinit(dma_info_t *di);
static bool dma64_txenabled(dma_info_t *di);
static void dma64_txsuspend(dma_info_t *di);
static void dma64_txresume(dma_info_t *di);
static bool dma64_txsuspended(dma_info_t *di);
static void dma64_txreclaim(dma_info_t *di, txd_range_t range);
static bool dma64_txstopped(dma_info_t *di);
static bool dma64_rxstopped(dma_info_t *di);
static bool dma64_rxenabled(dma_info_t *di);
static bool _dma64_addrext(struct osl_info *osh, dma64regs_t *dma64regs);

static inline u32 parity32(u32 data);

const di_fcn_t dma64proc = {
	(di_detach_t) _dma_detach,
	(di_txinit_t) dma64_txinit,
	(di_txreset_t) dma64_txreset,
	(di_txenabled_t) dma64_txenabled,
	(di_txsuspend_t) dma64_txsuspend,
	(di_txresume_t) dma64_txresume,
	(di_txsuspended_t) dma64_txsuspended,
	(di_txsuspendedidle_t) dma64_txsuspendedidle,
	(di_txfast_t) dma64_txfast,
	(di_txunframed_t) dma64_txunframed,
	(di_getpos_t) dma64_getpos,
	(di_txstopped_t) dma64_txstopped,
	(di_txreclaim_t) dma64_txreclaim,
	(di_getnexttxp_t) dma64_getnexttxp,
	(di_peeknexttxp_t) _dma_peeknexttxp,
	(di_txblock_t) _dma_txblock,
	(di_txunblock_t) _dma_txunblock,
	(di_txactive_t) _dma_txactive,
	(di_txrotate_t) dma64_txrotate,

	(di_rxinit_t) _dma_rxinit,
	(di_rxreset_t) dma64_rxreset,
	(di_rxidle_t) dma64_rxidle,
	(di_rxstopped_t) dma64_rxstopped,
	(di_rxenable_t) _dma_rxenable,
	(di_rxenabled_t) dma64_rxenabled,
	(di_rx_t) _dma_rx,
	(di_rxfill_t) _dma_rxfill,
	(di_rxreclaim_t) _dma_rxreclaim,
	(di_getnextrxp_t) _dma_getnextrxp,
	(di_peeknextrxp_t) _dma_peeknextrxp,
	(di_rxparam_get_t) _dma_rx_param_get,

	(di_fifoloopbackenable_t) _dma_fifoloopbackenable,
	(di_getvar_t) _dma_getvar,
	(di_counterreset_t) _dma_counterreset,
	(di_ctrlflags_t) _dma_ctrlflags,
	NULL,
	NULL,
	NULL,
	(di_rxactive_t) _dma_rxactive,
	(di_txpending_t) _dma_txpending,
	(di_txcommitted_t) _dma_txcommitted,
	39
};

static const di_fcn_t dma32proc = {
	(di_detach_t) _dma_detach,
	(di_txinit_t) dma32_txinit,
	(di_txreset_t) dma32_txreset,
	(di_txenabled_t) dma32_txenabled,
	(di_txsuspend_t) dma32_txsuspend,
	(di_txresume_t) dma32_txresume,
	(di_txsuspended_t) dma32_txsuspended,
	(di_txsuspendedidle_t) dma32_txsuspendedidle,
	(di_txfast_t) dma32_txfast,
	NULL,
	NULL,
	(di_txstopped_t) dma32_txstopped,
	(di_txreclaim_t) dma32_txreclaim,
	(di_getnexttxp_t) dma32_getnexttxp,
	(di_peeknexttxp_t) _dma_peeknexttxp,
	(di_txblock_t) _dma_txblock,
	(di_txunblock_t) _dma_txunblock,
	(di_txactive_t) _dma_txactive,
	(di_txrotate_t) dma32_txrotate,

	(di_rxinit_t) _dma_rxinit,
	(di_rxreset_t) dma32_rxreset,
	(di_rxidle_t) dma32_rxidle,
	(di_rxstopped_t) dma32_rxstopped,
	(di_rxenable_t) _dma_rxenable,
	(di_rxenabled_t) dma32_rxenabled,
	(di_rx_t) _dma_rx,
	(di_rxfill_t) _dma_rxfill,
	(di_rxreclaim_t) _dma_rxreclaim,
	(di_getnextrxp_t) _dma_getnextrxp,
	(di_peeknextrxp_t) _dma_peeknextrxp,
	(di_rxparam_get_t) _dma_rx_param_get,

	(di_fifoloopbackenable_t) _dma_fifoloopbackenable,
	(di_getvar_t) _dma_getvar,
	(di_counterreset_t) _dma_counterreset,
	(di_ctrlflags_t) _dma_ctrlflags,
	NULL,
	NULL,
	NULL,
	(di_rxactive_t) _dma_rxactive,
	(di_txpending_t) _dma_txpending,
	(di_txcommitted_t) _dma_txcommitted,
	39
};
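
/*
 * Dispatch note: callers reach the routines above through the di_fn
 * vtable stored in the public handle rather than by direct call. A
 * minimal usage sketch, assuming "dmah" is the hnddma_t returned by
 * dma_attach() below and "pkt" is a posted tx packet:
 *
 *	dmah->di_fn->txinit(dmah);
 *	if (dmah->di_fn->txenabled(dmah))
 *		dmah->di_fn->txfast(dmah, pkt, true);
 */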

hnddma_t *dma_attach(struct osl_info *osh, char *name, si_t *sih,
		     void *dmaregstx, void *dmaregsrx, uint ntxd,
		     uint nrxd, uint rxbufsize, int rxextheadroom,
		     uint nrxpost, uint rxoffset, uint *msg_level)
{
	dma_info_t *di;
	uint size;

	/* allocate private info structure */
	di = kzalloc(sizeof(dma_info_t), GFP_ATOMIC);
	if (di == NULL) {
#ifdef BCMDBG
		printf("dma_attach: out of memory\n");
#endif
		return NULL;
	}

	di->msg_level = msg_level ? msg_level : &dma_msg_level;

	/* old chips w/o sb are no longer supported */
	ASSERT(sih != NULL);

	if (DMA64_ENAB(di))
		di->dma64 =
		    ((si_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64);
	else
		di->dma64 = 0;

	/* check arguments */
	ASSERT(ISPOWEROF2(ntxd));
	ASSERT(ISPOWEROF2(nrxd));

	if (nrxd == 0)
		ASSERT(dmaregsrx == NULL);
	if (ntxd == 0)
		ASSERT(dmaregstx == NULL);

	/* init dma reg pointer */
	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		ASSERT(ntxd <= D64MAXDD);
		ASSERT(nrxd <= D64MAXDD);
		di->d64txregs = (dma64regs_t *) dmaregstx;
		di->d64rxregs = (dma64regs_t *) dmaregsrx;
		di->hnddma.di_fn = (const di_fcn_t *)&dma64proc;
	} else if (DMA32_ENAB(di)) {
		ASSERT(ntxd <= D32MAXDD);
		ASSERT(nrxd <= D32MAXDD);
		di->d32txregs = (dma32regs_t *) dmaregstx;
		di->d32rxregs = (dma32regs_t *) dmaregsrx;
		di->hnddma.di_fn = (const di_fcn_t *)&dma32proc;
	} else {
		DMA_ERROR(("dma_attach: driver doesn't support 32-bit DMA\n"));
		ASSERT(0);
		goto fail;
	}

	/* Default flags (which can be changed by the driver calling dma_ctrlflags
	 * before enable): for backwards compatibility both Rx Overflow Continue
	 * and Parity are DISABLED.
	 */
	di->hnddma.di_fn->ctrlflags(&di->hnddma, DMA_CTRL_ROC | DMA_CTRL_PEN,
				    0);

	DMA_TRACE(("%s: dma_attach: %s osh %p flags 0x%x ntxd %d nrxd %d "
		   "rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d "
		   "dmaregstx %p dmaregsrx %p\n", name,
		   (DMA64_MODE(di) ? "DMA64" : "DMA32"), osh,
		   di->hnddma.dmactrlflags, ntxd, nrxd, rxbufsize,
		   rxextheadroom, nrxpost, rxoffset, dmaregstx, dmaregsrx));

	/* make a private copy of our caller's name */
	strncpy(di->name, name, MAXNAMEL);
	di->name[MAXNAMEL - 1] = '\0';

	di->osh = osh;
	di->sih = sih;

	/* save tunables */
	di->ntxd = (u16) ntxd;
	di->nrxd = (u16) nrxd;

	/* the actual dma size doesn't include the extra headroom */
	di->rxextrahdrroom =
	    (rxextheadroom == -1) ? BCMEXTRAHDROOM : rxextheadroom;
	if (rxbufsize > BCMEXTRAHDROOM)
		di->rxbufsize = (u16) (rxbufsize - di->rxextrahdrroom);
	else
		di->rxbufsize = (u16) rxbufsize;

	di->nrxpost = (u16) nrxpost;
	di->rxoffset = (u8) rxoffset;

	/*
	 * figure out the DMA physical address offset for dd and data
	 *     PCI/PCIE: they map silicon backplane addresses to zero-based memory, need offset
	 *     Other bus: use zero
	 *     SI_BUS BIGENDIAN kludge: use sdram swapped region for data buffer, not descriptor
	 */
	di->ddoffsetlow = 0;
	di->dataoffsetlow = 0;
	/* for pci bus, add offset */
	if (sih->bustype == PCI_BUS) {
		if ((sih->buscoretype == PCIE_CORE_ID) && DMA64_MODE(di)) {
			/* pcie with DMA64 */
			di->ddoffsetlow = 0;
			di->ddoffsethigh = SI_PCIE_DMA_H32;
		} else {
			/* pci(DMA32/DMA64) or pcie with DMA32 */
			di->ddoffsetlow = SI_PCI_DMA;
			di->ddoffsethigh = 0;
		}
		di->dataoffsetlow = di->ddoffsetlow;
		di->dataoffsethigh = di->ddoffsethigh;
	}
#if defined(__mips__) && defined(IL_BIGENDIAN)
	di->dataoffsetlow = di->dataoffsetlow + SI_SDRAM_SWAPPED;
#endif				/* defined(__mips__) && defined(IL_BIGENDIAN) */
	/* WAR64450 : DMACtl.Addr ext fields are not supported in SDIOD core. */
	if ((si_coreid(sih) == SDIOD_CORE_ID)
	    && ((si_corerev(sih) > 0) && (si_corerev(sih) <= 2)))
		di->addrext = 0;
	else if ((si_coreid(sih) == I2S_CORE_ID) &&
		 ((si_corerev(sih) == 0) || (si_corerev(sih) == 1)))
		di->addrext = 0;
	else
		di->addrext = _dma_isaddrext(di);

	/* do the descriptors need to be aligned, and if yes, on 4K/8K or not? */
	di->aligndesc_4k = _dma_descriptor_align(di);
	if (di->aligndesc_4k) {
		if (DMA64_MODE(di)) {
			di->dmadesc_align = D64RINGALIGN_BITS;
			if ((ntxd < D64MAXDD / 2) && (nrxd < D64MAXDD / 2)) {
				/* for a smaller dd table, HW relaxes the alignment requirement */
				di->dmadesc_align = D64RINGALIGN_BITS - 1;
			}
		} else
			di->dmadesc_align = D32RINGALIGN_BITS;
	} else
		di->dmadesc_align = 4;	/* 16 byte alignment */

	DMA_NONE(("DMA descriptor align_needed %d, align %d\n",
		  di->aligndesc_4k, di->dmadesc_align));

	/* allocate tx packet pointer vector */
	if (ntxd) {
		size = ntxd * sizeof(void *);
		di->txp = kzalloc(size, GFP_ATOMIC);
		if (di->txp == NULL) {
			DMA_ERROR(("%s: dma_attach: out of tx memory\n", di->name));
			goto fail;
		}
	}

	/* allocate rx packet pointer vector */
	if (nrxd) {
		size = nrxd * sizeof(void *);
		di->rxp = kzalloc(size, GFP_ATOMIC);
		if (di->rxp == NULL) {
			DMA_ERROR(("%s: dma_attach: out of rx memory\n", di->name));
			goto fail;
		}
	}

	/* allocate transmit descriptor ring, only need ntxd descriptors but it must be aligned */
	if (ntxd) {
		if (!_dma_alloc(di, DMA_TX))
			goto fail;
	}

	/* allocate receive descriptor ring, only need nrxd descriptors but it must be aligned */
	if (nrxd) {
		if (!_dma_alloc(di, DMA_RX))
			goto fail;
	}

	if ((di->ddoffsetlow != 0) && !di->addrext) {
		if (PHYSADDRLO(di->txdpa) > SI_PCI_DMA_SZ) {
			DMA_ERROR(("%s: dma_attach: txdpa 0x%x: addrext not supported\n", di->name, (u32) PHYSADDRLO(di->txdpa)));
			goto fail;
		}
		if (PHYSADDRLO(di->rxdpa) > SI_PCI_DMA_SZ) {
			DMA_ERROR(("%s: dma_attach: rxdpa 0x%x: addrext not supported\n", di->name, (u32) PHYSADDRLO(di->rxdpa)));
			goto fail;
		}
	}

	DMA_TRACE(("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x "
		   "dataoffsethigh 0x%x addrext %d\n", di->ddoffsetlow,
		   di->ddoffsethigh, di->dataoffsetlow, di->dataoffsethigh,
		   di->addrext));

	/* allocate DMA mapping vectors */
	if (DMASGLIST_ENAB) {
		if (ntxd) {
			size = ntxd * sizeof(hnddma_seg_map_t);
			di->txp_dmah = kzalloc(size, GFP_ATOMIC);
			if (di->txp_dmah == NULL)
				goto fail;
		}

		if (nrxd) {
			size = nrxd * sizeof(hnddma_seg_map_t);
			di->rxp_dmah = kzalloc(size, GFP_ATOMIC);
			if (di->rxp_dmah == NULL)
				goto fail;
		}
	}

	return (hnddma_t *) di;

 fail:
	_dma_detach(di);
	return NULL;
}
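
/*
 * Attach usage sketch (hypothetical caller and tunables, for illustration
 * only; the register pointers come from the core being driven, and
 * ntxd/nrxd must be powers of 2):
 *
 *	hnddma_t *dmah = dma_attach(osh, "wl0", sih, txregs, rxregs,
 *				    64, 64, 2048, -1, 32, 0, &msglevel);
 *	if (dmah == NULL)
 *		return NULL;	// attach failed, nothing to detach
 *
 * Passing rxextheadroom == -1 selects the BCMEXTRAHDROOM default above.
 */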

/* init the tx or rx descriptor */
static inline void
dma32_dd_upd(dma_info_t *di, dma32dd_t *ddring, dmaaddr_t pa, uint outidx,
	     u32 *flags, u32 bufcount)
{
	/* dma32 uses 32-bit control to fit both flags and bufcounter */
	*flags = *flags | (bufcount & CTRL_BC_MASK);

	if ((di->dataoffsetlow == 0) || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
		W_SM(&ddring[outidx].addr,
		     BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
		W_SM(&ddring[outidx].ctrl, BUS_SWAP32(*flags));
	} else {
		/* address extension */
		u32 ae;
		ASSERT(di->addrext);
		ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
		PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;

		*flags |= (ae << CTRL_AE_SHIFT);
		W_SM(&ddring[outidx].addr,
		     BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
		W_SM(&ddring[outidx].ctrl, BUS_SWAP32(*flags));
	}
}

/* Check for an odd number of 1's */
static inline u32 parity32(u32 data)
{
	data ^= data >> 16;
	data ^= data >> 8;
	data ^= data >> 4;
	data ^= data >> 2;
	data ^= data >> 1;

	return data & 1;
}

#define DMA64_DD_PARITY(dd)  parity32((dd)->addrlow ^ (dd)->addrhigh ^ (dd)->ctrl1 ^ (dd)->ctrl2)
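
/*
 * parity32() folds the XOR of all 32 bits down into bit 0, so it returns
 * 1 exactly when the word has an odd number of set bits. Worked examples
 * (illustrative values only): parity32(0x00000001) == 1,
 * parity32(0x00000003) == 0, parity32(0x80000001) == 0. DMA64_DD_PARITY
 * applies this to the XOR of all four descriptor words, matching the
 * check the engine performs when parity (DMA_CTRL_PEN) is enabled.
 */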

static inline void
dma64_dd_upd(dma_info_t *di, dma64dd_t *ddring, dmaaddr_t pa, uint outidx,
	     u32 *flags, u32 bufcount)
{
	u32 ctrl2 = bufcount & D64_CTRL2_BC_MASK;

	/* PCI bus with big(>1G) physical address, use address extension */
#if defined(__mips__) && defined(IL_BIGENDIAN)
	if ((di->dataoffsetlow == SI_SDRAM_SWAPPED)
	    || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
#else
	if ((di->dataoffsetlow == 0) || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
#endif				/* defined(__mips__) && defined(IL_BIGENDIAN) */
		ASSERT((PHYSADDRHI(pa) & PCI64ADDR_HIGH) == 0);

		W_SM(&ddring[outidx].addrlow,
		     BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
		W_SM(&ddring[outidx].addrhigh,
		     BUS_SWAP32(PHYSADDRHI(pa) + di->dataoffsethigh));
		W_SM(&ddring[outidx].ctrl1, BUS_SWAP32(*flags));
		W_SM(&ddring[outidx].ctrl2, BUS_SWAP32(ctrl2));
	} else {
		/* address extension for 32-bit PCI */
		u32 ae;
		ASSERT(di->addrext);

		ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
		PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;
		ASSERT(PHYSADDRHI(pa) == 0);

		ctrl2 |= (ae << D64_CTRL2_AE_SHIFT) & D64_CTRL2_AE;
		W_SM(&ddring[outidx].addrlow,
		     BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
		W_SM(&ddring[outidx].addrhigh,
		     BUS_SWAP32(0 + di->dataoffsethigh));
		W_SM(&ddring[outidx].ctrl1, BUS_SWAP32(*flags));
		W_SM(&ddring[outidx].ctrl2, BUS_SWAP32(ctrl2));
	}
	if (di->hnddma.dmactrlflags & DMA_CTRL_PEN) {
		if (DMA64_DD_PARITY(&ddring[outidx])) {
			W_SM(&ddring[outidx].ctrl2,
			     BUS_SWAP32(ctrl2 | D64_CTRL2_PARITY));
		}
	}
}

static bool _dma32_addrext(struct osl_info *osh, dma32regs_t *dma32regs)
{
	u32 w;

	OR_REG(osh, &dma32regs->control, XC_AE);
	w = R_REG(osh, &dma32regs->control);
	AND_REG(osh, &dma32regs->control, ~XC_AE);
	return (w & XC_AE) == XC_AE;
}

static bool _dma_alloc(dma_info_t *di, uint direction)
{
	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		return dma64_alloc(di, direction);
	} else if (DMA32_ENAB(di)) {
		return dma32_alloc(di, direction);
	} else
		ASSERT(0);
	/* unreachable in a correctly configured build; fail safely and keep
	 * the compiler from warning about falling off the end if ASSERT is
	 * compiled out
	 */
	return false;
}

/* !! may be called with core in reset */
static void _dma_detach(dma_info_t *di)
{

	DMA_TRACE(("%s: dma_detach\n", di->name));

	/* shouldn't be here if descriptors are unreclaimed */
	ASSERT(di->txin == di->txout);
	ASSERT(di->rxin == di->rxout);

	/* free dma descriptor rings */
	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		if (di->txd64)
			DMA_FREE_CONSISTENT(di->osh,
					    ((s8 *)di->txd64 -
					     di->txdalign), di->txdalloc,
					    (di->txdpaorig), &di->tx_dmah);
		if (di->rxd64)
			DMA_FREE_CONSISTENT(di->osh,
					    ((s8 *)di->rxd64 -
					     di->rxdalign), di->rxdalloc,
					    (di->rxdpaorig), &di->rx_dmah);
	} else if (DMA32_ENAB(di)) {
		if (di->txd32)
			DMA_FREE_CONSISTENT(di->osh,
					    ((s8 *)di->txd32 -
					     di->txdalign), di->txdalloc,
					    (di->txdpaorig), &di->tx_dmah);
		if (di->rxd32)
			DMA_FREE_CONSISTENT(di->osh,
					    ((s8 *)di->rxd32 -
					     di->rxdalign), di->rxdalloc,
					    (di->rxdpaorig), &di->rx_dmah);
	} else
		ASSERT(0);

	/* free packet pointer vectors */
	if (di->txp)
		kfree((void *)di->txp);
	if (di->rxp)
		kfree((void *)di->rxp);

	/* free tx packet DMA handles */
	if (di->txp_dmah)
		kfree(di->txp_dmah);

	/* free rx packet DMA handles */
	if (di->rxp_dmah)
		kfree(di->rxp_dmah);

	/* free our private info structure */
	kfree((void *)di);

}

static bool _dma_descriptor_align(dma_info_t *di)
{
	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		u32 addrl;

		/* Check to see if the descriptors need to be aligned on 4K/8K or not */
		if (di->d64txregs != NULL) {
			W_REG(di->osh, &di->d64txregs->addrlow, 0xff0);
			addrl = R_REG(di->osh, &di->d64txregs->addrlow);
			if (addrl != 0)
				return false;
		} else if (di->d64rxregs != NULL) {
			W_REG(di->osh, &di->d64rxregs->addrlow, 0xff0);
			addrl = R_REG(di->osh, &di->d64rxregs->addrlow);
			if (addrl != 0)
				return false;
		}
	}
	return true;
}

/* return true if this dma engine supports DmaExtendedAddrChanges, otherwise false */
static bool _dma_isaddrext(dma_info_t *di)
{
	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		/* DMA64 supports full 32- or 64-bit operation. AE is always valid */

		/* not all tx or rx channels are available */
		if (di->d64txregs != NULL) {
			if (!_dma64_addrext(di->osh, di->d64txregs)) {
				DMA_ERROR(("%s: _dma_isaddrext: DMA64 tx doesn't have AE set\n", di->name));
				ASSERT(0);
			}
			return true;
		} else if (di->d64rxregs != NULL) {
			if (!_dma64_addrext(di->osh, di->d64rxregs)) {
				DMA_ERROR(("%s: _dma_isaddrext: DMA64 rx doesn't have AE set\n", di->name));
				ASSERT(0);
			}
			return true;
		}
		return false;
	} else if (DMA32_ENAB(di)) {
		if (di->d32txregs)
			return _dma32_addrext(di->osh, di->d32txregs);
		else if (di->d32rxregs)
			return _dma32_addrext(di->osh, di->d32rxregs);
	} else
		ASSERT(0);

	return false;
}
/* initialize descriptor table base address */
static void _dma_ddtable_init(dma_info_t *di, uint direction, dmaaddr_t pa)
{
	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		if (!di->aligndesc_4k) {
			if (direction == DMA_TX)
				di->xmtptrbase = PHYSADDRLO(pa);
			else
				di->rcvptrbase = PHYSADDRLO(pa);
		}

		if ((di->ddoffsetlow == 0)
		    || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
			if (direction == DMA_TX) {
				W_REG(di->osh, &di->d64txregs->addrlow,
				      (PHYSADDRLO(pa) + di->ddoffsetlow));
				W_REG(di->osh, &di->d64txregs->addrhigh,
				      (PHYSADDRHI(pa) + di->ddoffsethigh));
			} else {
				W_REG(di->osh, &di->d64rxregs->addrlow,
				      (PHYSADDRLO(pa) + di->ddoffsetlow));
				W_REG(di->osh, &di->d64rxregs->addrhigh,
				      (PHYSADDRHI(pa) + di->ddoffsethigh));
			}
		} else {
			/* DMA64 32-bit address extension */
			u32 ae;
			ASSERT(di->addrext);
			ASSERT(PHYSADDRHI(pa) == 0);

			/* shift the high bit(s) from pa to ae */
			ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >>
			    PCI32ADDR_HIGH_SHIFT;
			PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;

			if (direction == DMA_TX) {
				W_REG(di->osh, &di->d64txregs->addrlow,
				      (PHYSADDRLO(pa) + di->ddoffsetlow));
				W_REG(di->osh, &di->d64txregs->addrhigh,
				      di->ddoffsethigh);
				SET_REG(di->osh, &di->d64txregs->control,
					D64_XC_AE, (ae << D64_XC_AE_SHIFT));
			} else {
				W_REG(di->osh, &di->d64rxregs->addrlow,
				      (PHYSADDRLO(pa) + di->ddoffsetlow));
				W_REG(di->osh, &di->d64rxregs->addrhigh,
				      di->ddoffsethigh);
				SET_REG(di->osh, &di->d64rxregs->control,
					D64_RC_AE, (ae << D64_RC_AE_SHIFT));
			}
		}

	} else if (DMA32_ENAB(di)) {
		ASSERT(PHYSADDRHI(pa) == 0);
		if ((di->ddoffsetlow == 0)
		    || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
			if (direction == DMA_TX)
				W_REG(di->osh, &di->d32txregs->addr,
				      (PHYSADDRLO(pa) + di->ddoffsetlow));
			else
				W_REG(di->osh, &di->d32rxregs->addr,
				      (PHYSADDRLO(pa) + di->ddoffsetlow));
		} else {
			/* dma32 address extension */
			u32 ae;
			ASSERT(di->addrext);

			/* shift the high bit(s) from pa to ae */
			ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >>
			    PCI32ADDR_HIGH_SHIFT;
			PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;

			if (direction == DMA_TX) {
				W_REG(di->osh, &di->d32txregs->addr,
				      (PHYSADDRLO(pa) + di->ddoffsetlow));
				SET_REG(di->osh, &di->d32txregs->control, XC_AE,
					ae << XC_AE_SHIFT);
			} else {
				W_REG(di->osh, &di->d32rxregs->addr,
				      (PHYSADDRLO(pa) + di->ddoffsetlow));
				SET_REG(di->osh, &di->d32rxregs->control, RC_AE,
					ae << RC_AE_SHIFT);
			}
		}
	} else
		ASSERT(0);
}

static void _dma_fifoloopbackenable(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_fifoloopbackenable\n", di->name));

	if (DMA64_ENAB(di) && DMA64_MODE(di))
		OR_REG(di->osh, &di->d64txregs->control, D64_XC_LE);
	else if (DMA32_ENAB(di))
		OR_REG(di->osh, &di->d32txregs->control, XC_LE);
	else
		ASSERT(0);
}
static void _dma_rxinit(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_rxinit\n", di->name));

	if (di->nrxd == 0)
		return;

	di->rxin = di->rxout = 0;

	/* clear rx descriptor ring */
	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		BZERO_SM((void *)di->rxd64,
			 (di->nrxd * sizeof(dma64dd_t)));

		/* a DMA engine without an alignment requirement requires the
		 * table to be inited before enabling the engine
		 */
		if (!di->aligndesc_4k)
			_dma_ddtable_init(di, DMA_RX, di->rxdpa);

		_dma_rxenable(di);

		if (di->aligndesc_4k)
			_dma_ddtable_init(di, DMA_RX, di->rxdpa);
	} else if (DMA32_ENAB(di)) {
		BZERO_SM((void *)di->rxd32,
			 (di->nrxd * sizeof(dma32dd_t)));
		_dma_rxenable(di);
		_dma_ddtable_init(di, DMA_RX, di->rxdpa);
	} else
		ASSERT(0);
}

static void _dma_rxenable(dma_info_t *di)
{
	uint dmactrlflags = di->hnddma.dmactrlflags;

	DMA_TRACE(("%s: dma_rxenable\n", di->name));

	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		u32 control =
		    (R_REG(di->osh, &di->d64rxregs->control) & D64_RC_AE) |
		    D64_RC_RE;

		if ((dmactrlflags & DMA_CTRL_PEN) == 0)
			control |= D64_RC_PD;

		if (dmactrlflags & DMA_CTRL_ROC)
			control |= D64_RC_OC;

		W_REG(di->osh, &di->d64rxregs->control,
		      ((di->rxoffset << D64_RC_RO_SHIFT) | control));
	} else if (DMA32_ENAB(di)) {
		u32 control =
		    (R_REG(di->osh, &di->d32rxregs->control) & RC_AE) | RC_RE;

		if ((dmactrlflags & DMA_CTRL_PEN) == 0)
			control |= RC_PD;

		if (dmactrlflags & DMA_CTRL_ROC)
			control |= RC_OC;

		W_REG(di->osh, &di->d32rxregs->control,
		      ((di->rxoffset << RC_RO_SHIFT) | control));
	} else
		ASSERT(0);
}

static void
_dma_rx_param_get(dma_info_t *di, u16 *rxoffset, u16 *rxbufsize)
{
	/* the normal values fit into 16 bits */
	*rxoffset = (u16) di->rxoffset;
	*rxbufsize = (u16) di->rxbufsize;
}

/* !! rx entry routine
 * returns a pointer to the next frame received, or NULL if there are no more.
 * If DMA_CTRL_RXMULTI is defined, DMA scattering (multiple buffers) is
 * supported with a pkt chain; otherwise an oversized frame is treated as
 * a giant pkt and is tossed.
 * The DMA scattering starts with a normal DMA header, followed by the first
 * buffer's data. After it reaches the max buffer size, the data continues
 * in the next DMA descriptor buffer WITHOUT a DMA header.
 */
static void *BCMFASTPATH _dma_rx(dma_info_t *di)
{
	struct sk_buff *p, *head, *tail;
	uint len;
	uint pkt_len;
	int resid = 0;

 next_frame:
	head = _dma_getnextrxp(di, false);
	if (head == NULL)
		return NULL;

	len = ltoh16(*(u16 *) (head->data));
	DMA_TRACE(("%s: dma_rx len %d\n", di->name, len));

#if defined(__mips__)
	if (!len) {
		while (!(len = *(u16 *) OSL_UNCACHED(head->data)))
			udelay(1);

		*(u16 *) (head->data) = htol16((u16) len);
	}
#endif				/* defined(__mips__) */

	/* set actual length */
	pkt_len = min((di->rxoffset + len), di->rxbufsize);
	__skb_trim(head, pkt_len);
	resid = len - (di->rxbufsize - di->rxoffset);

	/* check for single or multi-buffer rx */
	if (resid > 0) {
		tail = head;
		while ((resid > 0) && (p = _dma_getnextrxp(di, false))) {
			tail->next = p;
			pkt_len = min(resid, (int)di->rxbufsize);
			__skb_trim(p, pkt_len);

			tail = p;
			resid -= di->rxbufsize;
		}

#ifdef BCMDBG
		if (resid > 0) {
			uint cur;
			ASSERT(p == NULL);
			cur = (DMA64_ENAB(di) && DMA64_MODE(di)) ?
			    B2I(((R_REG(di->osh, &di->d64rxregs->status0) &
				  D64_RS0_CD_MASK) -
				 di->rcvptrbase) & D64_RS0_CD_MASK,
				dma64dd_t) : B2I(R_REG(di->osh,
						       &di->d32rxregs->
						       status) & RS_CD_MASK,
						 dma32dd_t);
			DMA_ERROR(("_dma_rx, rxin %d rxout %d, hw_curr %d\n",
				   di->rxin, di->rxout, cur));
		}
#endif				/* BCMDBG */

		if ((di->hnddma.dmactrlflags & DMA_CTRL_RXMULTI) == 0) {
			DMA_ERROR(("%s: dma_rx: bad frame length (%d)\n",
				   di->name, len));
			PKTFREE(di->osh, head, false);
			di->hnddma.rxgiants++;
			goto next_frame;
		}
	}

	return head;
}

/* post receive buffers
 * return false if refill failed completely and the ring is empty;
 * this will stall the rx dma and the user might want to call rxfill again asap.
 * This unlikely happens on a memory-rich NIC, but often on a memory-constrained dongle
 */
static bool BCMFASTPATH _dma_rxfill(dma_info_t *di)
{
	struct sk_buff *p;
	u16 rxin, rxout;
	u32 flags = 0;
	uint n;
	uint i;
	dmaaddr_t pa;
	uint extra_offset = 0;
	bool ring_empty;

	ring_empty = false;

	/*
	 * Determine how many receive buffers we're lacking
	 * from the full complement, allocate, initialize,
	 * and post them, then update the chip rx lastdscr.
	 */

	rxin = di->rxin;
	rxout = di->rxout;

	n = di->nrxpost - NRXDACTIVE(rxin, rxout);

	DMA_TRACE(("%s: dma_rxfill: post %d\n", di->name, n));

	if (di->rxbufsize > BCMEXTRAHDROOM)
		extra_offset = di->rxextrahdrroom;

	for (i = 0; i < n; i++) {
		/* the di->rxbufsize doesn't include the extra headroom, we need to add it to the
		   size to be allocated
		 */

		p = osl_pktget(di->osh, di->rxbufsize + extra_offset);

		if (p == NULL) {
			DMA_ERROR(("%s: dma_rxfill: out of rxbufs\n",
				   di->name));
			if (i == 0) {
				if (DMA64_ENAB(di) && DMA64_MODE(di)) {
					if (dma64_rxidle(di)) {
						DMA_ERROR(("%s: rxfill64: ring is empty !\n", di->name));
						ring_empty = true;
					}
				} else if (DMA32_ENAB(di)) {
					if (dma32_rxidle(di)) {
						DMA_ERROR(("%s: rxfill32: ring is empty !\n", di->name));
						ring_empty = true;
					}
				} else
					ASSERT(0);
			}
			di->hnddma.rxnobuf++;
			break;
		}
		/* reserve an extra headroom, if applicable */
		if (extra_offset)
			skb_pull(p, extra_offset);

		/* Do a cached write instead of uncached write since DMA_MAP
		 * will flush the cache.
		 */
		*(u32 *) (p->data) = 0;

		if (DMASGLIST_ENAB)
			memset(&di->rxp_dmah[rxout], 0,
			       sizeof(hnddma_seg_map_t));

		pa = DMA_MAP(di->osh, p->data,
			     di->rxbufsize, DMA_RX, p, &di->rxp_dmah[rxout]);

		ASSERT(IS_ALIGNED(PHYSADDRLO(pa), 4));

		/* save the free packet pointer */
		ASSERT(di->rxp[rxout] == NULL);
		di->rxp[rxout] = p;

		/* reset flags for each descriptor */
		flags = 0;
		if (DMA64_ENAB(di) && DMA64_MODE(di)) {
			if (rxout == (di->nrxd - 1))
				flags = D64_CTRL1_EOT;

			dma64_dd_upd(di, di->rxd64, pa, rxout, &flags,
				     di->rxbufsize);
		} else if (DMA32_ENAB(di)) {
			if (rxout == (di->nrxd - 1))
				flags = CTRL_EOT;

			ASSERT(PHYSADDRHI(pa) == 0);
			dma32_dd_upd(di, di->rxd32, pa, rxout, &flags,
				     di->rxbufsize);
		} else
			ASSERT(0);
		rxout = NEXTRXD(rxout);
	}

	di->rxout = rxout;

	/* update the chip lastdscr pointer */
	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		W_REG(di->osh, &di->d64rxregs->ptr,
		      di->rcvptrbase + I2B(rxout, dma64dd_t));
	} else if (DMA32_ENAB(di)) {
		W_REG(di->osh, &di->d32rxregs->ptr, I2B(rxout, dma32dd_t));
	} else
		ASSERT(0);

	return ring_empty;
}

/* like getnexttxp but no reclaim */
static void *_dma_peeknexttxp(dma_info_t *di)
{
	uint end, i;

	if (di->ntxd == 0)
		return NULL;

	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		end =
		    B2I(((R_REG(di->osh, &di->d64txregs->status0) &
			  D64_XS0_CD_MASK) - di->xmtptrbase) & D64_XS0_CD_MASK,
			dma64dd_t);
	} else if (DMA32_ENAB(di)) {
		end =
		    B2I(R_REG(di->osh, &di->d32txregs->status) & XS_CD_MASK,
			dma32dd_t);
	} else
		ASSERT(0);

	for (i = di->txin; i != end; i = NEXTTXD(i))
		if (di->txp[i])
			return di->txp[i];

	return NULL;
}

/* like getnextrxp but does not take it off the ring */
static void *_dma_peeknextrxp(dma_info_t *di)
{
	uint end, i;

	if (di->nrxd == 0)
		return NULL;

	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		end =
		    B2I(((R_REG(di->osh, &di->d64rxregs->status0) &
			  D64_RS0_CD_MASK) - di->rcvptrbase) & D64_RS0_CD_MASK,
			dma64dd_t);
	} else if (DMA32_ENAB(di)) {
		end =
		    B2I(R_REG(di->osh, &di->d32rxregs->status) & RS_CD_MASK,
			dma32dd_t);
	} else
		ASSERT(0);

	for (i = di->rxin; i != end; i = NEXTRXD(i))
		if (di->rxp[i])
			return di->rxp[i];

	return NULL;
}

static void _dma_rxreclaim(dma_info_t *di)
{
	void *p;

	/* "unused local" warning suppression for OSLs that
	 * define PKTFREE() without using the di->osh arg
	 */
	di = di;

	DMA_TRACE(("%s: dma_rxreclaim\n", di->name));

	while ((p = _dma_getnextrxp(di, true)))
		PKTFREE(di->osh, p, false);
}

static void *BCMFASTPATH _dma_getnextrxp(dma_info_t *di, bool forceall)
{
	if (di->nrxd == 0)
		return NULL;

	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		return dma64_getnextrxp(di, forceall);
	} else if (DMA32_ENAB(di)) {
		return dma32_getnextrxp(di, forceall);
	} else
		ASSERT(0);
	/* unreachable in a correctly configured build; avoid falling off the
	 * end of a pointer-returning function if ASSERT is compiled out
	 */
	return NULL;
}

static void _dma_txblock(dma_info_t *di)
{
	di->hnddma.txavail = 0;
}

static void _dma_txunblock(dma_info_t *di)
{
	di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
}

static uint _dma_txactive(dma_info_t *di)
{
	return NTXDACTIVE(di->txin, di->txout);
}

static uint _dma_txpending(dma_info_t *di)
{
	uint curr;

	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		curr =
		    B2I(((R_REG(di->osh, &di->d64txregs->status0) &
			  D64_XS0_CD_MASK) - di->xmtptrbase) & D64_XS0_CD_MASK,
			dma64dd_t);
	} else if (DMA32_ENAB(di)) {
		curr =
		    B2I(R_REG(di->osh, &di->d32txregs->status) & XS_CD_MASK,
			dma32dd_t);
	} else
		ASSERT(0);

	return NTXDACTIVE(curr, di->txout);
}

static uint _dma_txcommitted(dma_info_t *di)
{
	uint ptr;
	uint txin = di->txin;

	if (txin == di->txout)
		return 0;

	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		ptr = B2I(R_REG(di->osh, &di->d64txregs->ptr), dma64dd_t);
	} else if (DMA32_ENAB(di)) {
		ptr = B2I(R_REG(di->osh, &di->d32txregs->ptr), dma32dd_t);
	} else
		ASSERT(0);

	return NTXDACTIVE(di->txin, ptr);
}

static uint _dma_rxactive(dma_info_t *di)
{
	return NRXDACTIVE(di->rxin, di->rxout);
}

static void _dma_counterreset(dma_info_t *di)
{
	/* reset all software counters */
	di->hnddma.rxgiants = 0;
	di->hnddma.rxnobuf = 0;
	di->hnddma.txnobuf = 0;
}

static uint _dma_ctrlflags(dma_info_t *di, uint mask, uint flags)
{
	uint dmactrlflags;

	/* check the handle before dereferencing it (the original read
	 * di->hnddma.dmactrlflags and di->name before this test)
	 */
	if (di == NULL) {
		DMA_NONE(("_dma_ctrlflags: NULL dma handle\n"));
		return 0;
	}

	dmactrlflags = di->hnddma.dmactrlflags;

	ASSERT((flags & ~mask) == 0);

	dmactrlflags &= ~mask;
	dmactrlflags |= flags;

	/* If trying to enable parity, check if parity is actually supported */
	if (dmactrlflags & DMA_CTRL_PEN) {
		u32 control;

		if (DMA64_ENAB(di) && DMA64_MODE(di)) {
			control = R_REG(di->osh, &di->d64txregs->control);
			W_REG(di->osh, &di->d64txregs->control,
			      control | D64_XC_PD);
			if (R_REG(di->osh, &di->d64txregs->control) & D64_XC_PD) {
				/* We *can* disable it so it is supported,
				 * restore control register
				 */
				W_REG(di->osh, &di->d64txregs->control,
				      control);
			} else {
				/* Not supported, don't allow it to be enabled */
				dmactrlflags &= ~DMA_CTRL_PEN;
			}
		} else if (DMA32_ENAB(di)) {
			control = R_REG(di->osh, &di->d32txregs->control);
			W_REG(di->osh, &di->d32txregs->control,
			      control | XC_PD);
			if (R_REG(di->osh, &di->d32txregs->control) & XC_PD) {
				W_REG(di->osh, &di->d32txregs->control,
				      control);
			} else {
				/* Not supported, don't allow it to be enabled */
				dmactrlflags &= ~DMA_CTRL_PEN;
			}
		} else
			ASSERT(0);
	}

	di->hnddma.dmactrlflags = dmactrlflags;

	return dmactrlflags;
}

/* get the address of the var in order to change it later */
static unsigned long _dma_getvar(dma_info_t *di, const char *name)
{
	if (!strcmp(name, "&txavail"))
		return (unsigned long)&(di->hnddma.txavail);
	else {
		ASSERT(0);
	}
	return 0;
}
1376
Brett Rudleye69284f2010-11-16 15:45:48 -08001377void dma_txpioloopback(struct osl_info *osh, dma32regs_t *regs)
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001378{
1379 OR_REG(osh, &regs->control, XC_LE);
1380}
1381
1382static
Greg Kroah-Hartman36ef9a12010-10-05 10:02:49 -07001383u8 dma_align_sizetobits(uint size)
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001384{
Greg Kroah-Hartman36ef9a12010-10-05 10:02:49 -07001385 u8 bitpos = 0;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001386 ASSERT(size);
1387 ASSERT(!(size & (size - 1)));
1388 while (size >>= 1) {
1389 bitpos++;
1390 }
Jason Cooper90ea2292010-09-14 09:45:32 -04001391 return bitpos;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001392}
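/*
 * Worked example: dma_align_sizetobits(4096) shifts twelve times and
 * returns 12, i.e. a 4096-byte ring wants a 1 << 12 byte alignment.
 * The ASSERTs above restrict the argument to a nonzero power of two,
 * so the loop computes an exact log2.
 */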
1393
/* This function ensures that the DMA descriptor ring does not get allocated
 * across a page boundary. If the first allocation crosses a page boundary,
 * it is freed and the allocation is retried at a location aligned to the
 * descriptor ring size, which guarantees the ring cannot cross a page
 * boundary.
 */
Brett Rudleye69284f2010-11-16 15:45:48 -08001400static void *dma_ringalloc(struct osl_info *osh, u32 boundary, uint size,
Greg Kroah-Hartman7d4df482010-10-07 17:04:47 -07001401 u16 *alignbits, uint *alloced,
Jason Cooper7cc4a4c2010-09-14 09:45:30 -04001402 dmaaddr_t *descpa, osldma_t **dmah)
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001403{
1404 void *va;
Greg Kroah-Hartman66cbd3a2010-10-08 11:05:47 -07001405 u32 desc_strtaddr;
1406 u32 alignbytes = 1 << *alignbits;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001407
Jason Cooperca8c1e52010-09-14 09:45:33 -04001408 va = DMA_ALLOC_CONSISTENT(osh, size, *alignbits, alloced, descpa,
1409 dmah);
1410 if (NULL == va)
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001411 return NULL;
1412
Greg Kroah-Hartmanf024c482010-10-21 10:50:21 -07001413 desc_strtaddr = (u32) roundup((unsigned long)va, alignbytes);
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001414 if (((desc_strtaddr + size - 1) & boundary) != (desc_strtaddr
1415 & boundary)) {
1416 *alignbits = dma_align_sizetobits(size);
1417 DMA_FREE_CONSISTENT(osh, va, size, *descpa, dmah);
1418 va = DMA_ALLOC_CONSISTENT(osh, size, *alignbits, alloced,
1419 descpa, dmah);
1420 }
1421 return va;
1422}
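/*
 * Worked example (hypothetical addresses, assuming a 4 KB boundary
 * mask): a 2 KB ring returned at va = 0x1fc00 ends at 0x203ff and
 * straddles the 0x20000 line, so the check above fires.  The buffer
 * is freed and re-allocated with *alignbits = 11, i.e. aligned to its
 * own size; a size-aligned power-of-two block can never cross a larger
 * power-of-two boundary.
 */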
1423
1424/* 32-bit DMA functions */
1425
Jason Cooper7cc4a4c2010-09-14 09:45:30 -04001426static void dma32_txinit(dma_info_t *di)
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001427{
Greg Kroah-Hartman66cbd3a2010-10-08 11:05:47 -07001428 u32 control = XC_XE;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001429
1430 DMA_TRACE(("%s: dma_txinit\n", di->name));
1431
1432 if (di->ntxd == 0)
1433 return;
1434
1435 di->txin = di->txout = 0;
1436 di->hnddma.txavail = di->ntxd - 1;
1437
1438 /* clear tx descriptor ring */
Greg Kroah-Hartmanc03b63c2010-10-08 11:20:01 -07001439 BZERO_SM((void *)di->txd32, (di->ntxd * sizeof(dma32dd_t)));
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001440
1441 if ((di->hnddma.dmactrlflags & DMA_CTRL_PEN) == 0)
1442 control |= XC_PD;
1443 W_REG(di->osh, &di->d32txregs->control, control);
1444 _dma_ddtable_init(di, DMA_TX, di->txdpa);
1445}
1446
Jason Cooper7cc4a4c2010-09-14 09:45:30 -04001447static bool dma32_txenabled(dma_info_t *di)
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001448{
Greg Kroah-Hartman66cbd3a2010-10-08 11:05:47 -07001449 u32 xc;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001450
1451 /* If the chip is dead, it is not enabled :-) */
1452 xc = R_REG(di->osh, &di->d32txregs->control);
Jason Cooper90ea2292010-09-14 09:45:32 -04001453 return (xc != 0xffffffff) && (xc & XC_XE);
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001454}
1455
Jason Cooper7cc4a4c2010-09-14 09:45:30 -04001456static void dma32_txsuspend(dma_info_t *di)
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001457{
1458 DMA_TRACE(("%s: dma_txsuspend\n", di->name));
1459
1460 if (di->ntxd == 0)
1461 return;
1462
1463 OR_REG(di->osh, &di->d32txregs->control, XC_SE);
1464}
1465
Jason Cooper7cc4a4c2010-09-14 09:45:30 -04001466static void dma32_txresume(dma_info_t *di)
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001467{
1468 DMA_TRACE(("%s: dma_txresume\n", di->name));
1469
1470 if (di->ntxd == 0)
1471 return;
1472
1473 AND_REG(di->osh, &di->d32txregs->control, ~XC_SE);
1474}
1475
Jason Cooper7cc4a4c2010-09-14 09:45:30 -04001476static bool dma32_txsuspended(dma_info_t *di)
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001477{
1478 return (di->ntxd == 0)
1479 || ((R_REG(di->osh, &di->d32txregs->control) & XC_SE) == XC_SE);
1480}
1481
Jason Cooper7cc4a4c2010-09-14 09:45:30 -04001482static void dma32_txreclaim(dma_info_t *di, txd_range_t range)
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001483{
1484 void *p;
1485
1486 DMA_TRACE(("%s: dma_txreclaim %s\n", di->name,
1487 (range == HNDDMA_RANGE_ALL) ? "all" :
1488 ((range ==
1489 HNDDMA_RANGE_TRANSMITTED) ? "transmitted" :
1490 "transfered")));
1491
1492 if (di->txin == di->txout)
1493 return;
1494
1495 while ((p = dma32_getnexttxp(di, range)))
Greg Kroah-Hartman0f0881b2010-10-12 12:15:18 -07001496 PKTFREE(di->osh, p, true);
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001497}
1498
Jason Cooper7cc4a4c2010-09-14 09:45:30 -04001499static bool dma32_txstopped(dma_info_t *di)
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001500{
1501 return ((R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK) ==
1502 XS_XS_STOPPED);
1503}
1504
Jason Cooper7cc4a4c2010-09-14 09:45:30 -04001505static bool dma32_rxstopped(dma_info_t *di)
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001506{
1507 return ((R_REG(di->osh, &di->d32rxregs->status) & RS_RS_MASK) ==
1508 RS_RS_STOPPED);
1509}
1510
Jason Cooper7cc4a4c2010-09-14 09:45:30 -04001511static bool dma32_alloc(dma_info_t *di, uint direction)
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001512{
1513 uint size;
1514 uint ddlen;
1515 void *va;
1516 uint alloced;
Greg Kroah-Hartman7d4df482010-10-07 17:04:47 -07001517 u16 align;
1518 u16 align_bits;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001519
1520 ddlen = sizeof(dma32dd_t);
1521
1522 size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);
1523
1524 alloced = 0;
1525 align_bits = di->dmadesc_align;
1526 align = (1 << align_bits);
1527
1528 if (direction == DMA_TX) {
Jason Cooperca8c1e52010-09-14 09:45:33 -04001529 va = dma_ringalloc(di->osh, D32RINGALIGN, size, &align_bits,
1530 &alloced, &di->txdpaorig, &di->tx_dmah);
1531 if (va == NULL) {
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001532 DMA_ERROR(("%s: dma_alloc: DMA_ALLOC_CONSISTENT(ntxd) failed\n", di->name));
Greg Kroah-Hartman0965ae82010-10-12 12:50:15 -07001533 return false;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001534 }
1535
1536 PHYSADDRHISET(di->txdpa, 0);
1537 ASSERT(PHYSADDRHI(di->txdpaorig) == 0);
Greg Kroah-Hartmanf024c482010-10-21 10:50:21 -07001538 di->txd32 = (dma32dd_t *) roundup((unsigned long)va, align);
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001539 di->txdalign =
Greg Kroah-Hartmanc03b63c2010-10-08 11:20:01 -07001540 (uint) ((s8 *)di->txd32 - (s8 *) va);
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001541
1542 PHYSADDRLOSET(di->txdpa,
1543 PHYSADDRLO(di->txdpaorig) + di->txdalign);
1544 /* Make sure that alignment didn't overflow */
1545 ASSERT(PHYSADDRLO(di->txdpa) >= PHYSADDRLO(di->txdpaorig));
1546
1547 di->txdalloc = alloced;
Greg Kroah-Hartmanf024c482010-10-21 10:50:21 -07001548 ASSERT(IS_ALIGNED((unsigned long)di->txd32, align));
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001549 } else {
Jason Cooperca8c1e52010-09-14 09:45:33 -04001550 va = dma_ringalloc(di->osh, D32RINGALIGN, size, &align_bits,
1551 &alloced, &di->rxdpaorig, &di->rx_dmah);
1552 if (va == NULL) {
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001553 DMA_ERROR(("%s: dma_alloc: DMA_ALLOC_CONSISTENT(nrxd) failed\n", di->name));
Greg Kroah-Hartman0965ae82010-10-12 12:50:15 -07001554 return false;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001555 }
1556
1557 PHYSADDRHISET(di->rxdpa, 0);
1558 ASSERT(PHYSADDRHI(di->rxdpaorig) == 0);
Greg Kroah-Hartmanf024c482010-10-21 10:50:21 -07001559 di->rxd32 = (dma32dd_t *) roundup((unsigned long)va, align);
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001560 di->rxdalign =
Greg Kroah-Hartmanc03b63c2010-10-08 11:20:01 -07001561 (uint) ((s8 *)di->rxd32 - (s8 *) va);
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001562
1563 PHYSADDRLOSET(di->rxdpa,
1564 PHYSADDRLO(di->rxdpaorig) + di->rxdalign);
1565 /* Make sure that alignment didn't overflow */
1566 ASSERT(PHYSADDRLO(di->rxdpa) >= PHYSADDRLO(di->rxdpaorig));
1567 di->rxdalloc = alloced;
Greg Kroah-Hartmanf024c482010-10-21 10:50:21 -07001568 ASSERT(IS_ALIGNED((unsigned long)di->rxd32, align));
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001569 }
1570
Greg Kroah-Hartman0f0881b2010-10-12 12:15:18 -07001571 return true;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001572}
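/*
 * Worked example (hypothetical values): with align = 16 and
 * DMA_ALLOC_CONSISTENT() returning va = 0x1008, roundup() places
 * txd32 at 0x1010, txdalign becomes 8, and the same 8 bytes are added
 * to the physical address, keeping the virtual and physical views of
 * the descriptor ring in step.
 */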
1573
Jason Cooper7cc4a4c2010-09-14 09:45:30 -04001574static bool dma32_txreset(dma_info_t *di)
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001575{
Greg Kroah-Hartman66cbd3a2010-10-08 11:05:47 -07001576 u32 status;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001577
1578 if (di->ntxd == 0)
Greg Kroah-Hartman0f0881b2010-10-12 12:15:18 -07001579 return true;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001580
1581 /* suspend tx DMA first */
1582 W_REG(di->osh, &di->d32txregs->control, XC_SE);
1583 SPINWAIT(((status =
1584 (R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK))
1585 != XS_XS_DISABLED) && (status != XS_XS_IDLE)
1586 && (status != XS_XS_STOPPED), (10000));
1587
1588 W_REG(di->osh, &di->d32txregs->control, 0);
1589 SPINWAIT(((status = (R_REG(di->osh,
1590 &di->d32txregs->status) & XS_XS_MASK)) !=
1591 XS_XS_DISABLED), 10000);
1592
1593 /* wait for the last transaction to complete */
mike.rapoport@gmail.com73831412010-10-13 00:09:07 +02001594 udelay(300);
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001595
Jason Cooper90ea2292010-09-14 09:45:32 -04001596 return status == XS_XS_DISABLED;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001597}
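/*
 * Aside: SPINWAIT(cond, us) is assumed to poll "cond" for at most "us"
 * microseconds.  A minimal open-coded equivalent of the reset-polling
 * idiom above (illustrative; the driver macro may differ in step size):
 */
static inline void example_poll_status(volatile u32 *status_reg, u32 mask,
				       u32 want, uint us)
{
	/* spin until the masked status matches or the budget runs out */
	while (((*status_reg & mask) != want) && us-- != 0)
		udelay(1);
}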
1598
Jason Cooper7cc4a4c2010-09-14 09:45:30 -04001599static bool dma32_rxidle(dma_info_t *di)
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001600{
1601 DMA_TRACE(("%s: dma_rxidle\n", di->name));
1602
1603 if (di->nrxd == 0)
Greg Kroah-Hartman0f0881b2010-10-12 12:15:18 -07001604 return true;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001605
1606 return ((R_REG(di->osh, &di->d32rxregs->status) & RS_CD_MASK) ==
1607 R_REG(di->osh, &di->d32rxregs->ptr));
1608}
1609
Jason Cooper7cc4a4c2010-09-14 09:45:30 -04001610static bool dma32_rxreset(dma_info_t *di)
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001611{
Greg Kroah-Hartman66cbd3a2010-10-08 11:05:47 -07001612 u32 status;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001613
1614 if (di->nrxd == 0)
Greg Kroah-Hartman0f0881b2010-10-12 12:15:18 -07001615 return true;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001616
1617 W_REG(di->osh, &di->d32rxregs->control, 0);
1618 SPINWAIT(((status = (R_REG(di->osh,
1619 &di->d32rxregs->status) & RS_RS_MASK)) !=
1620 RS_RS_DISABLED), 10000);
1621
Jason Cooper90ea2292010-09-14 09:45:32 -04001622 return status == RS_RS_DISABLED;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001623}
1624
Jason Cooper7cc4a4c2010-09-14 09:45:30 -04001625static bool dma32_rxenabled(dma_info_t *di)
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001626{
Greg Kroah-Hartman66cbd3a2010-10-08 11:05:47 -07001627 u32 rc;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001628
1629 rc = R_REG(di->osh, &di->d32rxregs->control);
Jason Cooper90ea2292010-09-14 09:45:32 -04001630 return (rc != 0xffffffff) && (rc & RC_RE);
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001631}
1632
Jason Cooper7cc4a4c2010-09-14 09:45:30 -04001633static bool dma32_txsuspendedidle(dma_info_t *di)
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001634{
1635 if (di->ntxd == 0)
Greg Kroah-Hartman0f0881b2010-10-12 12:15:18 -07001636 return true;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001637
	if (!(R_REG(di->osh, &di->d32txregs->control) & XC_SE))
		return false;

	if ((R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK) != XS_XS_IDLE)
		return false;
1643
mike.rapoport@gmail.com73831412010-10-13 00:09:07 +02001644 udelay(2);
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001645 return ((R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK) ==
1646 XS_XS_IDLE);
1647}
1648
1649/* !! tx entry routine
 * supports full 32-bit dma engine buffer addressing so
 * dma buffers can cross 4 Kbyte page boundaries.
 *
 * WARNING: the caller must check the return value for error.
 * An error (tossed frames) could be fatal and cause many subsequent
 * hard-to-debug problems.
1655 */
Arend van Sprielc26b1372010-11-23 14:06:23 +01001656static int dma32_txfast(dma_info_t *di, struct sk_buff *p0, bool commit)
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001657{
Arend van Sprielc26b1372010-11-23 14:06:23 +01001658 struct sk_buff *p, *next;
Greg Kroah-Hartman580a0bd2010-10-05 11:09:48 -07001659 unsigned char *data;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001660 uint len;
Greg Kroah-Hartman7d4df482010-10-07 17:04:47 -07001661 u16 txout;
Greg Kroah-Hartman66cbd3a2010-10-08 11:05:47 -07001662 u32 flags = 0;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001663 dmaaddr_t pa;
1664
1665 DMA_TRACE(("%s: dma_txfast\n", di->name));
1666
1667 txout = di->txout;
1668
1669 /*
1670 * Walk the chain of packet buffers
1671 * allocating and initializing transmit descriptor entries.
1672 */
1673 for (p = p0; p; p = next) {
1674 uint nsegs, j;
1675 hnddma_seg_map_t *map;
1676
Arend van Spriel54991ad2010-11-23 14:06:24 +01001677 data = p->data;
1678 len = p->len;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001679#ifdef BCM_DMAPAD
1680 len += PKTDMAPAD(di->osh, p);
1681#endif
Arend van Spriel54991ad2010-11-23 14:06:24 +01001682 next = p->next;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001683
1684 /* return nonzero if out of tx descriptors */
1685 if (NEXTTXD(txout) == di->txin)
1686 goto outoftxd;
1687
1688 if (len == 0)
1689 continue;
1690
1691 if (DMASGLIST_ENAB)
Brett Rudley9249ede2010-11-30 20:09:49 -08001692 memset(&di->txp_dmah[txout], 0,
1693 sizeof(hnddma_seg_map_t));
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001694
1695 /* get physical address of buffer start */
1696 pa = DMA_MAP(di->osh, data, len, DMA_TX, p,
1697 &di->txp_dmah[txout]);
1698
1699 if (DMASGLIST_ENAB) {
1700 map = &di->txp_dmah[txout];
1701
1702 /* See if all the segments can be accounted for */
1703 if (map->nsegs >
1704 (uint) (di->ntxd - NTXDACTIVE(di->txin, di->txout) -
1705 1))
1706 goto outoftxd;
1707
1708 nsegs = map->nsegs;
1709 } else
1710 nsegs = 1;
1711
1712 for (j = 1; j <= nsegs; j++) {
1713 flags = 0;
1714 if (p == p0 && j == 1)
1715 flags |= CTRL_SOF;
1716
			/* With a DMA segment list, the descriptor table is
			 * filled from the segment list instead of looping
			 * over buffers in multi-chain DMA. EOF for SGLIST is
			 * therefore reached at the end of the segment list.
			 */
1722 if ((!DMASGLIST_ENAB && next == NULL) ||
1723 (DMASGLIST_ENAB && j == nsegs))
1724 flags |= (CTRL_IOC | CTRL_EOF);
1725 if (txout == (di->ntxd - 1))
1726 flags |= CTRL_EOT;
1727
1728 if (DMASGLIST_ENAB) {
1729 len = map->segs[j - 1].length;
1730 pa = map->segs[j - 1].addr;
1731 }
1732 ASSERT(PHYSADDRHI(pa) == 0);
1733
1734 dma32_dd_upd(di, di->txd32, pa, txout, &flags, len);
1735 ASSERT(di->txp[txout] == NULL);
1736
1737 txout = NEXTTXD(txout);
1738 }
1739
1740 /* See above. No need to loop over individual buffers */
1741 if (DMASGLIST_ENAB)
1742 break;
1743 }
1744
1745 /* if last txd eof not set, fix it */
1746 if (!(flags & CTRL_EOF))
1747 W_SM(&di->txd32[PREVTXD(txout)].ctrl,
1748 BUS_SWAP32(flags | CTRL_IOC | CTRL_EOF));
1749
1750 /* save the packet */
1751 di->txp[PREVTXD(txout)] = p0;
1752
1753 /* bump the tx descriptor index */
1754 di->txout = txout;
1755
1756 /* kick the chip */
1757 if (commit)
1758 W_REG(di->osh, &di->d32txregs->ptr, I2B(txout, dma32dd_t));
1759
1760 /* tx flow control */
1761 di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
1762
Jason Cooper90ea2292010-09-14 09:45:32 -04001763 return 0;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001764
1765 outoftxd:
1766 DMA_ERROR(("%s: dma_txfast: out of txds\n", di->name));
Greg Kroah-Hartman0f0881b2010-10-12 12:15:18 -07001767 PKTFREE(di->osh, p0, true);
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001768 di->hnddma.txavail = 0;
1769 di->hnddma.txnobuf++;
Jason Cooper90ea2292010-09-14 09:45:32 -04001770 return -1;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001771}
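/*
 * Worked example of the control-flag layout built above (illustrative):
 * posting a three-buffer chain into an empty 8-entry ring at txout = 6
 * yields
 *
 *	txd[6]: CTRL_SOF                (first buffer of the frame)
 *	txd[7]: CTRL_EOT                (last physical ring entry)
 *	txd[0]: CTRL_EOF | CTRL_IOC     (last buffer, interrupt on done)
 *
 * exactly one SOF/EOF pair frames the packet, EOT marks the ring wrap,
 * and IOC asks for a completion interrupt on the final descriptor.
 */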
1772
1773/*
1774 * Reclaim next completed txd (txds if using chained buffers) in the range
1775 * specified and return associated packet.
 * If range is HNDDMA_RANGE_TRANSMITTED, reclaim descriptors that have been
 * transmitted as noted by the hardware "CurrDescr" pointer.
 * If range is HNDDMA_RANGE_TRANSFERED, reclaim descriptors that have been
 * transferred by the DMA as noted by the hardware "ActiveDescr" pointer.
1780 * If range is HNDDMA_RANGE_ALL, reclaim all txd(s) posted to the ring and
1781 * return associated packet regardless of the value of hardware pointers.
1782 */
Jason Cooper7cc4a4c2010-09-14 09:45:30 -04001783static void *dma32_getnexttxp(dma_info_t *di, txd_range_t range)
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001784{
Greg Kroah-Hartman7d4df482010-10-07 17:04:47 -07001785 u16 start, end, i;
1786 u16 active_desc;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001787 void *txp;
1788
1789 DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name,
1790 (range == HNDDMA_RANGE_ALL) ? "all" :
1791 ((range ==
1792 HNDDMA_RANGE_TRANSMITTED) ? "transmitted" :
1793 "transfered")));
1794
1795 if (di->ntxd == 0)
Jason Cooper90ea2292010-09-14 09:45:32 -04001796 return NULL;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001797
1798 txp = NULL;
1799
1800 start = di->txin;
1801 if (range == HNDDMA_RANGE_ALL)
1802 end = di->txout;
1803 else {
1804 dma32regs_t *dregs = di->d32txregs;
1805
1806 end =
Greg Kroah-Hartman7d4df482010-10-07 17:04:47 -07001807 (u16) B2I(R_REG(di->osh, &dregs->status) & XS_CD_MASK,
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001808 dma32dd_t);
1809
1810 if (range == HNDDMA_RANGE_TRANSFERED) {
1811 active_desc =
Greg Kroah-Hartman7d4df482010-10-07 17:04:47 -07001812 (u16) ((R_REG(di->osh, &dregs->status) &
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001813 XS_AD_MASK) >> XS_AD_SHIFT);
Greg Kroah-Hartman7d4df482010-10-07 17:04:47 -07001814 active_desc = (u16) B2I(active_desc, dma32dd_t);
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001815 if (end != active_desc)
1816 end = PREVTXD(active_desc);
1817 }
1818 }
1819
1820 if ((start == 0) && (end > di->txout))
1821 goto bogus;
1822
1823 for (i = start; i != end && !txp; i = NEXTTXD(i)) {
1824 dmaaddr_t pa;
1825 hnddma_seg_map_t *map = NULL;
1826 uint size, j, nsegs;
1827
1828 PHYSADDRLOSET(pa,
1829 (BUS_SWAP32(R_SM(&di->txd32[i].addr)) -
1830 di->dataoffsetlow));
1831 PHYSADDRHISET(pa, 0);
1832
1833 if (DMASGLIST_ENAB) {
1834 map = &di->txp_dmah[i];
1835 size = map->origsize;
1836 nsegs = map->nsegs;
1837 } else {
1838 size =
1839 (BUS_SWAP32(R_SM(&di->txd32[i].ctrl)) &
1840 CTRL_BC_MASK);
1841 nsegs = 1;
1842 }
1843
1844 for (j = nsegs; j > 0; j--) {
1845 W_SM(&di->txd32[i].addr, 0xdeadbeef);
1846
1847 txp = di->txp[i];
1848 di->txp[i] = NULL;
1849 if (j > 1)
1850 i = NEXTTXD(i);
1851 }
1852
1853 DMA_UNMAP(di->osh, pa, size, DMA_TX, txp, map);
1854 }
1855
1856 di->txin = i;
1857
1858 /* tx flow control */
1859 di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
1860
Jason Cooper90ea2292010-09-14 09:45:32 -04001861 return txp;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001862
1863 bogus:
	DMA_NONE(("dma_getnexttxp: bogus curr: start %d end %d txout %d\n", start, end, di->txout));
Jason Cooper90ea2292010-09-14 09:45:32 -04001865 return NULL;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001866}
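/*
 * Illustrative reclaim loop (a sketch mirroring dma32_txreclaim above):
 *
 *	void *p;
 *	while ((p = dma32_getnexttxp(di, HNDDMA_RANGE_TRANSMITTED)))
 *		PKTFREE(di->osh, p, true);
 *
 * i.e. free everything the hardware's CurrDescr pointer says has been
 * transmitted, leaving still-pending descriptors on the ring.
 */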
1867
Jason Cooper7cc4a4c2010-09-14 09:45:30 -04001868static void *dma32_getnextrxp(dma_info_t *di, bool forceall)
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001869{
1870 uint i, curr;
1871 void *rxp;
1872 dmaaddr_t pa;
1873 /* if forcing, dma engine must be disabled */
1874 ASSERT(!forceall || !dma32_rxenabled(di));
1875
1876 i = di->rxin;
1877
1878 /* return if no packets posted */
1879 if (i == di->rxout)
Jason Cooper90ea2292010-09-14 09:45:32 -04001880 return NULL;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001881
1882 curr =
1883 B2I(R_REG(di->osh, &di->d32rxregs->status) & RS_CD_MASK, dma32dd_t);
1884
1885 /* ignore curr if forceall */
1886 if (!forceall && (i == curr))
Jason Cooper90ea2292010-09-14 09:45:32 -04001887 return NULL;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001888
1889 /* get the packet pointer that corresponds to the rx descriptor */
1890 rxp = di->rxp[i];
1891 ASSERT(rxp);
1892 di->rxp[i] = NULL;
1893
1894 PHYSADDRLOSET(pa,
1895 (BUS_SWAP32(R_SM(&di->rxd32[i].addr)) -
1896 di->dataoffsetlow));
1897 PHYSADDRHISET(pa, 0);
1898
1899 /* clear this packet from the descriptor ring */
1900 DMA_UNMAP(di->osh, pa, di->rxbufsize, DMA_RX, rxp, &di->rxp_dmah[i]);
1901
1902 W_SM(&di->rxd32[i].addr, 0xdeadbeef);
1903
1904 di->rxin = NEXTRXD(i);
1905
Jason Cooper90ea2292010-09-14 09:45:32 -04001906 return rxp;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001907}
1908
1909/*
1910 * Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin).
1911 */
Jason Cooper7cc4a4c2010-09-14 09:45:30 -04001912static void dma32_txrotate(dma_info_t *di)
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001913{
Greg Kroah-Hartman7d4df482010-10-07 17:04:47 -07001914 u16 ad;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001915 uint nactive;
1916 uint rot;
Greg Kroah-Hartman7d4df482010-10-07 17:04:47 -07001917 u16 old, new;
Greg Kroah-Hartman66cbd3a2010-10-08 11:05:47 -07001918 u32 w;
Greg Kroah-Hartman7d4df482010-10-07 17:04:47 -07001919 u16 first, last;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001920
1921 ASSERT(dma32_txsuspendedidle(di));
1922
1923 nactive = _dma_txactive(di);
Greg Kroah-Hartman7d4df482010-10-07 17:04:47 -07001924 ad = (u16) (B2I
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001925 (((R_REG(di->osh, &di->d32txregs->status) & XS_AD_MASK)
1926 >> XS_AD_SHIFT), dma32dd_t));
1927 rot = TXD(ad - di->txin);
1928
1929 ASSERT(rot < di->ntxd);
1930
1931 /* full-ring case is a lot harder - don't worry about this */
1932 if (rot >= (di->ntxd - nactive)) {
1933 DMA_ERROR(("%s: dma_txrotate: ring full - punt\n", di->name));
1934 return;
1935 }
1936
1937 first = di->txin;
1938 last = PREVTXD(di->txout);
1939
1940 /* move entries starting at last and moving backwards to first */
1941 for (old = last; old != PREVTXD(first); old = PREVTXD(old)) {
1942 new = TXD(old + rot);
1943
1944 /*
1945 * Move the tx dma descriptor.
1946 * EOT is set only in the last entry in the ring.
1947 */
1948 w = BUS_SWAP32(R_SM(&di->txd32[old].ctrl)) & ~CTRL_EOT;
1949 if (new == (di->ntxd - 1))
1950 w |= CTRL_EOT;
1951 W_SM(&di->txd32[new].ctrl, BUS_SWAP32(w));
1952 W_SM(&di->txd32[new].addr, R_SM(&di->txd32[old].addr));
1953
1954 /* zap the old tx dma descriptor address field */
1955 W_SM(&di->txd32[old].addr, BUS_SWAP32(0xdeadbeef));
1956
1957 /* move the corresponding txp[] entry */
1958 ASSERT(di->txp[new] == NULL);
1959 di->txp[new] = di->txp[old];
1960
1961 /* Move the segment map as well */
1962 if (DMASGLIST_ENAB) {
1963 bcopy(&di->txp_dmah[old], &di->txp_dmah[new],
1964 sizeof(hnddma_seg_map_t));
Brett Rudley9249ede2010-11-30 20:09:49 -08001965 memset(&di->txp_dmah[old], 0, sizeof(hnddma_seg_map_t));
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001966 }
1967
1968 di->txp[old] = NULL;
1969 }
1970
1971 /* update txin and txout */
1972 di->txin = ad;
1973 di->txout = TXD(di->txout + rot);
1974 di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
1975
1976 /* kick the chip */
1977 W_REG(di->osh, &di->d32txregs->ptr, I2B(di->txout, dma32dd_t));
1978}
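/*
 * Worked example (hypothetical state): ntxd = 8, txin = 2, txout = 6
 * and the engine's ActiveDescr ad = 5, so rot = 3.  The loop walks
 * old = 5, 4, 3, 2 and copies each descriptor to new = 0, 7, 6, 5
 * (indices mod 8); afterwards txin = 5 and txout = 1, so the four
 * pending entries now start exactly at the hardware's position.
 */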
1979
1980/* 64-bit DMA functions */
1981
Jason Cooper7cc4a4c2010-09-14 09:45:30 -04001982static void dma64_txinit(dma_info_t *di)
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001983{
Greg Kroah-Hartman66cbd3a2010-10-08 11:05:47 -07001984 u32 control = D64_XC_XE;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001985
1986 DMA_TRACE(("%s: dma_txinit\n", di->name));
1987
1988 if (di->ntxd == 0)
1989 return;
1990
1991 di->txin = di->txout = 0;
1992 di->hnddma.txavail = di->ntxd - 1;
1993
1994 /* clear tx descriptor ring */
Greg Kroah-Hartmanc03b63c2010-10-08 11:20:01 -07001995 BZERO_SM((void *)di->txd64, (di->ntxd * sizeof(dma64dd_t)));
Henry Ptasinskia9533e72010-09-08 21:04:42 -07001996
	/* A DMA engine without an alignment requirement needs its
	 * descriptor table initialized before the engine is enabled
	 */
2000 if (!di->aligndesc_4k)
2001 _dma_ddtable_init(di, DMA_TX, di->txdpa);
2002
2003 if ((di->hnddma.dmactrlflags & DMA_CTRL_PEN) == 0)
2004 control |= D64_XC_PD;
2005 OR_REG(di->osh, &di->d64txregs->control, control);
2006
	/* A DMA engine with an alignment requirement needs its
	 * descriptor table initialized after the engine is enabled
	 */
2010 if (di->aligndesc_4k)
2011 _dma_ddtable_init(di, DMA_TX, di->txdpa);
2012}
2013
Jason Cooper7cc4a4c2010-09-14 09:45:30 -04002014static bool dma64_txenabled(dma_info_t *di)
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002015{
Greg Kroah-Hartman66cbd3a2010-10-08 11:05:47 -07002016 u32 xc;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002017
2018 /* If the chip is dead, it is not enabled :-) */
2019 xc = R_REG(di->osh, &di->d64txregs->control);
Jason Cooper90ea2292010-09-14 09:45:32 -04002020 return (xc != 0xffffffff) && (xc & D64_XC_XE);
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002021}
2022
Jason Cooper7cc4a4c2010-09-14 09:45:30 -04002023static void dma64_txsuspend(dma_info_t *di)
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002024{
2025 DMA_TRACE(("%s: dma_txsuspend\n", di->name));
2026
2027 if (di->ntxd == 0)
2028 return;
2029
2030 OR_REG(di->osh, &di->d64txregs->control, D64_XC_SE);
2031}
2032
Jason Cooper7cc4a4c2010-09-14 09:45:30 -04002033static void dma64_txresume(dma_info_t *di)
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002034{
2035 DMA_TRACE(("%s: dma_txresume\n", di->name));
2036
2037 if (di->ntxd == 0)
2038 return;
2039
2040 AND_REG(di->osh, &di->d64txregs->control, ~D64_XC_SE);
2041}
2042
Jason Cooper7cc4a4c2010-09-14 09:45:30 -04002043static bool dma64_txsuspended(dma_info_t *di)
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002044{
2045 return (di->ntxd == 0) ||
2046 ((R_REG(di->osh, &di->d64txregs->control) & D64_XC_SE) ==
2047 D64_XC_SE);
2048}
2049
Jason Cooper7cc4a4c2010-09-14 09:45:30 -04002050static void BCMFASTPATH dma64_txreclaim(dma_info_t *di, txd_range_t range)
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002051{
2052 void *p;
2053
2054 DMA_TRACE(("%s: dma_txreclaim %s\n", di->name,
2055 (range == HNDDMA_RANGE_ALL) ? "all" :
2056 ((range ==
2057 HNDDMA_RANGE_TRANSMITTED) ? "transmitted" :
2058 "transfered")));
2059
2060 if (di->txin == di->txout)
2061 return;
2062
2063 while ((p = dma64_getnexttxp(di, range))) {
2064 /* For unframed data, we don't have any packets to free */
2065 if (!(di->hnddma.dmactrlflags & DMA_CTRL_UNFRAMED))
Greg Kroah-Hartman0f0881b2010-10-12 12:15:18 -07002066 PKTFREE(di->osh, p, true);
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002067 }
2068}
2069
Jason Cooper7cc4a4c2010-09-14 09:45:30 -04002070static bool dma64_txstopped(dma_info_t *di)
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002071{
2072 return ((R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK) ==
2073 D64_XS0_XS_STOPPED);
2074}
2075
Jason Cooper7cc4a4c2010-09-14 09:45:30 -04002076static bool dma64_rxstopped(dma_info_t *di)
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002077{
2078 return ((R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_RS_MASK) ==
2079 D64_RS0_RS_STOPPED);
2080}
2081
Jason Cooper7cc4a4c2010-09-14 09:45:30 -04002082static bool dma64_alloc(dma_info_t *di, uint direction)
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002083{
	uint size;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002085 uint ddlen;
2086 void *va;
2087 uint alloced = 0;
Greg Kroah-Hartman7d4df482010-10-07 17:04:47 -07002088 u16 align;
2089 u16 align_bits;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002090
2091 ddlen = sizeof(dma64dd_t);
2092
2093 size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);
2094 align_bits = di->dmadesc_align;
2095 align = (1 << align_bits);
2096
2097 if (direction == DMA_TX) {
Jason Cooperca8c1e52010-09-14 09:45:33 -04002098 va = dma_ringalloc(di->osh, D64RINGALIGN, size, &align_bits,
2099 &alloced, &di->txdpaorig, &di->tx_dmah);
2100 if (va == NULL) {
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002101 DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(ntxd) failed\n", di->name));
Greg Kroah-Hartman0965ae82010-10-12 12:50:15 -07002102 return false;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002103 }
2104 align = (1 << align_bits);
Greg Kroah-Hartmanf024c482010-10-21 10:50:21 -07002105 di->txd64 = (dma64dd_t *) roundup((unsigned long)va, align);
Greg Kroah-Hartmanc03b63c2010-10-08 11:20:01 -07002106 di->txdalign = (uint) ((s8 *)di->txd64 - (s8 *) va);
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002107 PHYSADDRLOSET(di->txdpa,
2108 PHYSADDRLO(di->txdpaorig) + di->txdalign);
2109 /* Make sure that alignment didn't overflow */
2110 ASSERT(PHYSADDRLO(di->txdpa) >= PHYSADDRLO(di->txdpaorig));
2111
2112 PHYSADDRHISET(di->txdpa, PHYSADDRHI(di->txdpaorig));
2113 di->txdalloc = alloced;
Greg Kroah-Hartmanf024c482010-10-21 10:50:21 -07002114 ASSERT(IS_ALIGNED((unsigned long)di->txd64, align));
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002115 } else {
Jason Cooperca8c1e52010-09-14 09:45:33 -04002116 va = dma_ringalloc(di->osh, D64RINGALIGN, size, &align_bits,
2117 &alloced, &di->rxdpaorig, &di->rx_dmah);
2118 if (va == NULL) {
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002119 DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(nrxd) failed\n", di->name));
Greg Kroah-Hartman0965ae82010-10-12 12:50:15 -07002120 return false;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002121 }
2122 align = (1 << align_bits);
Greg Kroah-Hartmanf024c482010-10-21 10:50:21 -07002123 di->rxd64 = (dma64dd_t *) roundup((unsigned long)va, align);
Greg Kroah-Hartmanc03b63c2010-10-08 11:20:01 -07002124 di->rxdalign = (uint) ((s8 *)di->rxd64 - (s8 *) va);
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002125 PHYSADDRLOSET(di->rxdpa,
2126 PHYSADDRLO(di->rxdpaorig) + di->rxdalign);
2127 /* Make sure that alignment didn't overflow */
2128 ASSERT(PHYSADDRLO(di->rxdpa) >= PHYSADDRLO(di->rxdpaorig));
2129
2130 PHYSADDRHISET(di->rxdpa, PHYSADDRHI(di->rxdpaorig));
2131 di->rxdalloc = alloced;
Greg Kroah-Hartmanf024c482010-10-21 10:50:21 -07002132 ASSERT(IS_ALIGNED((unsigned long)di->rxd64, align));
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002133 }
2134
Greg Kroah-Hartman0f0881b2010-10-12 12:15:18 -07002135 return true;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002136}
2137
Jason Cooper7cc4a4c2010-09-14 09:45:30 -04002138static bool dma64_txreset(dma_info_t *di)
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002139{
Greg Kroah-Hartman66cbd3a2010-10-08 11:05:47 -07002140 u32 status;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002141
2142 if (di->ntxd == 0)
Greg Kroah-Hartman0f0881b2010-10-12 12:15:18 -07002143 return true;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002144
2145 /* suspend tx DMA first */
2146 W_REG(di->osh, &di->d64txregs->control, D64_XC_SE);
2147 SPINWAIT(((status =
2148 (R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK))
2149 != D64_XS0_XS_DISABLED) && (status != D64_XS0_XS_IDLE)
2150 && (status != D64_XS0_XS_STOPPED), 10000);
2151
2152 W_REG(di->osh, &di->d64txregs->control, 0);
2153 SPINWAIT(((status =
2154 (R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK))
2155 != D64_XS0_XS_DISABLED), 10000);
2156
2157 /* wait for the last transaction to complete */
mike.rapoport@gmail.com73831412010-10-13 00:09:07 +02002158 udelay(300);
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002159
Jason Cooper90ea2292010-09-14 09:45:32 -04002160 return status == D64_XS0_XS_DISABLED;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002161}
2162
Jason Cooper7cc4a4c2010-09-14 09:45:30 -04002163static bool dma64_rxidle(dma_info_t *di)
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002164{
2165 DMA_TRACE(("%s: dma_rxidle\n", di->name));
2166
2167 if (di->nrxd == 0)
Greg Kroah-Hartman0f0881b2010-10-12 12:15:18 -07002168 return true;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002169
2170 return ((R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_CD_MASK) ==
2171 (R_REG(di->osh, &di->d64rxregs->ptr) & D64_RS0_CD_MASK));
2172}
2173
Jason Cooper7cc4a4c2010-09-14 09:45:30 -04002174static bool dma64_rxreset(dma_info_t *di)
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002175{
Greg Kroah-Hartman66cbd3a2010-10-08 11:05:47 -07002176 u32 status;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002177
2178 if (di->nrxd == 0)
Greg Kroah-Hartman0f0881b2010-10-12 12:15:18 -07002179 return true;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002180
2181 W_REG(di->osh, &di->d64rxregs->control, 0);
2182 SPINWAIT(((status =
2183 (R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_RS_MASK))
2184 != D64_RS0_RS_DISABLED), 10000);
2185
Jason Cooper90ea2292010-09-14 09:45:32 -04002186 return status == D64_RS0_RS_DISABLED;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002187}
2188
Jason Cooper7cc4a4c2010-09-14 09:45:30 -04002189static bool dma64_rxenabled(dma_info_t *di)
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002190{
Greg Kroah-Hartman66cbd3a2010-10-08 11:05:47 -07002191 u32 rc;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002192
2193 rc = R_REG(di->osh, &di->d64rxregs->control);
Jason Cooper90ea2292010-09-14 09:45:32 -04002194 return (rc != 0xffffffff) && (rc & D64_RC_RE);
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002195}
2196
Jason Cooper7cc4a4c2010-09-14 09:45:30 -04002197static bool dma64_txsuspendedidle(dma_info_t *di)
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002198{
2199
2200 if (di->ntxd == 0)
Greg Kroah-Hartman0f0881b2010-10-12 12:15:18 -07002201 return true;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002202
	if (!(R_REG(di->osh, &di->d64txregs->control) & D64_XC_SE))
		return false;

	if ((R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK) ==
	    D64_XS0_XS_IDLE)
		return true;

	return false;
2211}
2212
2213/* Useful when sending unframed data. This allows us to get a progress report from the DMA.
2214 * We return a pointer to the beginning of the DATA buffer of the current descriptor.
2215 * If DMA is idle, we return NULL.
2216 */
Jason Cooper7cc4a4c2010-09-14 09:45:30 -04002217static void *dma64_getpos(dma_info_t *di, bool direction)
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002218{
2219 void *va;
2220 bool idle;
Greg Kroah-Hartman66cbd3a2010-10-08 11:05:47 -07002221 u32 cd_offset;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002222
2223 if (direction == DMA_TX) {
2224 cd_offset =
2225 R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_CD_MASK;
2226 idle = !NTXDACTIVE(di->txin, di->txout);
2227 va = di->txp[B2I(cd_offset, dma64dd_t)];
2228 } else {
2229 cd_offset =
2230 R_REG(di->osh, &di->d64rxregs->status0) & D64_XS0_CD_MASK;
2231 idle = !NRXDACTIVE(di->rxin, di->rxout);
2232 va = di->rxp[B2I(cd_offset, dma64dd_t)];
2233 }
2234
2235 /* If DMA is IDLE, return NULL */
2236 if (idle) {
2237 DMA_TRACE(("%s: DMA idle, return NULL\n", __func__));
2238 va = NULL;
2239 }
2240
2241 return va;
2242}
2243
2244/* TX of unframed data
2245 *
2246 * Adds a DMA ring descriptor for the data pointed to by "buf".
2247 * This is for DMA of a buffer of data and is unlike other hnddma TX functions
 * that take a pointer to a "packet".
 * Each call results in a single descriptor being added for "len" bytes of
 * data starting at "buf"; it doesn't handle chained buffers.
2251 */
Jason Cooper7cc4a4c2010-09-14 09:45:30 -04002252static int dma64_txunframed(dma_info_t *di, void *buf, uint len, bool commit)
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002253{
Greg Kroah-Hartman7d4df482010-10-07 17:04:47 -07002254 u16 txout;
Greg Kroah-Hartman66cbd3a2010-10-08 11:05:47 -07002255 u32 flags = 0;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002256 dmaaddr_t pa; /* phys addr */
2257
2258 txout = di->txout;
2259
2260 /* return nonzero if out of tx descriptors */
2261 if (NEXTTXD(txout) == di->txin)
2262 goto outoftxd;
2263
2264 if (len == 0)
2265 return 0;
2266
2267 pa = DMA_MAP(di->osh, buf, len, DMA_TX, NULL, &di->txp_dmah[txout]);
2268
2269 flags = (D64_CTRL1_SOF | D64_CTRL1_IOC | D64_CTRL1_EOF);
2270
2271 if (txout == (di->ntxd - 1))
2272 flags |= D64_CTRL1_EOT;
2273
2274 dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
2275 ASSERT(di->txp[txout] == NULL);
2276
2277 /* save the buffer pointer - used by dma_getpos */
2278 di->txp[txout] = buf;
2279
2280 txout = NEXTTXD(txout);
2281 /* bump the tx descriptor index */
2282 di->txout = txout;
2283
2284 /* kick the chip */
2285 if (commit) {
2286 W_REG(di->osh, &di->d64txregs->ptr,
2287 di->xmtptrbase + I2B(txout, dma64dd_t));
2288 }
2289
2290 /* tx flow control */
2291 di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
2292
Jason Cooper90ea2292010-09-14 09:45:32 -04002293 return 0;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002294
2295 outoftxd:
2296 DMA_ERROR(("%s: %s: out of txds !!!\n", di->name, __func__));
2297 di->hnddma.txavail = 0;
2298 di->hnddma.txnobuf++;
Jason Cooper90ea2292010-09-14 09:45:32 -04002299 return -1;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002300}
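/*
 * Illustrative pairing of the unframed-data entry points (a sketch;
 * error handling elided, "di" is a live dma_info_t):
 *
 *	if (dma64_txunframed(di, buf, len, true) == 0) {
 *		void *pos = dma64_getpos(di, DMA_TX);
 *		... pos is the buffer the engine is currently on,
 *		    or NULL once the ring has drained ...
 *	}
 *
 * dma64_getpos() reads the current-descriptor offset, so progress is
 * reported at descriptor granularity, not byte-by-byte within "buf".
 */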
2301
2302/* !! tx entry routine
 * WARNING: the caller must check the return value for error.
 * An error (tossed frames) could be fatal and cause many subsequent
 * hard-to-debug problems.
2305 */
Arend van Sprielc26b1372010-11-23 14:06:23 +01002306static int BCMFASTPATH dma64_txfast(dma_info_t *di, struct sk_buff *p0,
2307 bool commit)
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002308{
Arend van Sprielc26b1372010-11-23 14:06:23 +01002309 struct sk_buff *p, *next;
Greg Kroah-Hartman580a0bd2010-10-05 11:09:48 -07002310 unsigned char *data;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002311 uint len;
Greg Kroah-Hartman7d4df482010-10-07 17:04:47 -07002312 u16 txout;
Greg Kroah-Hartman66cbd3a2010-10-08 11:05:47 -07002313 u32 flags = 0;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002314 dmaaddr_t pa;
2315
2316 DMA_TRACE(("%s: dma_txfast\n", di->name));
2317
2318 txout = di->txout;
2319
2320 /*
2321 * Walk the chain of packet buffers
2322 * allocating and initializing transmit descriptor entries.
2323 */
2324 for (p = p0; p; p = next) {
2325 uint nsegs, j;
2326 hnddma_seg_map_t *map;
2327
Arend van Spriel54991ad2010-11-23 14:06:24 +01002328 data = p->data;
2329 len = p->len;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002330#ifdef BCM_DMAPAD
2331 len += PKTDMAPAD(di->osh, p);
2332#endif /* BCM_DMAPAD */
Arend van Spriel54991ad2010-11-23 14:06:24 +01002333 next = p->next;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002334
2335 /* return nonzero if out of tx descriptors */
2336 if (NEXTTXD(txout) == di->txin)
2337 goto outoftxd;
2338
2339 if (len == 0)
2340 continue;
2341
2342 /* get physical address of buffer start */
2343 if (DMASGLIST_ENAB)
Brett Rudley9249ede2010-11-30 20:09:49 -08002344 memset(&di->txp_dmah[txout], 0,
2345 sizeof(hnddma_seg_map_t));
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002346
2347 pa = DMA_MAP(di->osh, data, len, DMA_TX, p,
2348 &di->txp_dmah[txout]);
2349
2350 if (DMASGLIST_ENAB) {
2351 map = &di->txp_dmah[txout];
2352
2353 /* See if all the segments can be accounted for */
2354 if (map->nsegs >
2355 (uint) (di->ntxd - NTXDACTIVE(di->txin, di->txout) -
2356 1))
2357 goto outoftxd;
2358
2359 nsegs = map->nsegs;
2360 } else
2361 nsegs = 1;
2362
2363 for (j = 1; j <= nsegs; j++) {
2364 flags = 0;
2365 if (p == p0 && j == 1)
2366 flags |= D64_CTRL1_SOF;
2367
			/* With a DMA segment list, the descriptor table is
			 * filled from the segment list instead of looping
			 * over buffers in multi-chain DMA. EOF for SGLIST is
			 * therefore reached at the end of the segment list.
			 */
2373 if ((!DMASGLIST_ENAB && next == NULL) ||
2374 (DMASGLIST_ENAB && j == nsegs))
2375 flags |= (D64_CTRL1_IOC | D64_CTRL1_EOF);
2376 if (txout == (di->ntxd - 1))
2377 flags |= D64_CTRL1_EOT;
2378
2379 if (DMASGLIST_ENAB) {
2380 len = map->segs[j - 1].length;
2381 pa = map->segs[j - 1].addr;
2382 }
2383 dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
2384 ASSERT(di->txp[txout] == NULL);
2385
2386 txout = NEXTTXD(txout);
2387 }
2388
2389 /* See above. No need to loop over individual buffers */
2390 if (DMASGLIST_ENAB)
2391 break;
2392 }
2393
2394 /* if last txd eof not set, fix it */
2395 if (!(flags & D64_CTRL1_EOF))
2396 W_SM(&di->txd64[PREVTXD(txout)].ctrl1,
2397 BUS_SWAP32(flags | D64_CTRL1_IOC | D64_CTRL1_EOF));
2398
2399 /* save the packet */
2400 di->txp[PREVTXD(txout)] = p0;
2401
2402 /* bump the tx descriptor index */
2403 di->txout = txout;
2404
2405 /* kick the chip */
2406 if (commit)
2407 W_REG(di->osh, &di->d64txregs->ptr,
2408 di->xmtptrbase + I2B(txout, dma64dd_t));
2409
2410 /* tx flow control */
2411 di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
2412
Jason Cooper90ea2292010-09-14 09:45:32 -04002413 return 0;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002414
2415 outoftxd:
2416 DMA_ERROR(("%s: dma_txfast: out of txds !!!\n", di->name));
Greg Kroah-Hartman0f0881b2010-10-12 12:15:18 -07002417 PKTFREE(di->osh, p0, true);
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002418 di->hnddma.txavail = 0;
2419 di->hnddma.txnobuf++;
Jason Cooper90ea2292010-09-14 09:45:32 -04002420 return -1;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002421}
2422
2423/*
2424 * Reclaim next completed txd (txds if using chained buffers) in the range
2425 * specified and return associated packet.
 * If range is HNDDMA_RANGE_TRANSMITTED, reclaim descriptors that have been
 * transmitted as noted by the hardware "CurrDescr" pointer.
 * If range is HNDDMA_RANGE_TRANSFERED, reclaim descriptors that have been
 * transferred by the DMA as noted by the hardware "ActiveDescr" pointer.
2430 * If range is HNDDMA_RANGE_ALL, reclaim all txd(s) posted to the ring and
2431 * return associated packet regardless of the value of hardware pointers.
2432 */
Jason Cooper7cc4a4c2010-09-14 09:45:30 -04002433static void *BCMFASTPATH dma64_getnexttxp(dma_info_t *di, txd_range_t range)
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002434{
Greg Kroah-Hartman7d4df482010-10-07 17:04:47 -07002435 u16 start, end, i;
2436 u16 active_desc;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002437 void *txp;
2438
2439 DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name,
2440 (range == HNDDMA_RANGE_ALL) ? "all" :
2441 ((range ==
2442 HNDDMA_RANGE_TRANSMITTED) ? "transmitted" :
2443 "transfered")));
2444
2445 if (di->ntxd == 0)
Jason Cooper90ea2292010-09-14 09:45:32 -04002446 return NULL;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002447
2448 txp = NULL;
2449
2450 start = di->txin;
2451 if (range == HNDDMA_RANGE_ALL)
2452 end = di->txout;
2453 else {
2454 dma64regs_t *dregs = di->d64txregs;
2455
2456 end =
Greg Kroah-Hartman7d4df482010-10-07 17:04:47 -07002457 (u16) (B2I
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002458 (((R_REG(di->osh, &dregs->status0) &
2459 D64_XS0_CD_MASK) -
2460 di->xmtptrbase) & D64_XS0_CD_MASK, dma64dd_t));
2461
2462 if (range == HNDDMA_RANGE_TRANSFERED) {
2463 active_desc =
Greg Kroah-Hartman7d4df482010-10-07 17:04:47 -07002464 (u16) (R_REG(di->osh, &dregs->status1) &
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002465 D64_XS1_AD_MASK);
2466 active_desc =
2467 (active_desc - di->xmtptrbase) & D64_XS0_CD_MASK;
2468 active_desc = B2I(active_desc, dma64dd_t);
2469 if (end != active_desc)
2470 end = PREVTXD(active_desc);
2471 }
2472 }
2473
2474 if ((start == 0) && (end > di->txout))
2475 goto bogus;
2476
2477 for (i = start; i != end && !txp; i = NEXTTXD(i)) {
2478 dmaaddr_t pa;
2479 hnddma_seg_map_t *map = NULL;
2480 uint size, j, nsegs;
2481
2482 PHYSADDRLOSET(pa,
2483 (BUS_SWAP32(R_SM(&di->txd64[i].addrlow)) -
2484 di->dataoffsetlow));
2485 PHYSADDRHISET(pa,
2486 (BUS_SWAP32(R_SM(&di->txd64[i].addrhigh)) -
2487 di->dataoffsethigh));
2488
2489 if (DMASGLIST_ENAB) {
2490 map = &di->txp_dmah[i];
2491 size = map->origsize;
2492 nsegs = map->nsegs;
2493 } else {
2494 size =
2495 (BUS_SWAP32(R_SM(&di->txd64[i].ctrl2)) &
2496 D64_CTRL2_BC_MASK);
2497 nsegs = 1;
2498 }
2499
2500 for (j = nsegs; j > 0; j--) {
2501 W_SM(&di->txd64[i].addrlow, 0xdeadbeef);
2502 W_SM(&di->txd64[i].addrhigh, 0xdeadbeef);
2503
2504 txp = di->txp[i];
2505 di->txp[i] = NULL;
2506 if (j > 1)
2507 i = NEXTTXD(i);
2508 }
2509
2510 DMA_UNMAP(di->osh, pa, size, DMA_TX, txp, map);
2511 }
2512
2513 di->txin = i;
2514
2515 /* tx flow control */
2516 di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
2517
Jason Cooper90ea2292010-09-14 09:45:32 -04002518 return txp;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002519
2520 bogus:
	DMA_NONE(("dma_getnexttxp: bogus curr: start %d end %d txout %d\n", start, end, di->txout));
Jason Cooper90ea2292010-09-14 09:45:32 -04002522 return NULL;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002523}
2524
Jason Cooper7cc4a4c2010-09-14 09:45:30 -04002525static void *BCMFASTPATH dma64_getnextrxp(dma_info_t *di, bool forceall)
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002526{
2527 uint i, curr;
2528 void *rxp;
2529 dmaaddr_t pa;
2530
2531 /* if forcing, dma engine must be disabled */
2532 ASSERT(!forceall || !dma64_rxenabled(di));
2533
2534 i = di->rxin;
2535
2536 /* return if no packets posted */
2537 if (i == di->rxout)
Jason Cooper90ea2292010-09-14 09:45:32 -04002538 return NULL;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002539
2540 curr =
2541 B2I(((R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_CD_MASK) -
2542 di->rcvptrbase) & D64_RS0_CD_MASK, dma64dd_t);
2543
2544 /* ignore curr if forceall */
2545 if (!forceall && (i == curr))
Jason Cooper90ea2292010-09-14 09:45:32 -04002546 return NULL;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002547
2548 /* get the packet pointer that corresponds to the rx descriptor */
2549 rxp = di->rxp[i];
2550 ASSERT(rxp);
2551 di->rxp[i] = NULL;
2552
2553 PHYSADDRLOSET(pa,
2554 (BUS_SWAP32(R_SM(&di->rxd64[i].addrlow)) -
2555 di->dataoffsetlow));
2556 PHYSADDRHISET(pa,
2557 (BUS_SWAP32(R_SM(&di->rxd64[i].addrhigh)) -
2558 di->dataoffsethigh));
2559
2560 /* clear this packet from the descriptor ring */
2561 DMA_UNMAP(di->osh, pa, di->rxbufsize, DMA_RX, rxp, &di->rxp_dmah[i]);
2562
2563 W_SM(&di->rxd64[i].addrlow, 0xdeadbeef);
2564 W_SM(&di->rxd64[i].addrhigh, 0xdeadbeef);
2565
2566 di->rxin = NEXTRXD(i);
2567
Jason Cooper90ea2292010-09-14 09:45:32 -04002568 return rxp;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002569}
2570
static bool _dma64_addrext(struct osl_info *osh, dma64regs_t *dma64regs)
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002572{
Greg Kroah-Hartman66cbd3a2010-10-08 11:05:47 -07002573 u32 w;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002574 OR_REG(osh, &dma64regs->control, D64_XC_AE);
2575 w = R_REG(osh, &dma64regs->control);
2576 AND_REG(osh, &dma64regs->control, ~D64_XC_AE);
Jason Cooper90ea2292010-09-14 09:45:32 -04002577 return (w & D64_XC_AE) == D64_XC_AE;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002578}
2579
2580/*
2581 * Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin).
2582 */
Jason Cooper7cc4a4c2010-09-14 09:45:30 -04002583static void dma64_txrotate(dma_info_t *di)
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002584{
Greg Kroah-Hartman7d4df482010-10-07 17:04:47 -07002585 u16 ad;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002586 uint nactive;
2587 uint rot;
Greg Kroah-Hartman7d4df482010-10-07 17:04:47 -07002588 u16 old, new;
Greg Kroah-Hartman66cbd3a2010-10-08 11:05:47 -07002589 u32 w;
Greg Kroah-Hartman7d4df482010-10-07 17:04:47 -07002590 u16 first, last;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002591
2592 ASSERT(dma64_txsuspendedidle(di));
2593
2594 nactive = _dma_txactive(di);
Greg Kroah-Hartman7d4df482010-10-07 17:04:47 -07002595 ad = (u16) (B2I
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002596 ((((R_REG(di->osh, &di->d64txregs->status1) &
2597 D64_XS1_AD_MASK)
2598 - di->xmtptrbase) & D64_XS1_AD_MASK), dma64dd_t));
2599 rot = TXD(ad - di->txin);
2600
2601 ASSERT(rot < di->ntxd);
2602
2603 /* full-ring case is a lot harder - don't worry about this */
2604 if (rot >= (di->ntxd - nactive)) {
2605 DMA_ERROR(("%s: dma_txrotate: ring full - punt\n", di->name));
2606 return;
2607 }
2608
2609 first = di->txin;
2610 last = PREVTXD(di->txout);
2611
2612 /* move entries starting at last and moving backwards to first */
2613 for (old = last; old != PREVTXD(first); old = PREVTXD(old)) {
2614 new = TXD(old + rot);
2615
2616 /*
2617 * Move the tx dma descriptor.
2618 * EOT is set only in the last entry in the ring.
2619 */
2620 w = BUS_SWAP32(R_SM(&di->txd64[old].ctrl1)) & ~D64_CTRL1_EOT;
2621 if (new == (di->ntxd - 1))
2622 w |= D64_CTRL1_EOT;
2623 W_SM(&di->txd64[new].ctrl1, BUS_SWAP32(w));
2624
2625 w = BUS_SWAP32(R_SM(&di->txd64[old].ctrl2));
2626 W_SM(&di->txd64[new].ctrl2, BUS_SWAP32(w));
2627
2628 W_SM(&di->txd64[new].addrlow, R_SM(&di->txd64[old].addrlow));
2629 W_SM(&di->txd64[new].addrhigh, R_SM(&di->txd64[old].addrhigh));
2630
2631 /* zap the old tx dma descriptor address field */
2632 W_SM(&di->txd64[old].addrlow, BUS_SWAP32(0xdeadbeef));
2633 W_SM(&di->txd64[old].addrhigh, BUS_SWAP32(0xdeadbeef));
2634
2635 /* move the corresponding txp[] entry */
2636 ASSERT(di->txp[new] == NULL);
2637 di->txp[new] = di->txp[old];
2638
2639 /* Move the map */
2640 if (DMASGLIST_ENAB) {
2641 bcopy(&di->txp_dmah[old], &di->txp_dmah[new],
2642 sizeof(hnddma_seg_map_t));
Brett Rudley9249ede2010-11-30 20:09:49 -08002643 memset(&di->txp_dmah[old], 0, sizeof(hnddma_seg_map_t));
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002644 }
2645
2646 di->txp[old] = NULL;
2647 }
2648
2649 /* update txin and txout */
2650 di->txin = ad;
2651 di->txout = TXD(di->txout + rot);
2652 di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
2653
2654 /* kick the chip */
2655 W_REG(di->osh, &di->d64txregs->ptr,
2656 di->xmtptrbase + I2B(di->txout, dma64dd_t));
2657}
2658
Jason Cooper7cc4a4c2010-09-14 09:45:30 -04002659uint dma_addrwidth(si_t *sih, void *dmaregs)
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002660{
2661 dma32regs_t *dma32regs;
Brett Rudleye69284f2010-11-16 15:45:48 -08002662 struct osl_info *osh;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002663
2664 osh = si_osh(sih);
2665
	/* Perform 64-bit checks only if we want to advertise 64-bit (> 32-bit) capability */
2667 /* DMA engine is 64-bit capable */
2668 if ((si_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64) {
		/* backplane is 64-bit capable */
2670 if (si_backplane64(sih))
2671 /* If bus is System Backplane or PCIE then we can access 64-bits */
Brett Rudleyfa7a1db2010-11-23 15:30:02 -08002672 if ((sih->bustype == SI_BUS) ||
2673 ((sih->bustype == PCI_BUS) &&
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002674 (sih->buscoretype == PCIE_CORE_ID)))
Jason Cooper90ea2292010-09-14 09:45:32 -04002675 return DMADDRWIDTH_64;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002676
Greg Kroah-Hartman0f0881b2010-10-12 12:15:18 -07002677 /* DMA64 is always 32-bit capable, AE is always true */
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002678 ASSERT(_dma64_addrext(osh, (dma64regs_t *) dmaregs));
2679
Jason Cooper90ea2292010-09-14 09:45:32 -04002680 return DMADDRWIDTH_32;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002681 }
2682
2683 /* Start checking for 32-bit / 30-bit addressing */
2684 dma32regs = (dma32regs_t *) dmaregs;
2685
2686 /* For System Backplane, PCIE bus or addrext feature, 32-bits ok */
Brett Rudleyfa7a1db2010-11-23 15:30:02 -08002687 if ((sih->bustype == SI_BUS) ||
2688 ((sih->bustype == PCI_BUS)
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002689 && sih->buscoretype == PCIE_CORE_ID)
2690 || (_dma32_addrext(osh, dma32regs)))
Jason Cooper90ea2292010-09-14 09:45:32 -04002691 return DMADDRWIDTH_32;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002692
2693 /* Fallthru */
Jason Cooper90ea2292010-09-14 09:45:32 -04002694 return DMADDRWIDTH_30;
Henry Ptasinskia9533e72010-09-08 21:04:42 -07002695}
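/*
 * Illustrative use of dma_addrwidth() at attach time (a sketch, not
 * the driver's actual attach path):
 *
 *	switch (dma_addrwidth(sih, dmaregs)) {
 *	case DMADDRWIDTH_64:	... program 64-bit descriptor addresses ...
 *	case DMADDRWIDTH_32:	... 32-bit addressing, addrext if needed ...
 *	case DMADDRWIDTH_30:	... buffers must sit in the low 1 GB ...
 *	}
 */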