Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (c) 2010 Broadcom Corporation |
| 3 | * |
| 4 | * Permission to use, copy, modify, and/or distribute this software for any |
| 5 | * purpose with or without fee is hereby granted, provided that the above |
| 6 | * copyright notice and this permission notice appear in all copies. |
| 7 | * |
| 8 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES |
| 9 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF |
| 10 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY |
| 11 | * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES |
| 12 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION |
| 13 | * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN |
| 14 | * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. |
| 15 | */ |
| 16 | |
Brett Rudley | 3327989 | 2010-10-01 18:03:27 -0700 | [diff] [blame] | 17 | #include <linux/kernel.h> |
| 18 | #include <linux/string.h> |
Brett Rudley | c6ac24e | 2010-10-26 11:55:23 -0700 | [diff] [blame] | 19 | #include <linux/netdevice.h> |
| 20 | #include <linux/pci.h> |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 21 | #include <bcmdefs.h> |
| 22 | #include <bcmdevs.h> |
| 23 | #include <osl.h> |
| 24 | #include <bcmendian.h> |
| 25 | #include <hndsoc.h> |
| 26 | #include <bcmutils.h> |
| 27 | #include <siutils.h> |
| 28 | |
| 29 | #include <sbhnddma.h> |
| 30 | #include <hnddma.h> |
| 31 | |
| 32 | /* debug/trace */ |
| 33 | #ifdef BCMDBG |
Jason Cooper | c5fe41c | 2010-09-14 09:45:40 -0400 | [diff] [blame] | 34 | #define DMA_ERROR(args) \ |
| 35 | do { \ |
| 36 | if (!(*di->msg_level & 1)) \ |
| 37 | ; \ |
| 38 | else \ |
| 39 | printf args; \ |
| 40 | } while (0) |
| 41 | #define DMA_TRACE(args) \ |
| 42 | do { \ |
| 43 | if (!(*di->msg_level & 2)) \ |
| 44 | ; \ |
| 45 | else \ |
| 46 | printf args; \ |
| 47 | } while (0) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 48 | #else |
| 49 | #define DMA_ERROR(args) |
| 50 | #define DMA_TRACE(args) |
| 51 | #endif /* BCMDBG */ |
| 52 | |
| 53 | #define DMA_NONE(args) |
| 54 | |
| 55 | #define d32txregs dregs.d32_u.txregs_32 |
| 56 | #define d32rxregs dregs.d32_u.rxregs_32 |
| 57 | #define txd32 dregs.d32_u.txd_32 |
| 58 | #define rxd32 dregs.d32_u.rxd_32 |
| 59 | |
| 60 | #define d64txregs dregs.d64_u.txregs_64 |
| 61 | #define d64rxregs dregs.d64_u.rxregs_64 |
| 62 | #define txd64 dregs.d64_u.txd_64 |
| 63 | #define rxd64 dregs.d64_u.rxd_64 |
| 64 | |
| 65 | /* default dma message level (if input msg_level pointer is null in dma_attach()) */ |
Jason Cooper | 7e85c72 | 2010-09-14 09:45:38 -0400 | [diff] [blame] | 66 | static uint dma_msg_level; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 67 | |
| 68 | #define MAXNAMEL 8 /* 8 char names */ |
| 69 | |
| 70 | #define DI_INFO(dmah) ((dma_info_t *)dmah) |
| 71 | |
| 72 | /* dma engine software state */ |
/*
 * DMA engine software state.  One instance describes a tx/rx engine
 * pair: register pointers (32- or 64-bit flavor via the dregs union),
 * descriptor rings, the parallel packet-pointer arrays, and the
 * tunables supplied to dma_attach().
 */
typedef struct dma_info {
	struct hnddma_pub hnddma;	/* exported structure, don't use hnddma_t,
					 * which could be const
					 */
	uint *msg_level;	/* message level pointer */
	char name[MAXNAMEL];	/* callers name for diag msgs */

	void *osh;		/* os handle */
	si_t *sih;		/* sb handle */

	bool dma64;		/* this dma engine is operating in 64-bit mode */
	bool addrext;		/* this dma engine supports DmaExtendedAddrChanges */

	/* Register/ring pointers; exactly one arm of the union is live,
	 * selected by dma64 (accessed via the d32txregs/d64txregs etc.
	 * macros defined above).
	 */
	union {
		struct {
			dma32regs_t *txregs_32;	/* 32-bit dma tx engine registers */
			dma32regs_t *rxregs_32;	/* 32-bit dma rx engine registers */
			dma32dd_t *txd_32;	/* pointer to dma32 tx descriptor ring */
			dma32dd_t *rxd_32;	/* pointer to dma32 rx descriptor ring */
		} d32_u;
		struct {
			dma64regs_t *txregs_64;	/* 64-bit dma tx engine registers */
			dma64regs_t *rxregs_64;	/* 64-bit dma rx engine registers */
			dma64dd_t *txd_64;	/* pointer to dma64 tx descriptor ring */
			dma64dd_t *rxd_64;	/* pointer to dma64 rx descriptor ring */
		} d64_u;
	} dregs;

	u16 dmadesc_align;	/* alignment requirement for dma descriptors */

	u16 ntxd;		/* # tx descriptors tunable */
	u16 txin;		/* index of next descriptor to reclaim */
	u16 txout;		/* index of next descriptor to post */
	void **txp;		/* pointer to parallel array of pointers to packets */
	osldma_t *tx_dmah;	/* DMA TX descriptor ring handle */
	hnddma_seg_map_t *txp_dmah;	/* DMA MAP meta-data handle */
	dmaaddr_t txdpa;	/* Aligned physical address of descriptor ring */
	dmaaddr_t txdpaorig;	/* Original physical address of descriptor ring */
	u16 txdalign;		/* #bytes added to alloc'd mem to align txd */
	u32 txdalloc;		/* #bytes allocated for the ring */
	u32 xmtptrbase;		/* When using unaligned descriptors, the ptr register
				 * is not just an index, it needs all 13 bits to be
				 * an offset from the addr register.
				 */

	u16 nrxd;		/* # rx descriptors tunable */
	u16 rxin;		/* index of next descriptor to reclaim */
	u16 rxout;		/* index of next descriptor to post */
	void **rxp;		/* pointer to parallel array of pointers to packets */
	osldma_t *rx_dmah;	/* DMA RX descriptor ring handle */
	hnddma_seg_map_t *rxp_dmah;	/* DMA MAP meta-data handle */
	dmaaddr_t rxdpa;	/* Aligned physical address of descriptor ring */
	dmaaddr_t rxdpaorig;	/* Original physical address of descriptor ring */
	u16 rxdalign;		/* #bytes added to alloc'd mem to align rxd */
	u32 rxdalloc;		/* #bytes allocated for the ring */
	u32 rcvptrbase;		/* Base for ptr reg when using unaligned descriptors */

	/* tunables */
	unsigned int rxbufsize;	/* rx buffer size in bytes,
				 * not including the extra headroom
				 */
	uint rxextrahdrroom;	/* extra rx headroom, reserved to assist upper stack
				 * e.g. some rx pkt buffers will be bridged to tx side
				 * without byte copying. The extra headroom needs to be
				 * large enough to fit txheader needs.
				 * Some dongle driver may not need it.
				 */
	uint nrxpost;		/* # rx buffers to keep posted */
	unsigned int rxoffset;	/* rxcontrol offset */
	uint ddoffsetlow;	/* add to get dma address of descriptor ring, low 32 bits */
	uint ddoffsethigh;	/* high 32 bits */
	uint dataoffsetlow;	/* add to get dma address of data buffer, low 32 bits */
	uint dataoffsethigh;	/* high 32 bits */
	bool aligndesc_4k;	/* descriptor base need to be aligned or not */
} dma_info_t;
| 148 | |
| 149 | /* |
| 150 | * If BCMDMA32 is defined, hnddma will support both 32-bit and 64-bit DMA engines. |
| 151 | * Otherwise it will support only 64-bit. |
| 152 | * |
| 153 | * DMA32_ENAB indicates whether hnddma is compiled with support for 32-bit DMA engines. |
| 154 | * DMA64_ENAB indicates whether hnddma is compiled with support for 64-bit DMA engines. |
| 155 | * |
| 156 | * DMA64_MODE indicates whether the current DMA engine is running as 64-bit. |
| 157 | */ |
| 158 | #ifdef BCMDMA32 |
| 159 | #define DMA32_ENAB(di) 1 |
| 160 | #define DMA64_ENAB(di) 1 |
| 161 | #define DMA64_MODE(di) ((di)->dma64) |
| 162 | #else /* !BCMDMA32 */ |
| 163 | #define DMA32_ENAB(di) 0 |
| 164 | #define DMA64_ENAB(di) 1 |
| 165 | #define DMA64_MODE(di) 1 |
| 166 | #endif /* !BCMDMA32 */ |
| 167 | |
| 168 | /* DMA Scatter-gather list is supported. Note this is limited to TX direction only */ |
| 169 | #ifdef BCMDMASGLISTOSL |
Greg Kroah-Hartman | 0f0881b | 2010-10-12 12:15:18 -0700 | [diff] [blame] | 170 | #define DMASGLIST_ENAB true |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 171 | #else |
Greg Kroah-Hartman | 0965ae8 | 2010-10-12 12:50:15 -0700 | [diff] [blame] | 172 | #define DMASGLIST_ENAB false |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 173 | #endif /* BCMDMASGLISTOSL */ |
| 174 | |
| 175 | /* descriptor bumping macros */ |
| 176 | #define XXD(x, n) ((x) & ((n) - 1)) /* faster than %, but n must be power of 2 */ |
| 177 | #define TXD(x) XXD((x), di->ntxd) |
| 178 | #define RXD(x) XXD((x), di->nrxd) |
| 179 | #define NEXTTXD(i) TXD((i) + 1) |
| 180 | #define PREVTXD(i) TXD((i) - 1) |
| 181 | #define NEXTRXD(i) RXD((i) + 1) |
| 182 | #define PREVRXD(i) RXD((i) - 1) |
| 183 | |
| 184 | #define NTXDACTIVE(h, t) TXD((t) - (h)) |
| 185 | #define NRXDACTIVE(h, t) RXD((t) - (h)) |
| 186 | |
| 187 | /* macros to convert between byte offsets and indexes */ |
| 188 | #define B2I(bytes, type) ((bytes) / sizeof(type)) |
| 189 | #define I2B(index, type) ((index) * sizeof(type)) |
| 190 | |
| 191 | #define PCI32ADDR_HIGH 0xc0000000 /* address[31:30] */ |
| 192 | #define PCI32ADDR_HIGH_SHIFT 30 /* address[31:30] */ |
| 193 | |
| 194 | #define PCI64ADDR_HIGH 0x80000000 /* address[63] */ |
| 195 | #define PCI64ADDR_HIGH_SHIFT 31 /* address[63] */ |
| 196 | |
| 197 | /* Common prototypes */ |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 198 | static bool _dma_isaddrext(dma_info_t *di); |
| 199 | static bool _dma_descriptor_align(dma_info_t *di); |
| 200 | static bool _dma_alloc(dma_info_t *di, uint direction); |
| 201 | static void _dma_detach(dma_info_t *di); |
| 202 | static void _dma_ddtable_init(dma_info_t *di, uint direction, dmaaddr_t pa); |
| 203 | static void _dma_rxinit(dma_info_t *di); |
| 204 | static void *_dma_rx(dma_info_t *di); |
| 205 | static bool _dma_rxfill(dma_info_t *di); |
| 206 | static void _dma_rxreclaim(dma_info_t *di); |
| 207 | static void _dma_rxenable(dma_info_t *di); |
| 208 | static void *_dma_getnextrxp(dma_info_t *di, bool forceall); |
Greg Kroah-Hartman | 7d4df48 | 2010-10-07 17:04:47 -0700 | [diff] [blame] | 209 | static void _dma_rx_param_get(dma_info_t *di, u16 *rxoffset, |
| 210 | u16 *rxbufsize); |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 211 | |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 212 | static void _dma_txblock(dma_info_t *di); |
| 213 | static void _dma_txunblock(dma_info_t *di); |
| 214 | static uint _dma_txactive(dma_info_t *di); |
| 215 | static uint _dma_rxactive(dma_info_t *di); |
| 216 | static uint _dma_txpending(dma_info_t *di); |
| 217 | static uint _dma_txcommitted(dma_info_t *di); |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 218 | |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 219 | static void *_dma_peeknexttxp(dma_info_t *di); |
| 220 | static void *_dma_peeknextrxp(dma_info_t *di); |
Greg Kroah-Hartman | f024c48 | 2010-10-21 10:50:21 -0700 | [diff] [blame] | 221 | static unsigned long _dma_getvar(dma_info_t *di, const char *name); |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 222 | static void _dma_counterreset(dma_info_t *di); |
| 223 | static void _dma_fifoloopbackenable(dma_info_t *di); |
| 224 | static uint _dma_ctrlflags(dma_info_t *di, uint mask, uint flags); |
Greg Kroah-Hartman | 36ef9a1 | 2010-10-05 10:02:49 -0700 | [diff] [blame] | 225 | static u8 dma_align_sizetobits(uint size); |
Brett Rudley | e69284f | 2010-11-16 15:45:48 -0800 | [diff] [blame] | 226 | static void *dma_ringalloc(struct osl_info *osh, u32 boundary, uint size, |
Greg Kroah-Hartman | 7d4df48 | 2010-10-07 17:04:47 -0700 | [diff] [blame] | 227 | u16 *alignbits, uint *alloced, |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 228 | dmaaddr_t *descpa, osldma_t **dmah); |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 229 | |
| 230 | /* Prototypes for 32-bit routines */ |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 231 | static bool dma32_alloc(dma_info_t *di, uint direction); |
| 232 | static bool dma32_txreset(dma_info_t *di); |
| 233 | static bool dma32_rxreset(dma_info_t *di); |
| 234 | static bool dma32_txsuspendedidle(dma_info_t *di); |
Arend van Spriel | c26b137 | 2010-11-23 14:06:23 +0100 | [diff] [blame] | 235 | static int dma32_txfast(dma_info_t *di, struct sk_buff *p0, bool commit); |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 236 | static void *dma32_getnexttxp(dma_info_t *di, txd_range_t range); |
| 237 | static void *dma32_getnextrxp(dma_info_t *di, bool forceall); |
| 238 | static void dma32_txrotate(dma_info_t *di); |
| 239 | static bool dma32_rxidle(dma_info_t *di); |
| 240 | static void dma32_txinit(dma_info_t *di); |
| 241 | static bool dma32_txenabled(dma_info_t *di); |
| 242 | static void dma32_txsuspend(dma_info_t *di); |
| 243 | static void dma32_txresume(dma_info_t *di); |
| 244 | static bool dma32_txsuspended(dma_info_t *di); |
| 245 | static void dma32_txreclaim(dma_info_t *di, txd_range_t range); |
| 246 | static bool dma32_txstopped(dma_info_t *di); |
| 247 | static bool dma32_rxstopped(dma_info_t *di); |
| 248 | static bool dma32_rxenabled(dma_info_t *di); |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 249 | |
Brett Rudley | e69284f | 2010-11-16 15:45:48 -0800 | [diff] [blame] | 250 | static bool _dma32_addrext(struct osl_info *osh, dma32regs_t *dma32regs); |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 251 | |
| 252 | /* Prototypes for 64-bit routines */ |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 253 | static bool dma64_alloc(dma_info_t *di, uint direction); |
| 254 | static bool dma64_txreset(dma_info_t *di); |
| 255 | static bool dma64_rxreset(dma_info_t *di); |
| 256 | static bool dma64_txsuspendedidle(dma_info_t *di); |
Arend van Spriel | c26b137 | 2010-11-23 14:06:23 +0100 | [diff] [blame] | 257 | static int dma64_txfast(dma_info_t *di, struct sk_buff *p0, bool commit); |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 258 | static int dma64_txunframed(dma_info_t *di, void *p0, uint len, bool commit); |
| 259 | static void *dma64_getpos(dma_info_t *di, bool direction); |
| 260 | static void *dma64_getnexttxp(dma_info_t *di, txd_range_t range); |
| 261 | static void *dma64_getnextrxp(dma_info_t *di, bool forceall); |
| 262 | static void dma64_txrotate(dma_info_t *di); |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 263 | |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 264 | static bool dma64_rxidle(dma_info_t *di); |
| 265 | static void dma64_txinit(dma_info_t *di); |
| 266 | static bool dma64_txenabled(dma_info_t *di); |
| 267 | static void dma64_txsuspend(dma_info_t *di); |
| 268 | static void dma64_txresume(dma_info_t *di); |
| 269 | static bool dma64_txsuspended(dma_info_t *di); |
| 270 | static void dma64_txreclaim(dma_info_t *di, txd_range_t range); |
| 271 | static bool dma64_txstopped(dma_info_t *di); |
| 272 | static bool dma64_rxstopped(dma_info_t *di); |
| 273 | static bool dma64_rxenabled(dma_info_t *di); |
Brett Rudley | e69284f | 2010-11-16 15:45:48 -0800 | [diff] [blame] | 274 | static bool _dma64_addrext(struct osl_info *osh, dma64regs_t *dma64regs); |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 275 | |
Greg Kroah-Hartman | 66cbd3a | 2010-10-08 11:05:47 -0700 | [diff] [blame] | 276 | static inline u32 parity32(u32 data); |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 277 | |
/*
 * Method table for engines running in 64-bit mode, installed into
 * hnddma.di_fn by dma_attach().  Width-independent operations point at
 * the shared _dma_* helpers; width-specific ones use dma64_* routines.
 * The trailing 39 is the last member of di_fcn_t -- presumably the
 * number of function-pointer slots, used as an interface sanity value;
 * confirm against the di_fcn_t declaration in hnddma.h.
 */
const di_fcn_t dma64proc = {
	(di_detach_t) _dma_detach,
	(di_txinit_t) dma64_txinit,
	(di_txreset_t) dma64_txreset,
	(di_txenabled_t) dma64_txenabled,
	(di_txsuspend_t) dma64_txsuspend,
	(di_txresume_t) dma64_txresume,
	(di_txsuspended_t) dma64_txsuspended,
	(di_txsuspendedidle_t) dma64_txsuspendedidle,
	(di_txfast_t) dma64_txfast,
	(di_txunframed_t) dma64_txunframed,
	(di_getpos_t) dma64_getpos,
	(di_txstopped_t) dma64_txstopped,
	(di_txreclaim_t) dma64_txreclaim,
	(di_getnexttxp_t) dma64_getnexttxp,
	(di_peeknexttxp_t) _dma_peeknexttxp,
	(di_txblock_t) _dma_txblock,
	(di_txunblock_t) _dma_txunblock,
	(di_txactive_t) _dma_txactive,
	(di_txrotate_t) dma64_txrotate,

	(di_rxinit_t) _dma_rxinit,
	(di_rxreset_t) dma64_rxreset,
	(di_rxidle_t) dma64_rxidle,
	(di_rxstopped_t) dma64_rxstopped,
	(di_rxenable_t) _dma_rxenable,
	(di_rxenabled_t) dma64_rxenabled,
	(di_rx_t) _dma_rx,
	(di_rxfill_t) _dma_rxfill,
	(di_rxreclaim_t) _dma_rxreclaim,
	(di_getnextrxp_t) _dma_getnextrxp,
	(di_peeknextrxp_t) _dma_peeknextrxp,
	(di_rxparam_get_t) _dma_rx_param_get,

	(di_fifoloopbackenable_t) _dma_fifoloopbackenable,
	(di_getvar_t) _dma_getvar,
	(di_counterreset_t) _dma_counterreset,
	(di_ctrlflags_t) _dma_ctrlflags,
	NULL,
	NULL,
	NULL,
	(di_rxactive_t) _dma_rxactive,
	(di_txpending_t) _dma_txpending,
	(di_txcommitted_t) _dma_txcommitted,
	39
};
| 324 | |
/*
 * Method table for engines running in 32-bit mode (parallel to
 * dma64proc above).  The dma32 engine has no txunframed/getpos
 * operations, hence the two NULL slots after txfast.  The trailing 39
 * mirrors dma64proc -- presumably the slot count of di_fcn_t; confirm
 * against the di_fcn_t declaration in hnddma.h.
 */
static const di_fcn_t dma32proc = {
	(di_detach_t) _dma_detach,
	(di_txinit_t) dma32_txinit,
	(di_txreset_t) dma32_txreset,
	(di_txenabled_t) dma32_txenabled,
	(di_txsuspend_t) dma32_txsuspend,
	(di_txresume_t) dma32_txresume,
	(di_txsuspended_t) dma32_txsuspended,
	(di_txsuspendedidle_t) dma32_txsuspendedidle,
	(di_txfast_t) dma32_txfast,
	NULL,
	NULL,
	(di_txstopped_t) dma32_txstopped,
	(di_txreclaim_t) dma32_txreclaim,
	(di_getnexttxp_t) dma32_getnexttxp,
	(di_peeknexttxp_t) _dma_peeknexttxp,
	(di_txblock_t) _dma_txblock,
	(di_txunblock_t) _dma_txunblock,
	(di_txactive_t) _dma_txactive,
	(di_txrotate_t) dma32_txrotate,

	(di_rxinit_t) _dma_rxinit,
	(di_rxreset_t) dma32_rxreset,
	(di_rxidle_t) dma32_rxidle,
	(di_rxstopped_t) dma32_rxstopped,
	(di_rxenable_t) _dma_rxenable,
	(di_rxenabled_t) dma32_rxenabled,
	(di_rx_t) _dma_rx,
	(di_rxfill_t) _dma_rxfill,
	(di_rxreclaim_t) _dma_rxreclaim,
	(di_getnextrxp_t) _dma_getnextrxp,
	(di_peeknextrxp_t) _dma_peeknextrxp,
	(di_rxparam_get_t) _dma_rx_param_get,

	(di_fifoloopbackenable_t) _dma_fifoloopbackenable,
	(di_getvar_t) _dma_getvar,
	(di_counterreset_t) _dma_counterreset,
	(di_ctrlflags_t) _dma_ctrlflags,
	NULL,
	NULL,
	NULL,
	(di_rxactive_t) _dma_rxactive,
	(di_txpending_t) _dma_txpending,
	(di_txcommitted_t) _dma_txcommitted,
	39
};
| 371 | |
/*
 * Attach to a DMA engine: allocate the per-engine software state
 * (dma_info_t), probe the core for 64-bit capability and address
 * extension support, select the 32- or 64-bit method table, and
 * allocate the descriptor rings plus the parallel packet-pointer
 * arrays.
 *
 * Returns the engine handle (a dma_info_t cast to hnddma_t *) on
 * success, NULL on failure.  On failure all partial allocations are
 * released via _dma_detach().
 *
 * rxextheadroom == -1 selects the default BCMEXTRAHDROOM; msg_level
 * may be NULL, in which case the file-scope dma_msg_level is used.
 */
hnddma_t *dma_attach(struct osl_info *osh, char *name, si_t *sih,
		     void *dmaregstx, void *dmaregsrx, uint ntxd,
		     uint nrxd, uint rxbufsize, int rxextheadroom,
		     uint nrxpost, uint rxoffset, uint *msg_level)
{
	dma_info_t *di;
	uint size;

	/* allocate private info structure */
	di = kzalloc(sizeof(dma_info_t), GFP_ATOMIC);
	if (di == NULL) {
#ifdef BCMDBG
		printf("dma_attach: out of memory\n");
#endif
		return NULL;
	}

	di->msg_level = msg_level ? msg_level : &dma_msg_level;

	/* old chips w/o sb is no longer supported */
	ASSERT(sih != NULL);

	/* detect 64-bit capability from the core's state flags */
	if (DMA64_ENAB(di))
		di->dma64 =
		    ((si_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64);
	else
		di->dma64 = 0;

	/* check arguments (ring sizes must be powers of 2 for the
	 * XXD()-style index-wrapping macros)
	 */
	ASSERT(ISPOWEROF2(ntxd));
	ASSERT(ISPOWEROF2(nrxd));

	if (nrxd == 0)
		ASSERT(dmaregsrx == NULL);
	if (ntxd == 0)
		ASSERT(dmaregstx == NULL);

	/* init dma reg pointer */
	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		ASSERT(ntxd <= D64MAXDD);
		ASSERT(nrxd <= D64MAXDD);
		di->d64txregs = (dma64regs_t *) dmaregstx;
		di->d64rxregs = (dma64regs_t *) dmaregsrx;
		di->hnddma.di_fn = (const di_fcn_t *)&dma64proc;
	} else if (DMA32_ENAB(di)) {
		ASSERT(ntxd <= D32MAXDD);
		ASSERT(nrxd <= D32MAXDD);
		di->d32txregs = (dma32regs_t *) dmaregstx;
		di->d32rxregs = (dma32regs_t *) dmaregsrx;
		di->hnddma.di_fn = (const di_fcn_t *)&dma32proc;
	} else {
		/* 64-bit core but driver built without 32-bit support */
		DMA_ERROR(("dma_attach: driver doesn't support 32-bit DMA\n"));
		ASSERT(0);
		goto fail;
	}

	/* Default flags (which can be changed by the driver calling
	 * dma_ctrlflags before enable): for backwards compatibility both
	 * Rx Overflow Continue and Parity are DISABLED.
	 */
	di->hnddma.di_fn->ctrlflags(&di->hnddma, DMA_CTRL_ROC | DMA_CTRL_PEN,
				    0);

	DMA_TRACE(("%s: dma_attach: %s osh %p flags 0x%x ntxd %d nrxd %d rxbufsize %d " "rxextheadroom %d nrxpost %d rxoffset %d dmaregstx %p dmaregsrx %p\n", name, (DMA64_MODE(di) ? "DMA64" : "DMA32"), osh, di->hnddma.dmactrlflags, ntxd, nrxd, rxbufsize, rxextheadroom, nrxpost, rxoffset, dmaregstx, dmaregsrx));

	/* make a private copy of our callers name (forced NUL-terminated;
	 * strncpy alone does not guarantee termination)
	 */
	strncpy(di->name, name, MAXNAMEL);
	di->name[MAXNAMEL - 1] = '\0';

	di->osh = osh;
	di->sih = sih;

	/* save tunables */
	di->ntxd = (u16) ntxd;
	di->nrxd = (u16) nrxd;

	/* the actual dma size doesn't include the extra headroom */
	di->rxextrahdrroom =
	    (rxextheadroom == -1) ? BCMEXTRAHDROOM : rxextheadroom;
	/* NOTE(review): the comparison uses BCMEXTRAHDROOM but the
	 * subtraction uses di->rxextrahdrroom, which can differ when the
	 * caller passed rxextheadroom != -1 -- confirm this asymmetry is
	 * intended (could underflow the u16 if rxextrahdrroom > rxbufsize).
	 */
	if (rxbufsize > BCMEXTRAHDROOM)
		di->rxbufsize = (u16) (rxbufsize - di->rxextrahdrroom);
	else
		di->rxbufsize = (u16) rxbufsize;

	di->nrxpost = (u16) nrxpost;
	di->rxoffset = (u8) rxoffset;

	/*
	 * figure out the DMA physical address offset for dd and data
	 *     PCI/PCIE: they map silicon backplace address to zero based memory, need offset
	 *     Other bus: use zero
	 *     SI_BUS BIGENDIAN kludge: use sdram swapped region for data buffer, not descriptor
	 */
	di->ddoffsetlow = 0;
	di->dataoffsetlow = 0;
	/* for pci bus, add offset */
	if (sih->bustype == PCI_BUS) {
		if ((sih->buscoretype == PCIE_CORE_ID) && DMA64_MODE(di)) {
			/* pcie with DMA64 */
			di->ddoffsetlow = 0;
			di->ddoffsethigh = SI_PCIE_DMA_H32;
		} else {
			/* pci(DMA32/DMA64) or pcie with DMA32 */
			di->ddoffsetlow = SI_PCI_DMA;
			di->ddoffsethigh = 0;
		}
		di->dataoffsetlow = di->ddoffsetlow;
		di->dataoffsethigh = di->ddoffsethigh;
	}
#if defined(__mips__) && defined(IL_BIGENDIAN)
	di->dataoffsetlow = di->dataoffsetlow + SI_SDRAM_SWAPPED;
#endif				/* defined(__mips__) && defined(IL_BIGENDIAN) */
	/* WAR64450 : DMACtl.Addr ext fields are not supported in SDIOD core. */
	if ((si_coreid(sih) == SDIOD_CORE_ID)
	    && ((si_corerev(sih) > 0) && (si_corerev(sih) <= 2)))
		di->addrext = 0;
	else if ((si_coreid(sih) == I2S_CORE_ID) &&
		 ((si_corerev(sih) == 0) || (si_corerev(sih) == 1)))
		di->addrext = 0;
	else
		di->addrext = _dma_isaddrext(di);

	/* does the descriptors need to be aligned and if yes, on 4K/8K or not */
	di->aligndesc_4k = _dma_descriptor_align(di);
	if (di->aligndesc_4k) {
		if (DMA64_MODE(di)) {
			di->dmadesc_align = D64RINGALIGN_BITS;
			if ((ntxd < D64MAXDD / 2) && (nrxd < D64MAXDD / 2)) {
				/* for smaller dd table, HW relax the alignment requirement */
				di->dmadesc_align = D64RINGALIGN_BITS - 1;
			}
		} else
			di->dmadesc_align = D32RINGALIGN_BITS;
	} else
		di->dmadesc_align = 4;	/* 16 byte alignment */

	DMA_NONE(("DMA descriptor align_needed %d, align %d\n",
		  di->aligndesc_4k, di->dmadesc_align));

	/* allocate tx packet pointer vector */
	if (ntxd) {
		size = ntxd * sizeof(void *);
		di->txp = kzalloc(size, GFP_ATOMIC);
		if (di->txp == NULL) {
			DMA_ERROR(("%s: dma_attach: out of tx memory\n", di->name));
			goto fail;
		}
	}

	/* allocate rx packet pointer vector */
	if (nrxd) {
		size = nrxd * sizeof(void *);
		di->rxp = kzalloc(size, GFP_ATOMIC);
		if (di->rxp == NULL) {
			DMA_ERROR(("%s: dma_attach: out of rx memory\n", di->name));
			goto fail;
		}
	}

	/* allocate transmit descriptor ring, only need ntxd descriptors but it must be aligned */
	if (ntxd) {
		if (!_dma_alloc(di, DMA_TX))
			goto fail;
	}

	/* allocate receive descriptor ring, only need nrxd descriptors but it must be aligned */
	if (nrxd) {
		if (!_dma_alloc(di, DMA_RX))
			goto fail;
	}

	/* without address extension the rings must live inside the
	 * 32-bit PCI DMA window
	 */
	if ((di->ddoffsetlow != 0) && !di->addrext) {
		if (PHYSADDRLO(di->txdpa) > SI_PCI_DMA_SZ) {
			DMA_ERROR(("%s: dma_attach: txdpa 0x%x: addrext not supported\n", di->name, (u32) PHYSADDRLO(di->txdpa)));
			goto fail;
		}
		if (PHYSADDRLO(di->rxdpa) > SI_PCI_DMA_SZ) {
			DMA_ERROR(("%s: dma_attach: rxdpa 0x%x: addrext not supported\n", di->name, (u32) PHYSADDRLO(di->rxdpa)));
			goto fail;
		}
	}

	DMA_TRACE(("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x dataoffsethigh " "0x%x addrext %d\n", di->ddoffsetlow, di->ddoffsethigh, di->dataoffsetlow, di->dataoffsethigh, di->addrext));

	/* allocate DMA mapping vectors (scatter-gather builds only) */
	if (DMASGLIST_ENAB) {
		if (ntxd) {
			size = ntxd * sizeof(hnddma_seg_map_t);
			di->txp_dmah = kzalloc(size, GFP_ATOMIC);
			if (di->txp_dmah == NULL)
				goto fail;
		}

		if (nrxd) {
			size = nrxd * sizeof(hnddma_seg_map_t);
			di->rxp_dmah = kzalloc(size, GFP_ATOMIC);
			if (di->rxp_dmah == NULL)
				goto fail;
		}
	}

	return (hnddma_t *) di;

 fail:
	_dma_detach(di);
	return NULL;
}
| 580 | |
/*
 * Program descriptor 'outidx' of a 32-bit DMA (tx or rx) ring.
 *
 * @di       dma engine state (supplies dataoffsetlow / addrext)
 * @ddring   descriptor ring base (virtual address)
 * @pa       physical address of the data buffer; passed by value, so
 *           the address-extension masking below does not affect the
 *           caller's copy
 * @outidx   ring slot to write
 * @flags    in: descriptor control flags; out: the same flags with the
 *           byte count (and AE bits, if used) merged in
 * @bufcount buffer length in bytes
 */
static inline void
dma32_dd_upd(dma_info_t *di, dma32dd_t *ddring, dmaaddr_t pa, uint outidx,
	     u32 *flags, u32 bufcount)
{
	/* dma32 uses 32-bit control to fit both flags and bufcounter */
	*flags = *flags | (bufcount & CTRL_BC_MASK);

	if ((di->dataoffsetlow == 0) || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
		/* common case: address fits without extension */
		W_SM(&ddring[outidx].addr,
		     BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
		W_SM(&ddring[outidx].ctrl, BUS_SWAP32(*flags));
	} else {
		/* address extension: move the high PCI address bits out
		 * of the address word and into the AE field of the
		 * control word
		 */
		u32 ae;
		ASSERT(di->addrext);
		ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
		PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;

		*flags |= (ae << CTRL_AE_SHIFT);
		W_SM(&ddring[outidx].addr,
		     BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
		W_SM(&ddring[outidx].ctrl, BUS_SWAP32(*flags));
	}
}
| 606 | |
| 607 | /* Check for odd number of 1's */ |
Greg Kroah-Hartman | 66cbd3a | 2010-10-08 11:05:47 -0700 | [diff] [blame] | 608 | static inline u32 parity32(u32 data) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 609 | { |
| 610 | data ^= data >> 16; |
| 611 | data ^= data >> 8; |
| 612 | data ^= data >> 4; |
| 613 | data ^= data >> 2; |
| 614 | data ^= data >> 1; |
| 615 | |
Jason Cooper | 90ea229 | 2010-09-14 09:45:32 -0400 | [diff] [blame] | 616 | return data & 1; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 617 | } |
| 618 | |
| 619 | #define DMA64_DD_PARITY(dd) parity32((dd)->addrlow ^ (dd)->addrhigh ^ (dd)->ctrl1 ^ (dd)->ctrl2) |
| 620 | |
/*
 * Program descriptor 'outidx' of a 64-bit DMA ring: buffer address
 * (low/high words, adjusted by the engine's data offsets), control
 * flags (ctrl1) and buffer byte count (ctrl2).  'pa' is passed by
 * value, so the address-extension masking below does not affect the
 * caller's copy.
 */
static inline void
dma64_dd_upd(dma_info_t *di, dma64dd_t *ddring, dmaaddr_t pa, uint outidx,
	     u32 *flags, u32 bufcount)
{
	u32 ctrl2 = bufcount & D64_CTRL2_BC_MASK;

	/* PCI bus with big(>1G) physical address, use address extension */
#if defined(__mips__) && defined(IL_BIGENDIAN)
	if ((di->dataoffsetlow == SI_SDRAM_SWAPPED)
	    || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
#else
	if ((di->dataoffsetlow == 0) || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
#endif				/* defined(__mips__) && defined(IL_BIGENDIAN) */
		ASSERT((PHYSADDRHI(pa) & PCI64ADDR_HIGH) == 0);

		W_SM(&ddring[outidx].addrlow,
		     BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
		W_SM(&ddring[outidx].addrhigh,
		     BUS_SWAP32(PHYSADDRHI(pa) + di->dataoffsethigh));
		W_SM(&ddring[outidx].ctrl1, BUS_SWAP32(*flags));
		W_SM(&ddring[outidx].ctrl2, BUS_SWAP32(ctrl2));
	} else {
		/* address extension for 32-bit PCI: shift the high
		 * address bits into the AE field of ctrl2
		 */
		u32 ae;
		ASSERT(di->addrext);

		ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
		PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;
		ASSERT(PHYSADDRHI(pa) == 0);

		ctrl2 |= (ae << D64_CTRL2_AE_SHIFT) & D64_CTRL2_AE;
		W_SM(&ddring[outidx].addrlow,
		     BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
		W_SM(&ddring[outidx].addrhigh,
		     BUS_SWAP32(0 + di->dataoffsethigh));
		W_SM(&ddring[outidx].ctrl1, BUS_SWAP32(*flags));
		W_SM(&ddring[outidx].ctrl2, BUS_SWAP32(ctrl2));
	}
	if (di->hnddma.dmactrlflags & DMA_CTRL_PEN) {
		/* parity generation enabled: if the descriptor words
		 * have odd parity, set the parity bit to make the
		 * overall descriptor parity even
		 */
		if (DMA64_DD_PARITY(&ddring[outidx])) {
			W_SM(&ddring[outidx].ctrl2,
			     BUS_SWAP32(ctrl2 | D64_CTRL2_PARITY));
		}
	}
}
| 666 | |
Brett Rudley | e69284f | 2010-11-16 15:45:48 -0800 | [diff] [blame] | 667 | static bool _dma32_addrext(struct osl_info *osh, dma32regs_t *dma32regs) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 668 | { |
Greg Kroah-Hartman | 66cbd3a | 2010-10-08 11:05:47 -0700 | [diff] [blame] | 669 | u32 w; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 670 | |
| 671 | OR_REG(osh, &dma32regs->control, XC_AE); |
| 672 | w = R_REG(osh, &dma32regs->control); |
| 673 | AND_REG(osh, &dma32regs->control, ~XC_AE); |
Jason Cooper | 90ea229 | 2010-09-14 09:45:32 -0400 | [diff] [blame] | 674 | return (w & XC_AE) == XC_AE; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 675 | } |
| 676 | |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 677 | static bool _dma_alloc(dma_info_t *di, uint direction) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 678 | { |
| 679 | if (DMA64_ENAB(di) && DMA64_MODE(di)) { |
| 680 | return dma64_alloc(di, direction); |
| 681 | } else if (DMA32_ENAB(di)) { |
| 682 | return dma32_alloc(di, direction); |
| 683 | } else |
| 684 | ASSERT(0); |
| 685 | } |
| 686 | |
| 687 | /* !! may be called with core in reset */ |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 688 | static void _dma_detach(dma_info_t *di) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 689 | { |
| 690 | |
| 691 | DMA_TRACE(("%s: dma_detach\n", di->name)); |
| 692 | |
| 693 | /* shouldn't be here if descriptors are unreclaimed */ |
| 694 | ASSERT(di->txin == di->txout); |
| 695 | ASSERT(di->rxin == di->rxout); |
| 696 | |
| 697 | /* free dma descriptor rings */ |
| 698 | if (DMA64_ENAB(di) && DMA64_MODE(di)) { |
| 699 | if (di->txd64) |
| 700 | DMA_FREE_CONSISTENT(di->osh, |
Greg Kroah-Hartman | c03b63c | 2010-10-08 11:20:01 -0700 | [diff] [blame] | 701 | ((s8 *)di->txd64 - |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 702 | di->txdalign), di->txdalloc, |
| 703 | (di->txdpaorig), &di->tx_dmah); |
| 704 | if (di->rxd64) |
| 705 | DMA_FREE_CONSISTENT(di->osh, |
Greg Kroah-Hartman | c03b63c | 2010-10-08 11:20:01 -0700 | [diff] [blame] | 706 | ((s8 *)di->rxd64 - |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 707 | di->rxdalign), di->rxdalloc, |
| 708 | (di->rxdpaorig), &di->rx_dmah); |
| 709 | } else if (DMA32_ENAB(di)) { |
| 710 | if (di->txd32) |
| 711 | DMA_FREE_CONSISTENT(di->osh, |
Greg Kroah-Hartman | c03b63c | 2010-10-08 11:20:01 -0700 | [diff] [blame] | 712 | ((s8 *)di->txd32 - |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 713 | di->txdalign), di->txdalloc, |
| 714 | (di->txdpaorig), &di->tx_dmah); |
| 715 | if (di->rxd32) |
| 716 | DMA_FREE_CONSISTENT(di->osh, |
Greg Kroah-Hartman | c03b63c | 2010-10-08 11:20:01 -0700 | [diff] [blame] | 717 | ((s8 *)di->rxd32 - |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 718 | di->rxdalign), di->rxdalloc, |
| 719 | (di->rxdpaorig), &di->rx_dmah); |
| 720 | } else |
| 721 | ASSERT(0); |
| 722 | |
| 723 | /* free packet pointer vectors */ |
| 724 | if (di->txp) |
mike.rapoport@gmail.com | 182acb3 | 2010-10-13 00:09:12 +0200 | [diff] [blame] | 725 | kfree((void *)di->txp); |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 726 | if (di->rxp) |
mike.rapoport@gmail.com | 182acb3 | 2010-10-13 00:09:12 +0200 | [diff] [blame] | 727 | kfree((void *)di->rxp); |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 728 | |
| 729 | /* free tx packet DMA handles */ |
| 730 | if (di->txp_dmah) |
mike.rapoport@gmail.com | 182acb3 | 2010-10-13 00:09:12 +0200 | [diff] [blame] | 731 | kfree(di->txp_dmah); |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 732 | |
| 733 | /* free rx packet DMA handles */ |
| 734 | if (di->rxp_dmah) |
mike.rapoport@gmail.com | 182acb3 | 2010-10-13 00:09:12 +0200 | [diff] [blame] | 735 | kfree(di->rxp_dmah); |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 736 | |
| 737 | /* free our private info structure */ |
mike.rapoport@gmail.com | 182acb3 | 2010-10-13 00:09:12 +0200 | [diff] [blame] | 738 | kfree((void *)di); |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 739 | |
| 740 | } |
| 741 | |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 742 | static bool _dma_descriptor_align(dma_info_t *di) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 743 | { |
| 744 | if (DMA64_ENAB(di) && DMA64_MODE(di)) { |
Greg Kroah-Hartman | 66cbd3a | 2010-10-08 11:05:47 -0700 | [diff] [blame] | 745 | u32 addrl; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 746 | |
| 747 | /* Check to see if the descriptors need to be aligned on 4K/8K or not */ |
| 748 | if (di->d64txregs != NULL) { |
| 749 | W_REG(di->osh, &di->d64txregs->addrlow, 0xff0); |
| 750 | addrl = R_REG(di->osh, &di->d64txregs->addrlow); |
| 751 | if (addrl != 0) |
Greg Kroah-Hartman | 0965ae8 | 2010-10-12 12:50:15 -0700 | [diff] [blame] | 752 | return false; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 753 | } else if (di->d64rxregs != NULL) { |
| 754 | W_REG(di->osh, &di->d64rxregs->addrlow, 0xff0); |
| 755 | addrl = R_REG(di->osh, &di->d64rxregs->addrlow); |
| 756 | if (addrl != 0) |
Greg Kroah-Hartman | 0965ae8 | 2010-10-12 12:50:15 -0700 | [diff] [blame] | 757 | return false; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 758 | } |
| 759 | } |
Greg Kroah-Hartman | 0f0881b | 2010-10-12 12:15:18 -0700 | [diff] [blame] | 760 | return true; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 761 | } |
| 762 | |
Greg Kroah-Hartman | 0965ae8 | 2010-10-12 12:50:15 -0700 | [diff] [blame] | 763 | /* return true if this dma engine supports DmaExtendedAddrChanges, otherwise false */ |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 764 | static bool _dma_isaddrext(dma_info_t *di) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 765 | { |
| 766 | if (DMA64_ENAB(di) && DMA64_MODE(di)) { |
| 767 | /* DMA64 supports full 32- or 64-bit operation. AE is always valid */ |
| 768 | |
| 769 | /* not all tx or rx channel are available */ |
| 770 | if (di->d64txregs != NULL) { |
| 771 | if (!_dma64_addrext(di->osh, di->d64txregs)) { |
| 772 | DMA_ERROR(("%s: _dma_isaddrext: DMA64 tx doesn't have AE set\n", di->name)); |
| 773 | ASSERT(0); |
| 774 | } |
Greg Kroah-Hartman | 0f0881b | 2010-10-12 12:15:18 -0700 | [diff] [blame] | 775 | return true; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 776 | } else if (di->d64rxregs != NULL) { |
| 777 | if (!_dma64_addrext(di->osh, di->d64rxregs)) { |
| 778 | DMA_ERROR(("%s: _dma_isaddrext: DMA64 rx doesn't have AE set\n", di->name)); |
| 779 | ASSERT(0); |
| 780 | } |
Greg Kroah-Hartman | 0f0881b | 2010-10-12 12:15:18 -0700 | [diff] [blame] | 781 | return true; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 782 | } |
Greg Kroah-Hartman | 0965ae8 | 2010-10-12 12:50:15 -0700 | [diff] [blame] | 783 | return false; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 784 | } else if (DMA32_ENAB(di)) { |
| 785 | if (di->d32txregs) |
Jason Cooper | 90ea229 | 2010-09-14 09:45:32 -0400 | [diff] [blame] | 786 | return _dma32_addrext(di->osh, di->d32txregs); |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 787 | else if (di->d32rxregs) |
Jason Cooper | 90ea229 | 2010-09-14 09:45:32 -0400 | [diff] [blame] | 788 | return _dma32_addrext(di->osh, di->d32rxregs); |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 789 | } else |
| 790 | ASSERT(0); |
| 791 | |
Greg Kroah-Hartman | 0965ae8 | 2010-10-12 12:50:15 -0700 | [diff] [blame] | 792 | return false; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 793 | } |
| 794 | |
/*
 * Program the tx or rx descriptor table base address into the engine
 * ('direction' is DMA_TX or DMA_RX).  For DMA64 engines without the
 * 4K alignment requirement the ring base is also remembered so that
 * hardware pointer values can later be converted to ring indexes.
 * 'pa' is passed by value, so the AE masking below does not affect the
 * caller's copy.
 */
static void _dma_ddtable_init(dma_info_t *di, uint direction, dmaaddr_t pa)
{
	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		if (!di->aligndesc_4k) {
			/* remember the ring base; hardware pointers are
			 * relative to it in this mode
			 */
			if (direction == DMA_TX)
				di->xmtptrbase = PHYSADDRLO(pa);
			else
				di->rcvptrbase = PHYSADDRLO(pa);
		}

		if ((di->ddoffsetlow == 0)
		    || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
			/* base address fits without address extension */
			if (direction == DMA_TX) {
				W_REG(di->osh, &di->d64txregs->addrlow,
				      (PHYSADDRLO(pa) + di->ddoffsetlow));
				W_REG(di->osh, &di->d64txregs->addrhigh,
				      (PHYSADDRHI(pa) + di->ddoffsethigh));
			} else {
				W_REG(di->osh, &di->d64rxregs->addrlow,
				      (PHYSADDRLO(pa) + di->ddoffsetlow));
				W_REG(di->osh, &di->d64rxregs->addrhigh,
				      (PHYSADDRHI(pa) + di->ddoffsethigh));
			}
		} else {
			/* DMA64 32bits address extension */
			u32 ae;
			ASSERT(di->addrext);
			ASSERT(PHYSADDRHI(pa) == 0);

			/* shift the high bit(s) from pa to ae */
			ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >>
			    PCI32ADDR_HIGH_SHIFT;
			PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;

			if (direction == DMA_TX) {
				W_REG(di->osh, &di->d64txregs->addrlow,
				      (PHYSADDRLO(pa) + di->ddoffsetlow));
				W_REG(di->osh, &di->d64txregs->addrhigh,
				      di->ddoffsethigh);
				SET_REG(di->osh, &di->d64txregs->control,
					D64_XC_AE, (ae << D64_XC_AE_SHIFT));
			} else {
				W_REG(di->osh, &di->d64rxregs->addrlow,
				      (PHYSADDRLO(pa) + di->ddoffsetlow));
				W_REG(di->osh, &di->d64rxregs->addrhigh,
				      di->ddoffsethigh);
				SET_REG(di->osh, &di->d64rxregs->control,
					D64_RC_AE, (ae << D64_RC_AE_SHIFT));
			}
		}

	} else if (DMA32_ENAB(di)) {
		/* DMA32 has a single 32-bit base address register */
		ASSERT(PHYSADDRHI(pa) == 0);
		if ((di->ddoffsetlow == 0)
		    || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
			if (direction == DMA_TX)
				W_REG(di->osh, &di->d32txregs->addr,
				      (PHYSADDRLO(pa) + di->ddoffsetlow));
			else
				W_REG(di->osh, &di->d32rxregs->addr,
				      (PHYSADDRLO(pa) + di->ddoffsetlow));
		} else {
			/* dma32 address extension */
			u32 ae;
			ASSERT(di->addrext);

			/* shift the high bit(s) from pa to ae */
			ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >>
			    PCI32ADDR_HIGH_SHIFT;
			PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;

			if (direction == DMA_TX) {
				W_REG(di->osh, &di->d32txregs->addr,
				      (PHYSADDRLO(pa) + di->ddoffsetlow));
				SET_REG(di->osh, &di->d32txregs->control, XC_AE,
					ae << XC_AE_SHIFT);
			} else {
				W_REG(di->osh, &di->d32rxregs->addr,
				      (PHYSADDRLO(pa) + di->ddoffsetlow));
				SET_REG(di->osh, &di->d32rxregs->control, RC_AE,
					ae << RC_AE_SHIFT);
			}
		}
	} else
		ASSERT(0);
}
| 882 | |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 883 | static void _dma_fifoloopbackenable(dma_info_t *di) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 884 | { |
| 885 | DMA_TRACE(("%s: dma_fifoloopbackenable\n", di->name)); |
| 886 | |
| 887 | if (DMA64_ENAB(di) && DMA64_MODE(di)) |
| 888 | OR_REG(di->osh, &di->d64txregs->control, D64_XC_LE); |
| 889 | else if (DMA32_ENAB(di)) |
| 890 | OR_REG(di->osh, &di->d32txregs->control, XC_LE); |
| 891 | else |
| 892 | ASSERT(0); |
| 893 | } |
| 894 | |
/*
 * (Re)initialize the receive ring: reset the software ring indexes,
 * zero all rx descriptors, enable the engine, and program the
 * descriptor table base address into hardware.  Note the ordering
 * dependency: engines without the 4K alignment requirement need the
 * table base programmed before the engine is enabled; aligned engines
 * are programmed after.
 */
static void _dma_rxinit(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_rxinit\n", di->name));

	if (di->nrxd == 0)
		return;

	di->rxin = di->rxout = 0;

	/* clear rx descriptor ring */
	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		BZERO_SM((void *)di->rxd64,
			 (di->nrxd * sizeof(dma64dd_t)));

		/* DMA engine with out alignment requirement requires table to be inited
		 * before enabling the engine
		 */
		if (!di->aligndesc_4k)
			_dma_ddtable_init(di, DMA_RX, di->rxdpa);

		_dma_rxenable(di);

		if (di->aligndesc_4k)
			_dma_ddtable_init(di, DMA_RX, di->rxdpa);
	} else if (DMA32_ENAB(di)) {
		BZERO_SM((void *)di->rxd32,
			 (di->nrxd * sizeof(dma32dd_t)));
		_dma_rxenable(di);
		_dma_ddtable_init(di, DMA_RX, di->rxdpa);
	} else
		ASSERT(0);
}
| 927 | |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 928 | static void _dma_rxenable(dma_info_t *di) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 929 | { |
| 930 | uint dmactrlflags = di->hnddma.dmactrlflags; |
| 931 | |
| 932 | DMA_TRACE(("%s: dma_rxenable\n", di->name)); |
| 933 | |
| 934 | if (DMA64_ENAB(di) && DMA64_MODE(di)) { |
Greg Kroah-Hartman | 66cbd3a | 2010-10-08 11:05:47 -0700 | [diff] [blame] | 935 | u32 control = |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 936 | (R_REG(di->osh, &di->d64rxregs->control) & D64_RC_AE) | |
| 937 | D64_RC_RE; |
| 938 | |
| 939 | if ((dmactrlflags & DMA_CTRL_PEN) == 0) |
| 940 | control |= D64_RC_PD; |
| 941 | |
| 942 | if (dmactrlflags & DMA_CTRL_ROC) |
| 943 | control |= D64_RC_OC; |
| 944 | |
| 945 | W_REG(di->osh, &di->d64rxregs->control, |
| 946 | ((di->rxoffset << D64_RC_RO_SHIFT) | control)); |
| 947 | } else if (DMA32_ENAB(di)) { |
Greg Kroah-Hartman | 66cbd3a | 2010-10-08 11:05:47 -0700 | [diff] [blame] | 948 | u32 control = |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 949 | (R_REG(di->osh, &di->d32rxregs->control) & RC_AE) | RC_RE; |
| 950 | |
| 951 | if ((dmactrlflags & DMA_CTRL_PEN) == 0) |
| 952 | control |= RC_PD; |
| 953 | |
| 954 | if (dmactrlflags & DMA_CTRL_ROC) |
| 955 | control |= RC_OC; |
| 956 | |
| 957 | W_REG(di->osh, &di->d32rxregs->control, |
| 958 | ((di->rxoffset << RC_RO_SHIFT) | control)); |
| 959 | } else |
| 960 | ASSERT(0); |
| 961 | } |
| 962 | |
| 963 | static void |
Greg Kroah-Hartman | 7d4df48 | 2010-10-07 17:04:47 -0700 | [diff] [blame] | 964 | _dma_rx_param_get(dma_info_t *di, u16 *rxoffset, u16 *rxbufsize) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 965 | { |
| 966 | /* the normal values fit into 16 bits */ |
Greg Kroah-Hartman | 7d4df48 | 2010-10-07 17:04:47 -0700 | [diff] [blame] | 967 | *rxoffset = (u16) di->rxoffset; |
| 968 | *rxbufsize = (u16) di->rxbufsize; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 969 | } |
| 970 | |
| 971 | /* !! rx entry routine |
| 972 | * returns a pointer to the next frame received, or NULL if there are no more |
| 973 | * if DMA_CTRL_RXMULTI is defined, DMA scattering(multiple buffers) is supported |
| 974 | * with pkts chain |
| 975 | * otherwise, it's treated as giant pkt and will be tossed. |
| 976 | * The DMA scattering starts with normal DMA header, followed by first buffer data. |
| 977 | * After it reaches the max size of buffer, the data continues in next DMA descriptor |
| 978 | * buffer WITHOUT DMA header |
| 979 | */ |
static void *BCMFASTPATH _dma_rx(dma_info_t *di)
{
	struct sk_buff *p, *head, *tail;
	uint len;
	uint pkt_len;
	int resid = 0;

next_frame:
	head = _dma_getnextrxp(di, false);
	if (head == NULL)
		return NULL;

	/* the frame length is DMA'd into the first 16 bits of the buffer */
	len = ltoh16(*(u16 *) (head->data));
	DMA_TRACE(("%s: dma_rx len %d\n", di->name, len));

#if defined(__mips__)
	if (!len) {
		/* length word not visible yet: spin on the uncached view
		 * until the DMA write lands, then patch the cached copy
		 */
		while (!(len = *(u16 *) OSL_UNCACHED(head->data)))
			udelay(1);

		*(u16 *) (head->data) = htol16((u16) len);
	}
#endif				/* defined(__mips__) */

	/* set actual length */
	pkt_len = min((di->rxoffset + len), di->rxbufsize);
	__skb_trim(head, pkt_len);
	/* bytes of the frame that did not fit in the first buffer */
	resid = len - (di->rxbufsize - di->rxoffset);

	/* check for single or multi-buffer rx */
	if (resid > 0) {
		/* chain the continuation buffers (which carry no DMA
		 * header) onto 'head'
		 */
		tail = head;
		while ((resid > 0) && (p = _dma_getnextrxp(di, false))) {
			tail->next = p;
			pkt_len = min(resid, (int)di->rxbufsize);
			__skb_trim(p, pkt_len);

			tail = p;
			resid -= di->rxbufsize;
		}

#ifdef BCMDBG
		if (resid > 0) {
			/* ran out of posted buffers mid-frame: report the
			 * hardware's current descriptor index
			 */
			uint cur;
			ASSERT(p == NULL);
			cur = (DMA64_ENAB(di) && DMA64_MODE(di)) ?
			    B2I(((R_REG(di->osh, &di->d64rxregs->status0) &
				  D64_RS0_CD_MASK) -
				 di->rcvptrbase) & D64_RS0_CD_MASK,
				dma64dd_t) : B2I(R_REG(di->osh,
						       &di->d32rxregs->
						       status) & RS_CD_MASK,
						 dma32dd_t);
			DMA_ERROR(("_dma_rx, rxin %d rxout %d, hw_curr %d\n",
				   di->rxin, di->rxout, cur));
		}
#endif				/* BCMDBG */

		if ((di->hnddma.dmactrlflags & DMA_CTRL_RXMULTI) == 0) {
			/* scattered rx not enabled: toss the giant frame
			 * and move on to the next one
			 */
			DMA_ERROR(("%s: dma_rx: bad frame length (%d)\n",
				   di->name, len));
			PKTFREE(di->osh, head, false);
			di->hnddma.rxgiants++;
			goto next_frame;
		}
	}

	return head;
}
| 1049 | |
| 1050 | /* post receive buffers |
Greg Kroah-Hartman | 0965ae8 | 2010-10-12 12:50:15 -0700 | [diff] [blame] | 1051 | * return false is refill failed completely and ring is empty |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1052 | * this will stall the rx dma and user might want to call rxfill again asap |
| 1053 | * This unlikely happens on memory-rich NIC, but often on memory-constrained dongle |
| 1054 | */ |
static bool BCMFASTPATH _dma_rxfill(dma_info_t *di)
{
	struct sk_buff *p;
	u16 rxin, rxout;
	u32 flags = 0;
	uint n;
	uint i;
	dmaaddr_t pa;
	uint extra_offset = 0;
	bool ring_empty;

	ring_empty = false;

	/*
	 * Determine how many receive buffers we're lacking
	 * from the full complement, allocate, initialize,
	 * and post them, then update the chip rx lastdscr.
	 */

	rxin = di->rxin;
	rxout = di->rxout;

	/* number of free slots to (re)post */
	n = di->nrxpost - NRXDACTIVE(rxin, rxout);

	DMA_TRACE(("%s: dma_rxfill: post %d\n", di->name, n));

	if (di->rxbufsize > BCMEXTRAHDROOM)
		extra_offset = di->rxextrahdrroom;

	for (i = 0; i < n; i++) {
		/* the di->rxbufsize doesn't include the extra headroom, we need to add it to the
		   size to be allocated
		 */

		p = osl_pktget(di->osh, di->rxbufsize + extra_offset);

		if (p == NULL) {
			DMA_ERROR(("%s: dma_rxfill: out of rxbufs\n",
				   di->name));
			/* could not post even one buffer: report whether
			 * the ring has gone completely empty so the
			 * caller can retry refilling soon
			 */
			if (i == 0) {
				if (DMA64_ENAB(di) && DMA64_MODE(di)) {
					if (dma64_rxidle(di)) {
						DMA_ERROR(("%s: rxfill64: ring is empty !\n", di->name));
						ring_empty = true;
					}
				} else if (DMA32_ENAB(di)) {
					if (dma32_rxidle(di)) {
						DMA_ERROR(("%s: rxfill32: ring is empty !\n", di->name));
						ring_empty = true;
					}
				} else
					ASSERT(0);
			}
			di->hnddma.rxnobuf++;
			break;
		}
		/* reserve an extra headroom, if applicable */
		if (extra_offset)
			skb_pull(p, extra_offset);

		/* Do a cached write instead of uncached write since DMA_MAP
		 * will flush the cache.
		 */
		*(u32 *) (p->data) = 0;

		if (DMASGLIST_ENAB)
			memset(&di->rxp_dmah[rxout], 0,
				sizeof(hnddma_seg_map_t));

		pa = DMA_MAP(di->osh, p->data,
			     di->rxbufsize, DMA_RX, p, &di->rxp_dmah[rxout]);

		ASSERT(IS_ALIGNED(PHYSADDRLO(pa), 4));

		/* save the free packet pointer */
		ASSERT(di->rxp[rxout] == NULL);
		di->rxp[rxout] = p;

		/* reset flags for each descriptor */
		flags = 0;
		if (DMA64_ENAB(di) && DMA64_MODE(di)) {
			/* mark the last ring slot as end-of-table */
			if (rxout == (di->nrxd - 1))
				flags = D64_CTRL1_EOT;

			dma64_dd_upd(di, di->rxd64, pa, rxout, &flags,
				     di->rxbufsize);
		} else if (DMA32_ENAB(di)) {
			if (rxout == (di->nrxd - 1))
				flags = CTRL_EOT;

			ASSERT(PHYSADDRHI(pa) == 0);
			dma32_dd_upd(di, di->rxd32, pa, rxout, &flags,
				     di->rxbufsize);
		} else
			ASSERT(0);
		rxout = NEXTRXD(rxout);
	}

	di->rxout = rxout;

	/* update the chip lastdscr pointer */
	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
		W_REG(di->osh, &di->d64rxregs->ptr,
		      di->rcvptrbase + I2B(rxout, dma64dd_t));
	} else if (DMA32_ENAB(di)) {
		W_REG(di->osh, &di->d32rxregs->ptr, I2B(rxout, dma32dd_t));
	} else
		ASSERT(0);

	return ring_empty;
}
| 1166 | |
| 1167 | /* like getnexttxp but no reclaim */ |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 1168 | static void *_dma_peeknexttxp(dma_info_t *di) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1169 | { |
| 1170 | uint end, i; |
| 1171 | |
| 1172 | if (di->ntxd == 0) |
Jason Cooper | 90ea229 | 2010-09-14 09:45:32 -0400 | [diff] [blame] | 1173 | return NULL; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1174 | |
| 1175 | if (DMA64_ENAB(di) && DMA64_MODE(di)) { |
| 1176 | end = |
| 1177 | B2I(((R_REG(di->osh, &di->d64txregs->status0) & |
| 1178 | D64_XS0_CD_MASK) - di->xmtptrbase) & D64_XS0_CD_MASK, |
| 1179 | dma64dd_t); |
| 1180 | } else if (DMA32_ENAB(di)) { |
| 1181 | end = |
| 1182 | B2I(R_REG(di->osh, &di->d32txregs->status) & XS_CD_MASK, |
| 1183 | dma32dd_t); |
| 1184 | } else |
| 1185 | ASSERT(0); |
| 1186 | |
| 1187 | for (i = di->txin; i != end; i = NEXTTXD(i)) |
| 1188 | if (di->txp[i]) |
Jason Cooper | 90ea229 | 2010-09-14 09:45:32 -0400 | [diff] [blame] | 1189 | return di->txp[i]; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1190 | |
Jason Cooper | 90ea229 | 2010-09-14 09:45:32 -0400 | [diff] [blame] | 1191 | return NULL; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1192 | } |
| 1193 | |
| 1194 | /* like getnextrxp but not take off the ring */ |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 1195 | static void *_dma_peeknextrxp(dma_info_t *di) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1196 | { |
| 1197 | uint end, i; |
| 1198 | |
| 1199 | if (di->nrxd == 0) |
Jason Cooper | 90ea229 | 2010-09-14 09:45:32 -0400 | [diff] [blame] | 1200 | return NULL; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1201 | |
| 1202 | if (DMA64_ENAB(di) && DMA64_MODE(di)) { |
| 1203 | end = |
| 1204 | B2I(((R_REG(di->osh, &di->d64rxregs->status0) & |
| 1205 | D64_RS0_CD_MASK) - di->rcvptrbase) & D64_RS0_CD_MASK, |
| 1206 | dma64dd_t); |
| 1207 | } else if (DMA32_ENAB(di)) { |
| 1208 | end = |
| 1209 | B2I(R_REG(di->osh, &di->d32rxregs->status) & RS_CD_MASK, |
| 1210 | dma32dd_t); |
| 1211 | } else |
| 1212 | ASSERT(0); |
| 1213 | |
| 1214 | for (i = di->rxin; i != end; i = NEXTRXD(i)) |
| 1215 | if (di->rxp[i]) |
Jason Cooper | 90ea229 | 2010-09-14 09:45:32 -0400 | [diff] [blame] | 1216 | return di->rxp[i]; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1217 | |
Jason Cooper | 90ea229 | 2010-09-14 09:45:32 -0400 | [diff] [blame] | 1218 | return NULL; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1219 | } |
| 1220 | |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 1221 | static void _dma_rxreclaim(dma_info_t *di) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1222 | { |
| 1223 | void *p; |
| 1224 | |
| 1225 | /* "unused local" warning suppression for OSLs that |
| 1226 | * define PKTFREE() without using the di->osh arg |
| 1227 | */ |
| 1228 | di = di; |
| 1229 | |
| 1230 | DMA_TRACE(("%s: dma_rxreclaim\n", di->name)); |
| 1231 | |
Greg Kroah-Hartman | 0f0881b | 2010-10-12 12:15:18 -0700 | [diff] [blame] | 1232 | while ((p = _dma_getnextrxp(di, true))) |
Greg Kroah-Hartman | 0965ae8 | 2010-10-12 12:50:15 -0700 | [diff] [blame] | 1233 | PKTFREE(di->osh, p, false); |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1234 | } |
| 1235 | |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 1236 | static void *BCMFASTPATH _dma_getnextrxp(dma_info_t *di, bool forceall) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1237 | { |
| 1238 | if (di->nrxd == 0) |
Jason Cooper | 90ea229 | 2010-09-14 09:45:32 -0400 | [diff] [blame] | 1239 | return NULL; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1240 | |
| 1241 | if (DMA64_ENAB(di) && DMA64_MODE(di)) { |
| 1242 | return dma64_getnextrxp(di, forceall); |
| 1243 | } else if (DMA32_ENAB(di)) { |
| 1244 | return dma32_getnextrxp(di, forceall); |
| 1245 | } else |
| 1246 | ASSERT(0); |
| 1247 | } |
| 1248 | |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 1249 | static void _dma_txblock(dma_info_t *di) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1250 | { |
| 1251 | di->hnddma.txavail = 0; |
| 1252 | } |
| 1253 | |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 1254 | static void _dma_txunblock(dma_info_t *di) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1255 | { |
| 1256 | di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1; |
| 1257 | } |
| 1258 | |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 1259 | static uint _dma_txactive(dma_info_t *di) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1260 | { |
| 1261 | return NTXDACTIVE(di->txin, di->txout); |
| 1262 | } |
| 1263 | |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 1264 | static uint _dma_txpending(dma_info_t *di) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1265 | { |
| 1266 | uint curr; |
| 1267 | |
| 1268 | if (DMA64_ENAB(di) && DMA64_MODE(di)) { |
| 1269 | curr = |
| 1270 | B2I(((R_REG(di->osh, &di->d64txregs->status0) & |
| 1271 | D64_XS0_CD_MASK) - di->xmtptrbase) & D64_XS0_CD_MASK, |
| 1272 | dma64dd_t); |
| 1273 | } else if (DMA32_ENAB(di)) { |
| 1274 | curr = |
| 1275 | B2I(R_REG(di->osh, &di->d32txregs->status) & XS_CD_MASK, |
| 1276 | dma32dd_t); |
| 1277 | } else |
| 1278 | ASSERT(0); |
| 1279 | |
| 1280 | return NTXDACTIVE(curr, di->txout); |
| 1281 | } |
| 1282 | |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 1283 | static uint _dma_txcommitted(dma_info_t *di) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1284 | { |
| 1285 | uint ptr; |
| 1286 | uint txin = di->txin; |
| 1287 | |
| 1288 | if (txin == di->txout) |
| 1289 | return 0; |
| 1290 | |
| 1291 | if (DMA64_ENAB(di) && DMA64_MODE(di)) { |
| 1292 | ptr = B2I(R_REG(di->osh, &di->d64txregs->ptr), dma64dd_t); |
| 1293 | } else if (DMA32_ENAB(di)) { |
| 1294 | ptr = B2I(R_REG(di->osh, &di->d32txregs->ptr), dma32dd_t); |
| 1295 | } else |
| 1296 | ASSERT(0); |
| 1297 | |
| 1298 | return NTXDACTIVE(di->txin, ptr); |
| 1299 | } |
| 1300 | |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 1301 | static uint _dma_rxactive(dma_info_t *di) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1302 | { |
| 1303 | return NRXDACTIVE(di->rxin, di->rxout); |
| 1304 | } |
| 1305 | |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 1306 | static void _dma_counterreset(dma_info_t *di) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1307 | { |
| 1308 | /* reset all software counter */ |
| 1309 | di->hnddma.rxgiants = 0; |
| 1310 | di->hnddma.rxnobuf = 0; |
| 1311 | di->hnddma.txnobuf = 0; |
| 1312 | } |
| 1313 | |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 1314 | static uint _dma_ctrlflags(dma_info_t *di, uint mask, uint flags) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1315 | { |
| 1316 | uint dmactrlflags = di->hnddma.dmactrlflags; |
| 1317 | |
| 1318 | if (di == NULL) { |
| 1319 | DMA_ERROR(("%s: _dma_ctrlflags: NULL dma handle\n", di->name)); |
Jason Cooper | 90ea229 | 2010-09-14 09:45:32 -0400 | [diff] [blame] | 1320 | return 0; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1321 | } |
| 1322 | |
| 1323 | ASSERT((flags & ~mask) == 0); |
| 1324 | |
| 1325 | dmactrlflags &= ~mask; |
| 1326 | dmactrlflags |= flags; |
| 1327 | |
| 1328 | /* If trying to enable parity, check if parity is actually supported */ |
| 1329 | if (dmactrlflags & DMA_CTRL_PEN) { |
Greg Kroah-Hartman | 66cbd3a | 2010-10-08 11:05:47 -0700 | [diff] [blame] | 1330 | u32 control; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1331 | |
| 1332 | if (DMA64_ENAB(di) && DMA64_MODE(di)) { |
| 1333 | control = R_REG(di->osh, &di->d64txregs->control); |
| 1334 | W_REG(di->osh, &di->d64txregs->control, |
| 1335 | control | D64_XC_PD); |
| 1336 | if (R_REG(di->osh, &di->d64txregs->control) & D64_XC_PD) { |
| 1337 | /* We *can* disable it so it is supported, |
| 1338 | * restore control register |
| 1339 | */ |
| 1340 | W_REG(di->osh, &di->d64txregs->control, |
| 1341 | control); |
| 1342 | } else { |
| 1343 | /* Not supported, don't allow it to be enabled */ |
| 1344 | dmactrlflags &= ~DMA_CTRL_PEN; |
| 1345 | } |
| 1346 | } else if (DMA32_ENAB(di)) { |
| 1347 | control = R_REG(di->osh, &di->d32txregs->control); |
| 1348 | W_REG(di->osh, &di->d32txregs->control, |
| 1349 | control | XC_PD); |
| 1350 | if (R_REG(di->osh, &di->d32txregs->control) & XC_PD) { |
| 1351 | W_REG(di->osh, &di->d32txregs->control, |
| 1352 | control); |
| 1353 | } else { |
| 1354 | /* Not supported, don't allow it to be enabled */ |
| 1355 | dmactrlflags &= ~DMA_CTRL_PEN; |
| 1356 | } |
| 1357 | } else |
| 1358 | ASSERT(0); |
| 1359 | } |
| 1360 | |
| 1361 | di->hnddma.dmactrlflags = dmactrlflags; |
| 1362 | |
Jason Cooper | 90ea229 | 2010-09-14 09:45:32 -0400 | [diff] [blame] | 1363 | return dmactrlflags; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1364 | } |
| 1365 | |
| 1366 | /* get the address of the var in order to change later */ |
Greg Kroah-Hartman | f024c48 | 2010-10-21 10:50:21 -0700 | [diff] [blame] | 1367 | static unsigned long _dma_getvar(dma_info_t *di, const char *name) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1368 | { |
| 1369 | if (!strcmp(name, "&txavail")) |
Greg Kroah-Hartman | f024c48 | 2010-10-21 10:50:21 -0700 | [diff] [blame] | 1370 | return (unsigned long)&(di->hnddma.txavail); |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1371 | else { |
| 1372 | ASSERT(0); |
| 1373 | } |
Jason Cooper | 90ea229 | 2010-09-14 09:45:32 -0400 | [diff] [blame] | 1374 | return 0; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1375 | } |
| 1376 | |
Brett Rudley | e69284f | 2010-11-16 15:45:48 -0800 | [diff] [blame] | 1377 | void dma_txpioloopback(struct osl_info *osh, dma32regs_t *regs) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1378 | { |
| 1379 | OR_REG(osh, ®s->control, XC_LE); |
| 1380 | } |
| 1381 | |
| 1382 | static |
Greg Kroah-Hartman | 36ef9a1 | 2010-10-05 10:02:49 -0700 | [diff] [blame] | 1383 | u8 dma_align_sizetobits(uint size) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1384 | { |
Greg Kroah-Hartman | 36ef9a1 | 2010-10-05 10:02:49 -0700 | [diff] [blame] | 1385 | u8 bitpos = 0; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1386 | ASSERT(size); |
| 1387 | ASSERT(!(size & (size - 1))); |
| 1388 | while (size >>= 1) { |
| 1389 | bitpos++; |
| 1390 | } |
Jason Cooper | 90ea229 | 2010-09-14 09:45:32 -0400 | [diff] [blame] | 1391 | return bitpos; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1392 | } |
| 1393 | |
| 1394 | /* This function ensures that the DMA descriptor ring will not get allocated |
| 1395 | * across Page boundary. If the allocation is done across the page boundary |
| 1396 | * at the first time, then it is freed and the allocation is done at |
| 1397 | * descriptor ring size aligned location. This will ensure that the ring will |
| 1398 | * not cross page boundary |
| 1399 | */ |
Brett Rudley | e69284f | 2010-11-16 15:45:48 -0800 | [diff] [blame] | 1400 | static void *dma_ringalloc(struct osl_info *osh, u32 boundary, uint size, |
Greg Kroah-Hartman | 7d4df48 | 2010-10-07 17:04:47 -0700 | [diff] [blame] | 1401 | u16 *alignbits, uint *alloced, |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 1402 | dmaaddr_t *descpa, osldma_t **dmah) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1403 | { |
| 1404 | void *va; |
Greg Kroah-Hartman | 66cbd3a | 2010-10-08 11:05:47 -0700 | [diff] [blame] | 1405 | u32 desc_strtaddr; |
| 1406 | u32 alignbytes = 1 << *alignbits; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1407 | |
Jason Cooper | ca8c1e5 | 2010-09-14 09:45:33 -0400 | [diff] [blame] | 1408 | va = DMA_ALLOC_CONSISTENT(osh, size, *alignbits, alloced, descpa, |
| 1409 | dmah); |
| 1410 | if (NULL == va) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1411 | return NULL; |
| 1412 | |
Greg Kroah-Hartman | f024c48 | 2010-10-21 10:50:21 -0700 | [diff] [blame] | 1413 | desc_strtaddr = (u32) roundup((unsigned long)va, alignbytes); |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1414 | if (((desc_strtaddr + size - 1) & boundary) != (desc_strtaddr |
| 1415 | & boundary)) { |
| 1416 | *alignbits = dma_align_sizetobits(size); |
| 1417 | DMA_FREE_CONSISTENT(osh, va, size, *descpa, dmah); |
| 1418 | va = DMA_ALLOC_CONSISTENT(osh, size, *alignbits, alloced, |
| 1419 | descpa, dmah); |
| 1420 | } |
| 1421 | return va; |
| 1422 | } |
| 1423 | |
| 1424 | /* 32-bit DMA functions */ |
| 1425 | |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 1426 | static void dma32_txinit(dma_info_t *di) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1427 | { |
Greg Kroah-Hartman | 66cbd3a | 2010-10-08 11:05:47 -0700 | [diff] [blame] | 1428 | u32 control = XC_XE; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1429 | |
| 1430 | DMA_TRACE(("%s: dma_txinit\n", di->name)); |
| 1431 | |
| 1432 | if (di->ntxd == 0) |
| 1433 | return; |
| 1434 | |
| 1435 | di->txin = di->txout = 0; |
| 1436 | di->hnddma.txavail = di->ntxd - 1; |
| 1437 | |
| 1438 | /* clear tx descriptor ring */ |
Greg Kroah-Hartman | c03b63c | 2010-10-08 11:20:01 -0700 | [diff] [blame] | 1439 | BZERO_SM((void *)di->txd32, (di->ntxd * sizeof(dma32dd_t))); |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1440 | |
| 1441 | if ((di->hnddma.dmactrlflags & DMA_CTRL_PEN) == 0) |
| 1442 | control |= XC_PD; |
| 1443 | W_REG(di->osh, &di->d32txregs->control, control); |
| 1444 | _dma_ddtable_init(di, DMA_TX, di->txdpa); |
| 1445 | } |
| 1446 | |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 1447 | static bool dma32_txenabled(dma_info_t *di) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1448 | { |
Greg Kroah-Hartman | 66cbd3a | 2010-10-08 11:05:47 -0700 | [diff] [blame] | 1449 | u32 xc; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1450 | |
| 1451 | /* If the chip is dead, it is not enabled :-) */ |
| 1452 | xc = R_REG(di->osh, &di->d32txregs->control); |
Jason Cooper | 90ea229 | 2010-09-14 09:45:32 -0400 | [diff] [blame] | 1453 | return (xc != 0xffffffff) && (xc & XC_XE); |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1454 | } |
| 1455 | |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 1456 | static void dma32_txsuspend(dma_info_t *di) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1457 | { |
| 1458 | DMA_TRACE(("%s: dma_txsuspend\n", di->name)); |
| 1459 | |
| 1460 | if (di->ntxd == 0) |
| 1461 | return; |
| 1462 | |
| 1463 | OR_REG(di->osh, &di->d32txregs->control, XC_SE); |
| 1464 | } |
| 1465 | |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 1466 | static void dma32_txresume(dma_info_t *di) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1467 | { |
| 1468 | DMA_TRACE(("%s: dma_txresume\n", di->name)); |
| 1469 | |
| 1470 | if (di->ntxd == 0) |
| 1471 | return; |
| 1472 | |
| 1473 | AND_REG(di->osh, &di->d32txregs->control, ~XC_SE); |
| 1474 | } |
| 1475 | |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 1476 | static bool dma32_txsuspended(dma_info_t *di) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1477 | { |
| 1478 | return (di->ntxd == 0) |
| 1479 | || ((R_REG(di->osh, &di->d32txregs->control) & XC_SE) == XC_SE); |
| 1480 | } |
| 1481 | |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 1482 | static void dma32_txreclaim(dma_info_t *di, txd_range_t range) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1483 | { |
| 1484 | void *p; |
| 1485 | |
| 1486 | DMA_TRACE(("%s: dma_txreclaim %s\n", di->name, |
| 1487 | (range == HNDDMA_RANGE_ALL) ? "all" : |
| 1488 | ((range == |
| 1489 | HNDDMA_RANGE_TRANSMITTED) ? "transmitted" : |
| 1490 | "transfered"))); |
| 1491 | |
| 1492 | if (di->txin == di->txout) |
| 1493 | return; |
| 1494 | |
| 1495 | while ((p = dma32_getnexttxp(di, range))) |
Greg Kroah-Hartman | 0f0881b | 2010-10-12 12:15:18 -0700 | [diff] [blame] | 1496 | PKTFREE(di->osh, p, true); |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1497 | } |
| 1498 | |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 1499 | static bool dma32_txstopped(dma_info_t *di) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1500 | { |
| 1501 | return ((R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK) == |
| 1502 | XS_XS_STOPPED); |
| 1503 | } |
| 1504 | |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 1505 | static bool dma32_rxstopped(dma_info_t *di) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1506 | { |
| 1507 | return ((R_REG(di->osh, &di->d32rxregs->status) & RS_RS_MASK) == |
| 1508 | RS_RS_STOPPED); |
| 1509 | } |
| 1510 | |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 1511 | static bool dma32_alloc(dma_info_t *di, uint direction) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1512 | { |
| 1513 | uint size; |
| 1514 | uint ddlen; |
| 1515 | void *va; |
| 1516 | uint alloced; |
Greg Kroah-Hartman | 7d4df48 | 2010-10-07 17:04:47 -0700 | [diff] [blame] | 1517 | u16 align; |
| 1518 | u16 align_bits; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1519 | |
| 1520 | ddlen = sizeof(dma32dd_t); |
| 1521 | |
| 1522 | size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen); |
| 1523 | |
| 1524 | alloced = 0; |
| 1525 | align_bits = di->dmadesc_align; |
| 1526 | align = (1 << align_bits); |
| 1527 | |
| 1528 | if (direction == DMA_TX) { |
Jason Cooper | ca8c1e5 | 2010-09-14 09:45:33 -0400 | [diff] [blame] | 1529 | va = dma_ringalloc(di->osh, D32RINGALIGN, size, &align_bits, |
| 1530 | &alloced, &di->txdpaorig, &di->tx_dmah); |
| 1531 | if (va == NULL) { |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1532 | DMA_ERROR(("%s: dma_alloc: DMA_ALLOC_CONSISTENT(ntxd) failed\n", di->name)); |
Greg Kroah-Hartman | 0965ae8 | 2010-10-12 12:50:15 -0700 | [diff] [blame] | 1533 | return false; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1534 | } |
| 1535 | |
| 1536 | PHYSADDRHISET(di->txdpa, 0); |
| 1537 | ASSERT(PHYSADDRHI(di->txdpaorig) == 0); |
Greg Kroah-Hartman | f024c48 | 2010-10-21 10:50:21 -0700 | [diff] [blame] | 1538 | di->txd32 = (dma32dd_t *) roundup((unsigned long)va, align); |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1539 | di->txdalign = |
Greg Kroah-Hartman | c03b63c | 2010-10-08 11:20:01 -0700 | [diff] [blame] | 1540 | (uint) ((s8 *)di->txd32 - (s8 *) va); |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1541 | |
| 1542 | PHYSADDRLOSET(di->txdpa, |
| 1543 | PHYSADDRLO(di->txdpaorig) + di->txdalign); |
| 1544 | /* Make sure that alignment didn't overflow */ |
| 1545 | ASSERT(PHYSADDRLO(di->txdpa) >= PHYSADDRLO(di->txdpaorig)); |
| 1546 | |
| 1547 | di->txdalloc = alloced; |
Greg Kroah-Hartman | f024c48 | 2010-10-21 10:50:21 -0700 | [diff] [blame] | 1548 | ASSERT(IS_ALIGNED((unsigned long)di->txd32, align)); |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1549 | } else { |
Jason Cooper | ca8c1e5 | 2010-09-14 09:45:33 -0400 | [diff] [blame] | 1550 | va = dma_ringalloc(di->osh, D32RINGALIGN, size, &align_bits, |
| 1551 | &alloced, &di->rxdpaorig, &di->rx_dmah); |
| 1552 | if (va == NULL) { |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1553 | DMA_ERROR(("%s: dma_alloc: DMA_ALLOC_CONSISTENT(nrxd) failed\n", di->name)); |
Greg Kroah-Hartman | 0965ae8 | 2010-10-12 12:50:15 -0700 | [diff] [blame] | 1554 | return false; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1555 | } |
| 1556 | |
| 1557 | PHYSADDRHISET(di->rxdpa, 0); |
| 1558 | ASSERT(PHYSADDRHI(di->rxdpaorig) == 0); |
Greg Kroah-Hartman | f024c48 | 2010-10-21 10:50:21 -0700 | [diff] [blame] | 1559 | di->rxd32 = (dma32dd_t *) roundup((unsigned long)va, align); |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1560 | di->rxdalign = |
Greg Kroah-Hartman | c03b63c | 2010-10-08 11:20:01 -0700 | [diff] [blame] | 1561 | (uint) ((s8 *)di->rxd32 - (s8 *) va); |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1562 | |
| 1563 | PHYSADDRLOSET(di->rxdpa, |
| 1564 | PHYSADDRLO(di->rxdpaorig) + di->rxdalign); |
| 1565 | /* Make sure that alignment didn't overflow */ |
| 1566 | ASSERT(PHYSADDRLO(di->rxdpa) >= PHYSADDRLO(di->rxdpaorig)); |
| 1567 | di->rxdalloc = alloced; |
Greg Kroah-Hartman | f024c48 | 2010-10-21 10:50:21 -0700 | [diff] [blame] | 1568 | ASSERT(IS_ALIGNED((unsigned long)di->rxd32, align)); |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1569 | } |
| 1570 | |
Greg Kroah-Hartman | 0f0881b | 2010-10-12 12:15:18 -0700 | [diff] [blame] | 1571 | return true; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1572 | } |
| 1573 | |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 1574 | static bool dma32_txreset(dma_info_t *di) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1575 | { |
Greg Kroah-Hartman | 66cbd3a | 2010-10-08 11:05:47 -0700 | [diff] [blame] | 1576 | u32 status; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1577 | |
| 1578 | if (di->ntxd == 0) |
Greg Kroah-Hartman | 0f0881b | 2010-10-12 12:15:18 -0700 | [diff] [blame] | 1579 | return true; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1580 | |
| 1581 | /* suspend tx DMA first */ |
| 1582 | W_REG(di->osh, &di->d32txregs->control, XC_SE); |
| 1583 | SPINWAIT(((status = |
| 1584 | (R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK)) |
| 1585 | != XS_XS_DISABLED) && (status != XS_XS_IDLE) |
| 1586 | && (status != XS_XS_STOPPED), (10000)); |
| 1587 | |
| 1588 | W_REG(di->osh, &di->d32txregs->control, 0); |
| 1589 | SPINWAIT(((status = (R_REG(di->osh, |
| 1590 | &di->d32txregs->status) & XS_XS_MASK)) != |
| 1591 | XS_XS_DISABLED), 10000); |
| 1592 | |
| 1593 | /* wait for the last transaction to complete */ |
mike.rapoport@gmail.com | 7383141 | 2010-10-13 00:09:07 +0200 | [diff] [blame] | 1594 | udelay(300); |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1595 | |
Jason Cooper | 90ea229 | 2010-09-14 09:45:32 -0400 | [diff] [blame] | 1596 | return status == XS_XS_DISABLED; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1597 | } |
| 1598 | |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 1599 | static bool dma32_rxidle(dma_info_t *di) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1600 | { |
| 1601 | DMA_TRACE(("%s: dma_rxidle\n", di->name)); |
| 1602 | |
| 1603 | if (di->nrxd == 0) |
Greg Kroah-Hartman | 0f0881b | 2010-10-12 12:15:18 -0700 | [diff] [blame] | 1604 | return true; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1605 | |
| 1606 | return ((R_REG(di->osh, &di->d32rxregs->status) & RS_CD_MASK) == |
| 1607 | R_REG(di->osh, &di->d32rxregs->ptr)); |
| 1608 | } |
| 1609 | |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 1610 | static bool dma32_rxreset(dma_info_t *di) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1611 | { |
Greg Kroah-Hartman | 66cbd3a | 2010-10-08 11:05:47 -0700 | [diff] [blame] | 1612 | u32 status; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1613 | |
| 1614 | if (di->nrxd == 0) |
Greg Kroah-Hartman | 0f0881b | 2010-10-12 12:15:18 -0700 | [diff] [blame] | 1615 | return true; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1616 | |
| 1617 | W_REG(di->osh, &di->d32rxregs->control, 0); |
| 1618 | SPINWAIT(((status = (R_REG(di->osh, |
| 1619 | &di->d32rxregs->status) & RS_RS_MASK)) != |
| 1620 | RS_RS_DISABLED), 10000); |
| 1621 | |
Jason Cooper | 90ea229 | 2010-09-14 09:45:32 -0400 | [diff] [blame] | 1622 | return status == RS_RS_DISABLED; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1623 | } |
| 1624 | |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 1625 | static bool dma32_rxenabled(dma_info_t *di) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1626 | { |
Greg Kroah-Hartman | 66cbd3a | 2010-10-08 11:05:47 -0700 | [diff] [blame] | 1627 | u32 rc; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1628 | |
| 1629 | rc = R_REG(di->osh, &di->d32rxregs->control); |
Jason Cooper | 90ea229 | 2010-09-14 09:45:32 -0400 | [diff] [blame] | 1630 | return (rc != 0xffffffff) && (rc & RC_RE); |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1631 | } |
| 1632 | |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 1633 | static bool dma32_txsuspendedidle(dma_info_t *di) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1634 | { |
| 1635 | if (di->ntxd == 0) |
Greg Kroah-Hartman | 0f0881b | 2010-10-12 12:15:18 -0700 | [diff] [blame] | 1636 | return true; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1637 | |
| 1638 | if (!(R_REG(di->osh, &di->d32txregs->control) & XC_SE)) |
| 1639 | return 0; |
| 1640 | |
| 1641 | if ((R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK) != XS_XS_IDLE) |
| 1642 | return 0; |
| 1643 | |
mike.rapoport@gmail.com | 7383141 | 2010-10-13 00:09:07 +0200 | [diff] [blame] | 1644 | udelay(2); |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1645 | return ((R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK) == |
| 1646 | XS_XS_IDLE); |
| 1647 | } |
| 1648 | |
| 1649 | /* !! tx entry routine |
| 1650 | * supports full 32bit dma engine buffer addressing so |
| 1651 | * dma buffers can cross 4 Kbyte page boundaries. |
| 1652 | * |
| 1653 | * WARNING: call must check the return value for error. |
| 1654 | * the error(toss frames) could be fatal and cause many subsequent hard to debug problems |
| 1655 | */ |
Arend van Spriel | c26b137 | 2010-11-23 14:06:23 +0100 | [diff] [blame] | 1656 | static int dma32_txfast(dma_info_t *di, struct sk_buff *p0, bool commit) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1657 | { |
Arend van Spriel | c26b137 | 2010-11-23 14:06:23 +0100 | [diff] [blame] | 1658 | struct sk_buff *p, *next; |
Greg Kroah-Hartman | 580a0bd | 2010-10-05 11:09:48 -0700 | [diff] [blame] | 1659 | unsigned char *data; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1660 | uint len; |
Greg Kroah-Hartman | 7d4df48 | 2010-10-07 17:04:47 -0700 | [diff] [blame] | 1661 | u16 txout; |
Greg Kroah-Hartman | 66cbd3a | 2010-10-08 11:05:47 -0700 | [diff] [blame] | 1662 | u32 flags = 0; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1663 | dmaaddr_t pa; |
| 1664 | |
| 1665 | DMA_TRACE(("%s: dma_txfast\n", di->name)); |
| 1666 | |
| 1667 | txout = di->txout; |
| 1668 | |
| 1669 | /* |
| 1670 | * Walk the chain of packet buffers |
| 1671 | * allocating and initializing transmit descriptor entries. |
| 1672 | */ |
| 1673 | for (p = p0; p; p = next) { |
| 1674 | uint nsegs, j; |
| 1675 | hnddma_seg_map_t *map; |
| 1676 | |
Arend van Spriel | 54991ad | 2010-11-23 14:06:24 +0100 | [diff] [blame] | 1677 | data = p->data; |
| 1678 | len = p->len; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1679 | #ifdef BCM_DMAPAD |
| 1680 | len += PKTDMAPAD(di->osh, p); |
| 1681 | #endif |
Arend van Spriel | 54991ad | 2010-11-23 14:06:24 +0100 | [diff] [blame] | 1682 | next = p->next; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1683 | |
| 1684 | /* return nonzero if out of tx descriptors */ |
| 1685 | if (NEXTTXD(txout) == di->txin) |
| 1686 | goto outoftxd; |
| 1687 | |
| 1688 | if (len == 0) |
| 1689 | continue; |
| 1690 | |
| 1691 | if (DMASGLIST_ENAB) |
Brett Rudley | 9249ede | 2010-11-30 20:09:49 -0800 | [diff] [blame^] | 1692 | memset(&di->txp_dmah[txout], 0, |
| 1693 | sizeof(hnddma_seg_map_t)); |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1694 | |
| 1695 | /* get physical address of buffer start */ |
| 1696 | pa = DMA_MAP(di->osh, data, len, DMA_TX, p, |
| 1697 | &di->txp_dmah[txout]); |
| 1698 | |
| 1699 | if (DMASGLIST_ENAB) { |
| 1700 | map = &di->txp_dmah[txout]; |
| 1701 | |
| 1702 | /* See if all the segments can be accounted for */ |
| 1703 | if (map->nsegs > |
| 1704 | (uint) (di->ntxd - NTXDACTIVE(di->txin, di->txout) - |
| 1705 | 1)) |
| 1706 | goto outoftxd; |
| 1707 | |
| 1708 | nsegs = map->nsegs; |
| 1709 | } else |
| 1710 | nsegs = 1; |
| 1711 | |
| 1712 | for (j = 1; j <= nsegs; j++) { |
| 1713 | flags = 0; |
| 1714 | if (p == p0 && j == 1) |
| 1715 | flags |= CTRL_SOF; |
| 1716 | |
| 1717 | /* With a DMA segment list, Descriptor table is filled |
| 1718 | * using the segment list instead of looping over |
| 1719 | * buffers in multi-chain DMA. Therefore, EOF for SGLIST is when |
| 1720 | * end of segment list is reached. |
| 1721 | */ |
| 1722 | if ((!DMASGLIST_ENAB && next == NULL) || |
| 1723 | (DMASGLIST_ENAB && j == nsegs)) |
| 1724 | flags |= (CTRL_IOC | CTRL_EOF); |
| 1725 | if (txout == (di->ntxd - 1)) |
| 1726 | flags |= CTRL_EOT; |
| 1727 | |
| 1728 | if (DMASGLIST_ENAB) { |
| 1729 | len = map->segs[j - 1].length; |
| 1730 | pa = map->segs[j - 1].addr; |
| 1731 | } |
| 1732 | ASSERT(PHYSADDRHI(pa) == 0); |
| 1733 | |
| 1734 | dma32_dd_upd(di, di->txd32, pa, txout, &flags, len); |
| 1735 | ASSERT(di->txp[txout] == NULL); |
| 1736 | |
| 1737 | txout = NEXTTXD(txout); |
| 1738 | } |
| 1739 | |
| 1740 | /* See above. No need to loop over individual buffers */ |
| 1741 | if (DMASGLIST_ENAB) |
| 1742 | break; |
| 1743 | } |
| 1744 | |
| 1745 | /* if last txd eof not set, fix it */ |
| 1746 | if (!(flags & CTRL_EOF)) |
| 1747 | W_SM(&di->txd32[PREVTXD(txout)].ctrl, |
| 1748 | BUS_SWAP32(flags | CTRL_IOC | CTRL_EOF)); |
| 1749 | |
| 1750 | /* save the packet */ |
| 1751 | di->txp[PREVTXD(txout)] = p0; |
| 1752 | |
| 1753 | /* bump the tx descriptor index */ |
| 1754 | di->txout = txout; |
| 1755 | |
| 1756 | /* kick the chip */ |
| 1757 | if (commit) |
| 1758 | W_REG(di->osh, &di->d32txregs->ptr, I2B(txout, dma32dd_t)); |
| 1759 | |
| 1760 | /* tx flow control */ |
| 1761 | di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1; |
| 1762 | |
Jason Cooper | 90ea229 | 2010-09-14 09:45:32 -0400 | [diff] [blame] | 1763 | return 0; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1764 | |
| 1765 | outoftxd: |
| 1766 | DMA_ERROR(("%s: dma_txfast: out of txds\n", di->name)); |
Greg Kroah-Hartman | 0f0881b | 2010-10-12 12:15:18 -0700 | [diff] [blame] | 1767 | PKTFREE(di->osh, p0, true); |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1768 | di->hnddma.txavail = 0; |
| 1769 | di->hnddma.txnobuf++; |
Jason Cooper | 90ea229 | 2010-09-14 09:45:32 -0400 | [diff] [blame] | 1770 | return -1; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1771 | } |
| 1772 | |
| 1773 | /* |
| 1774 | * Reclaim next completed txd (txds if using chained buffers) in the range |
| 1775 | * specified and return associated packet. |
| 1776 | * If range is HNDDMA_RANGE_TRANSMITTED, reclaim descriptors that have be |
| 1777 | * transmitted as noted by the hardware "CurrDescr" pointer. |
| 1778 | * If range is HNDDMA_RANGE_TRANSFERED, reclaim descriptors that have be |
| 1779 | * transfered by the DMA as noted by the hardware "ActiveDescr" pointer. |
| 1780 | * If range is HNDDMA_RANGE_ALL, reclaim all txd(s) posted to the ring and |
| 1781 | * return associated packet regardless of the value of hardware pointers. |
| 1782 | */ |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 1783 | static void *dma32_getnexttxp(dma_info_t *di, txd_range_t range) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1784 | { |
Greg Kroah-Hartman | 7d4df48 | 2010-10-07 17:04:47 -0700 | [diff] [blame] | 1785 | u16 start, end, i; |
| 1786 | u16 active_desc; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1787 | void *txp; |
| 1788 | |
| 1789 | DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name, |
| 1790 | (range == HNDDMA_RANGE_ALL) ? "all" : |
| 1791 | ((range == |
| 1792 | HNDDMA_RANGE_TRANSMITTED) ? "transmitted" : |
| 1793 | "transfered"))); |
| 1794 | |
| 1795 | if (di->ntxd == 0) |
Jason Cooper | 90ea229 | 2010-09-14 09:45:32 -0400 | [diff] [blame] | 1796 | return NULL; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1797 | |
| 1798 | txp = NULL; |
| 1799 | |
| 1800 | start = di->txin; |
| 1801 | if (range == HNDDMA_RANGE_ALL) |
| 1802 | end = di->txout; |
| 1803 | else { |
| 1804 | dma32regs_t *dregs = di->d32txregs; |
| 1805 | |
| 1806 | end = |
Greg Kroah-Hartman | 7d4df48 | 2010-10-07 17:04:47 -0700 | [diff] [blame] | 1807 | (u16) B2I(R_REG(di->osh, &dregs->status) & XS_CD_MASK, |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1808 | dma32dd_t); |
| 1809 | |
| 1810 | if (range == HNDDMA_RANGE_TRANSFERED) { |
| 1811 | active_desc = |
Greg Kroah-Hartman | 7d4df48 | 2010-10-07 17:04:47 -0700 | [diff] [blame] | 1812 | (u16) ((R_REG(di->osh, &dregs->status) & |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1813 | XS_AD_MASK) >> XS_AD_SHIFT); |
Greg Kroah-Hartman | 7d4df48 | 2010-10-07 17:04:47 -0700 | [diff] [blame] | 1814 | active_desc = (u16) B2I(active_desc, dma32dd_t); |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1815 | if (end != active_desc) |
| 1816 | end = PREVTXD(active_desc); |
| 1817 | } |
| 1818 | } |
| 1819 | |
| 1820 | if ((start == 0) && (end > di->txout)) |
| 1821 | goto bogus; |
| 1822 | |
| 1823 | for (i = start; i != end && !txp; i = NEXTTXD(i)) { |
| 1824 | dmaaddr_t pa; |
| 1825 | hnddma_seg_map_t *map = NULL; |
| 1826 | uint size, j, nsegs; |
| 1827 | |
| 1828 | PHYSADDRLOSET(pa, |
| 1829 | (BUS_SWAP32(R_SM(&di->txd32[i].addr)) - |
| 1830 | di->dataoffsetlow)); |
| 1831 | PHYSADDRHISET(pa, 0); |
| 1832 | |
| 1833 | if (DMASGLIST_ENAB) { |
| 1834 | map = &di->txp_dmah[i]; |
| 1835 | size = map->origsize; |
| 1836 | nsegs = map->nsegs; |
| 1837 | } else { |
| 1838 | size = |
| 1839 | (BUS_SWAP32(R_SM(&di->txd32[i].ctrl)) & |
| 1840 | CTRL_BC_MASK); |
| 1841 | nsegs = 1; |
| 1842 | } |
| 1843 | |
| 1844 | for (j = nsegs; j > 0; j--) { |
| 1845 | W_SM(&di->txd32[i].addr, 0xdeadbeef); |
| 1846 | |
| 1847 | txp = di->txp[i]; |
| 1848 | di->txp[i] = NULL; |
| 1849 | if (j > 1) |
| 1850 | i = NEXTTXD(i); |
| 1851 | } |
| 1852 | |
| 1853 | DMA_UNMAP(di->osh, pa, size, DMA_TX, txp, map); |
| 1854 | } |
| 1855 | |
| 1856 | di->txin = i; |
| 1857 | |
| 1858 | /* tx flow control */ |
| 1859 | di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1; |
| 1860 | |
Jason Cooper | 90ea229 | 2010-09-14 09:45:32 -0400 | [diff] [blame] | 1861 | return txp; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1862 | |
| 1863 | bogus: |
| 1864 | DMA_NONE(("dma_getnexttxp: bogus curr: start %d end %d txout %d force %d\n", start, end, di->txout, forceall)); |
Jason Cooper | 90ea229 | 2010-09-14 09:45:32 -0400 | [diff] [blame] | 1865 | return NULL; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1866 | } |
| 1867 | |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 1868 | static void *dma32_getnextrxp(dma_info_t *di, bool forceall) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1869 | { |
| 1870 | uint i, curr; |
| 1871 | void *rxp; |
| 1872 | dmaaddr_t pa; |
| 1873 | /* if forcing, dma engine must be disabled */ |
| 1874 | ASSERT(!forceall || !dma32_rxenabled(di)); |
| 1875 | |
| 1876 | i = di->rxin; |
| 1877 | |
| 1878 | /* return if no packets posted */ |
| 1879 | if (i == di->rxout) |
Jason Cooper | 90ea229 | 2010-09-14 09:45:32 -0400 | [diff] [blame] | 1880 | return NULL; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1881 | |
| 1882 | curr = |
| 1883 | B2I(R_REG(di->osh, &di->d32rxregs->status) & RS_CD_MASK, dma32dd_t); |
| 1884 | |
| 1885 | /* ignore curr if forceall */ |
| 1886 | if (!forceall && (i == curr)) |
Jason Cooper | 90ea229 | 2010-09-14 09:45:32 -0400 | [diff] [blame] | 1887 | return NULL; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1888 | |
| 1889 | /* get the packet pointer that corresponds to the rx descriptor */ |
| 1890 | rxp = di->rxp[i]; |
| 1891 | ASSERT(rxp); |
| 1892 | di->rxp[i] = NULL; |
| 1893 | |
| 1894 | PHYSADDRLOSET(pa, |
| 1895 | (BUS_SWAP32(R_SM(&di->rxd32[i].addr)) - |
| 1896 | di->dataoffsetlow)); |
| 1897 | PHYSADDRHISET(pa, 0); |
| 1898 | |
| 1899 | /* clear this packet from the descriptor ring */ |
| 1900 | DMA_UNMAP(di->osh, pa, di->rxbufsize, DMA_RX, rxp, &di->rxp_dmah[i]); |
| 1901 | |
| 1902 | W_SM(&di->rxd32[i].addr, 0xdeadbeef); |
| 1903 | |
| 1904 | di->rxin = NEXTRXD(i); |
| 1905 | |
Jason Cooper | 90ea229 | 2010-09-14 09:45:32 -0400 | [diff] [blame] | 1906 | return rxp; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1907 | } |
| 1908 | |
| 1909 | /* |
| 1910 | * Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin). |
| 1911 | */ |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 1912 | static void dma32_txrotate(dma_info_t *di) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1913 | { |
Greg Kroah-Hartman | 7d4df48 | 2010-10-07 17:04:47 -0700 | [diff] [blame] | 1914 | u16 ad; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1915 | uint nactive; |
| 1916 | uint rot; |
Greg Kroah-Hartman | 7d4df48 | 2010-10-07 17:04:47 -0700 | [diff] [blame] | 1917 | u16 old, new; |
Greg Kroah-Hartman | 66cbd3a | 2010-10-08 11:05:47 -0700 | [diff] [blame] | 1918 | u32 w; |
Greg Kroah-Hartman | 7d4df48 | 2010-10-07 17:04:47 -0700 | [diff] [blame] | 1919 | u16 first, last; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1920 | |
| 1921 | ASSERT(dma32_txsuspendedidle(di)); |
| 1922 | |
| 1923 | nactive = _dma_txactive(di); |
Greg Kroah-Hartman | 7d4df48 | 2010-10-07 17:04:47 -0700 | [diff] [blame] | 1924 | ad = (u16) (B2I |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1925 | (((R_REG(di->osh, &di->d32txregs->status) & XS_AD_MASK) |
| 1926 | >> XS_AD_SHIFT), dma32dd_t)); |
| 1927 | rot = TXD(ad - di->txin); |
| 1928 | |
| 1929 | ASSERT(rot < di->ntxd); |
| 1930 | |
| 1931 | /* full-ring case is a lot harder - don't worry about this */ |
| 1932 | if (rot >= (di->ntxd - nactive)) { |
| 1933 | DMA_ERROR(("%s: dma_txrotate: ring full - punt\n", di->name)); |
| 1934 | return; |
| 1935 | } |
| 1936 | |
| 1937 | first = di->txin; |
| 1938 | last = PREVTXD(di->txout); |
| 1939 | |
| 1940 | /* move entries starting at last and moving backwards to first */ |
| 1941 | for (old = last; old != PREVTXD(first); old = PREVTXD(old)) { |
| 1942 | new = TXD(old + rot); |
| 1943 | |
| 1944 | /* |
| 1945 | * Move the tx dma descriptor. |
| 1946 | * EOT is set only in the last entry in the ring. |
| 1947 | */ |
| 1948 | w = BUS_SWAP32(R_SM(&di->txd32[old].ctrl)) & ~CTRL_EOT; |
| 1949 | if (new == (di->ntxd - 1)) |
| 1950 | w |= CTRL_EOT; |
| 1951 | W_SM(&di->txd32[new].ctrl, BUS_SWAP32(w)); |
| 1952 | W_SM(&di->txd32[new].addr, R_SM(&di->txd32[old].addr)); |
| 1953 | |
| 1954 | /* zap the old tx dma descriptor address field */ |
| 1955 | W_SM(&di->txd32[old].addr, BUS_SWAP32(0xdeadbeef)); |
| 1956 | |
| 1957 | /* move the corresponding txp[] entry */ |
| 1958 | ASSERT(di->txp[new] == NULL); |
| 1959 | di->txp[new] = di->txp[old]; |
| 1960 | |
| 1961 | /* Move the segment map as well */ |
| 1962 | if (DMASGLIST_ENAB) { |
| 1963 | bcopy(&di->txp_dmah[old], &di->txp_dmah[new], |
| 1964 | sizeof(hnddma_seg_map_t)); |
Brett Rudley | 9249ede | 2010-11-30 20:09:49 -0800 | [diff] [blame^] | 1965 | memset(&di->txp_dmah[old], 0, sizeof(hnddma_seg_map_t)); |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1966 | } |
| 1967 | |
| 1968 | di->txp[old] = NULL; |
| 1969 | } |
| 1970 | |
| 1971 | /* update txin and txout */ |
| 1972 | di->txin = ad; |
| 1973 | di->txout = TXD(di->txout + rot); |
| 1974 | di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1; |
| 1975 | |
| 1976 | /* kick the chip */ |
| 1977 | W_REG(di->osh, &di->d32txregs->ptr, I2B(di->txout, dma32dd_t)); |
| 1978 | } |
| 1979 | |
| 1980 | /* 64-bit DMA functions */ |
| 1981 | |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 1982 | static void dma64_txinit(dma_info_t *di) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1983 | { |
Greg Kroah-Hartman | 66cbd3a | 2010-10-08 11:05:47 -0700 | [diff] [blame] | 1984 | u32 control = D64_XC_XE; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1985 | |
| 1986 | DMA_TRACE(("%s: dma_txinit\n", di->name)); |
| 1987 | |
| 1988 | if (di->ntxd == 0) |
| 1989 | return; |
| 1990 | |
| 1991 | di->txin = di->txout = 0; |
| 1992 | di->hnddma.txavail = di->ntxd - 1; |
| 1993 | |
| 1994 | /* clear tx descriptor ring */ |
Greg Kroah-Hartman | c03b63c | 2010-10-08 11:20:01 -0700 | [diff] [blame] | 1995 | BZERO_SM((void *)di->txd64, (di->ntxd * sizeof(dma64dd_t))); |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 1996 | |
| 1997 | /* DMA engine with out alignment requirement requires table to be inited |
| 1998 | * before enabling the engine |
| 1999 | */ |
| 2000 | if (!di->aligndesc_4k) |
| 2001 | _dma_ddtable_init(di, DMA_TX, di->txdpa); |
| 2002 | |
| 2003 | if ((di->hnddma.dmactrlflags & DMA_CTRL_PEN) == 0) |
| 2004 | control |= D64_XC_PD; |
| 2005 | OR_REG(di->osh, &di->d64txregs->control, control); |
| 2006 | |
| 2007 | /* DMA engine with alignment requirement requires table to be inited |
| 2008 | * before enabling the engine |
| 2009 | */ |
| 2010 | if (di->aligndesc_4k) |
| 2011 | _dma_ddtable_init(di, DMA_TX, di->txdpa); |
| 2012 | } |
| 2013 | |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 2014 | static bool dma64_txenabled(dma_info_t *di) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2015 | { |
Greg Kroah-Hartman | 66cbd3a | 2010-10-08 11:05:47 -0700 | [diff] [blame] | 2016 | u32 xc; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2017 | |
| 2018 | /* If the chip is dead, it is not enabled :-) */ |
| 2019 | xc = R_REG(di->osh, &di->d64txregs->control); |
Jason Cooper | 90ea229 | 2010-09-14 09:45:32 -0400 | [diff] [blame] | 2020 | return (xc != 0xffffffff) && (xc & D64_XC_XE); |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2021 | } |
| 2022 | |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 2023 | static void dma64_txsuspend(dma_info_t *di) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2024 | { |
| 2025 | DMA_TRACE(("%s: dma_txsuspend\n", di->name)); |
| 2026 | |
| 2027 | if (di->ntxd == 0) |
| 2028 | return; |
| 2029 | |
| 2030 | OR_REG(di->osh, &di->d64txregs->control, D64_XC_SE); |
| 2031 | } |
| 2032 | |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 2033 | static void dma64_txresume(dma_info_t *di) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2034 | { |
| 2035 | DMA_TRACE(("%s: dma_txresume\n", di->name)); |
| 2036 | |
| 2037 | if (di->ntxd == 0) |
| 2038 | return; |
| 2039 | |
| 2040 | AND_REG(di->osh, &di->d64txregs->control, ~D64_XC_SE); |
| 2041 | } |
| 2042 | |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 2043 | static bool dma64_txsuspended(dma_info_t *di) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2044 | { |
| 2045 | return (di->ntxd == 0) || |
| 2046 | ((R_REG(di->osh, &di->d64txregs->control) & D64_XC_SE) == |
| 2047 | D64_XC_SE); |
| 2048 | } |
| 2049 | |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 2050 | static void BCMFASTPATH dma64_txreclaim(dma_info_t *di, txd_range_t range) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2051 | { |
| 2052 | void *p; |
| 2053 | |
| 2054 | DMA_TRACE(("%s: dma_txreclaim %s\n", di->name, |
| 2055 | (range == HNDDMA_RANGE_ALL) ? "all" : |
| 2056 | ((range == |
| 2057 | HNDDMA_RANGE_TRANSMITTED) ? "transmitted" : |
| 2058 | "transfered"))); |
| 2059 | |
| 2060 | if (di->txin == di->txout) |
| 2061 | return; |
| 2062 | |
| 2063 | while ((p = dma64_getnexttxp(di, range))) { |
| 2064 | /* For unframed data, we don't have any packets to free */ |
| 2065 | if (!(di->hnddma.dmactrlflags & DMA_CTRL_UNFRAMED)) |
Greg Kroah-Hartman | 0f0881b | 2010-10-12 12:15:18 -0700 | [diff] [blame] | 2066 | PKTFREE(di->osh, p, true); |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2067 | } |
| 2068 | } |
| 2069 | |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 2070 | static bool dma64_txstopped(dma_info_t *di) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2071 | { |
| 2072 | return ((R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK) == |
| 2073 | D64_XS0_XS_STOPPED); |
| 2074 | } |
| 2075 | |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 2076 | static bool dma64_rxstopped(dma_info_t *di) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2077 | { |
| 2078 | return ((R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_RS_MASK) == |
| 2079 | D64_RS0_RS_STOPPED); |
| 2080 | } |
| 2081 | |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 2082 | static bool dma64_alloc(dma_info_t *di, uint direction) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2083 | { |
Greg Kroah-Hartman | 7d4df48 | 2010-10-07 17:04:47 -0700 | [diff] [blame] | 2084 | u16 size; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2085 | uint ddlen; |
| 2086 | void *va; |
| 2087 | uint alloced = 0; |
Greg Kroah-Hartman | 7d4df48 | 2010-10-07 17:04:47 -0700 | [diff] [blame] | 2088 | u16 align; |
| 2089 | u16 align_bits; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2090 | |
| 2091 | ddlen = sizeof(dma64dd_t); |
| 2092 | |
| 2093 | size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen); |
| 2094 | align_bits = di->dmadesc_align; |
| 2095 | align = (1 << align_bits); |
| 2096 | |
| 2097 | if (direction == DMA_TX) { |
Jason Cooper | ca8c1e5 | 2010-09-14 09:45:33 -0400 | [diff] [blame] | 2098 | va = dma_ringalloc(di->osh, D64RINGALIGN, size, &align_bits, |
| 2099 | &alloced, &di->txdpaorig, &di->tx_dmah); |
| 2100 | if (va == NULL) { |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2101 | DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(ntxd) failed\n", di->name)); |
Greg Kroah-Hartman | 0965ae8 | 2010-10-12 12:50:15 -0700 | [diff] [blame] | 2102 | return false; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2103 | } |
| 2104 | align = (1 << align_bits); |
Greg Kroah-Hartman | f024c48 | 2010-10-21 10:50:21 -0700 | [diff] [blame] | 2105 | di->txd64 = (dma64dd_t *) roundup((unsigned long)va, align); |
Greg Kroah-Hartman | c03b63c | 2010-10-08 11:20:01 -0700 | [diff] [blame] | 2106 | di->txdalign = (uint) ((s8 *)di->txd64 - (s8 *) va); |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2107 | PHYSADDRLOSET(di->txdpa, |
| 2108 | PHYSADDRLO(di->txdpaorig) + di->txdalign); |
| 2109 | /* Make sure that alignment didn't overflow */ |
| 2110 | ASSERT(PHYSADDRLO(di->txdpa) >= PHYSADDRLO(di->txdpaorig)); |
| 2111 | |
| 2112 | PHYSADDRHISET(di->txdpa, PHYSADDRHI(di->txdpaorig)); |
| 2113 | di->txdalloc = alloced; |
Greg Kroah-Hartman | f024c48 | 2010-10-21 10:50:21 -0700 | [diff] [blame] | 2114 | ASSERT(IS_ALIGNED((unsigned long)di->txd64, align)); |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2115 | } else { |
Jason Cooper | ca8c1e5 | 2010-09-14 09:45:33 -0400 | [diff] [blame] | 2116 | va = dma_ringalloc(di->osh, D64RINGALIGN, size, &align_bits, |
| 2117 | &alloced, &di->rxdpaorig, &di->rx_dmah); |
| 2118 | if (va == NULL) { |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2119 | DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(nrxd) failed\n", di->name)); |
Greg Kroah-Hartman | 0965ae8 | 2010-10-12 12:50:15 -0700 | [diff] [blame] | 2120 | return false; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2121 | } |
| 2122 | align = (1 << align_bits); |
Greg Kroah-Hartman | f024c48 | 2010-10-21 10:50:21 -0700 | [diff] [blame] | 2123 | di->rxd64 = (dma64dd_t *) roundup((unsigned long)va, align); |
Greg Kroah-Hartman | c03b63c | 2010-10-08 11:20:01 -0700 | [diff] [blame] | 2124 | di->rxdalign = (uint) ((s8 *)di->rxd64 - (s8 *) va); |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2125 | PHYSADDRLOSET(di->rxdpa, |
| 2126 | PHYSADDRLO(di->rxdpaorig) + di->rxdalign); |
| 2127 | /* Make sure that alignment didn't overflow */ |
| 2128 | ASSERT(PHYSADDRLO(di->rxdpa) >= PHYSADDRLO(di->rxdpaorig)); |
| 2129 | |
| 2130 | PHYSADDRHISET(di->rxdpa, PHYSADDRHI(di->rxdpaorig)); |
| 2131 | di->rxdalloc = alloced; |
Greg Kroah-Hartman | f024c48 | 2010-10-21 10:50:21 -0700 | [diff] [blame] | 2132 | ASSERT(IS_ALIGNED((unsigned long)di->rxd64, align)); |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2133 | } |
| 2134 | |
Greg Kroah-Hartman | 0f0881b | 2010-10-12 12:15:18 -0700 | [diff] [blame] | 2135 | return true; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2136 | } |
| 2137 | |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 2138 | static bool dma64_txreset(dma_info_t *di) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2139 | { |
Greg Kroah-Hartman | 66cbd3a | 2010-10-08 11:05:47 -0700 | [diff] [blame] | 2140 | u32 status; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2141 | |
| 2142 | if (di->ntxd == 0) |
Greg Kroah-Hartman | 0f0881b | 2010-10-12 12:15:18 -0700 | [diff] [blame] | 2143 | return true; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2144 | |
| 2145 | /* suspend tx DMA first */ |
| 2146 | W_REG(di->osh, &di->d64txregs->control, D64_XC_SE); |
| 2147 | SPINWAIT(((status = |
| 2148 | (R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK)) |
| 2149 | != D64_XS0_XS_DISABLED) && (status != D64_XS0_XS_IDLE) |
| 2150 | && (status != D64_XS0_XS_STOPPED), 10000); |
| 2151 | |
| 2152 | W_REG(di->osh, &di->d64txregs->control, 0); |
| 2153 | SPINWAIT(((status = |
| 2154 | (R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK)) |
| 2155 | != D64_XS0_XS_DISABLED), 10000); |
| 2156 | |
| 2157 | /* wait for the last transaction to complete */ |
mike.rapoport@gmail.com | 7383141 | 2010-10-13 00:09:07 +0200 | [diff] [blame] | 2158 | udelay(300); |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2159 | |
Jason Cooper | 90ea229 | 2010-09-14 09:45:32 -0400 | [diff] [blame] | 2160 | return status == D64_XS0_XS_DISABLED; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2161 | } |
| 2162 | |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 2163 | static bool dma64_rxidle(dma_info_t *di) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2164 | { |
| 2165 | DMA_TRACE(("%s: dma_rxidle\n", di->name)); |
| 2166 | |
| 2167 | if (di->nrxd == 0) |
Greg Kroah-Hartman | 0f0881b | 2010-10-12 12:15:18 -0700 | [diff] [blame] | 2168 | return true; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2169 | |
| 2170 | return ((R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_CD_MASK) == |
| 2171 | (R_REG(di->osh, &di->d64rxregs->ptr) & D64_RS0_CD_MASK)); |
| 2172 | } |
| 2173 | |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 2174 | static bool dma64_rxreset(dma_info_t *di) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2175 | { |
Greg Kroah-Hartman | 66cbd3a | 2010-10-08 11:05:47 -0700 | [diff] [blame] | 2176 | u32 status; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2177 | |
| 2178 | if (di->nrxd == 0) |
Greg Kroah-Hartman | 0f0881b | 2010-10-12 12:15:18 -0700 | [diff] [blame] | 2179 | return true; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2180 | |
| 2181 | W_REG(di->osh, &di->d64rxregs->control, 0); |
| 2182 | SPINWAIT(((status = |
| 2183 | (R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_RS_MASK)) |
| 2184 | != D64_RS0_RS_DISABLED), 10000); |
| 2185 | |
Jason Cooper | 90ea229 | 2010-09-14 09:45:32 -0400 | [diff] [blame] | 2186 | return status == D64_RS0_RS_DISABLED; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2187 | } |
| 2188 | |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 2189 | static bool dma64_rxenabled(dma_info_t *di) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2190 | { |
Greg Kroah-Hartman | 66cbd3a | 2010-10-08 11:05:47 -0700 | [diff] [blame] | 2191 | u32 rc; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2192 | |
| 2193 | rc = R_REG(di->osh, &di->d64rxregs->control); |
Jason Cooper | 90ea229 | 2010-09-14 09:45:32 -0400 | [diff] [blame] | 2194 | return (rc != 0xffffffff) && (rc & D64_RC_RE); |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2195 | } |
| 2196 | |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 2197 | static bool dma64_txsuspendedidle(dma_info_t *di) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2198 | { |
| 2199 | |
| 2200 | if (di->ntxd == 0) |
Greg Kroah-Hartman | 0f0881b | 2010-10-12 12:15:18 -0700 | [diff] [blame] | 2201 | return true; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2202 | |
| 2203 | if (!(R_REG(di->osh, &di->d64txregs->control) & D64_XC_SE)) |
| 2204 | return 0; |
| 2205 | |
| 2206 | if ((R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK) == |
| 2207 | D64_XS0_XS_IDLE) |
| 2208 | return 1; |
| 2209 | |
| 2210 | return 0; |
| 2211 | } |
| 2212 | |
| 2213 | /* Useful when sending unframed data. This allows us to get a progress report from the DMA. |
| 2214 | * We return a pointer to the beginning of the DATA buffer of the current descriptor. |
| 2215 | * If DMA is idle, we return NULL. |
| 2216 | */ |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 2217 | static void *dma64_getpos(dma_info_t *di, bool direction) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2218 | { |
| 2219 | void *va; |
| 2220 | bool idle; |
Greg Kroah-Hartman | 66cbd3a | 2010-10-08 11:05:47 -0700 | [diff] [blame] | 2221 | u32 cd_offset; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2222 | |
| 2223 | if (direction == DMA_TX) { |
| 2224 | cd_offset = |
| 2225 | R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_CD_MASK; |
| 2226 | idle = !NTXDACTIVE(di->txin, di->txout); |
| 2227 | va = di->txp[B2I(cd_offset, dma64dd_t)]; |
| 2228 | } else { |
| 2229 | cd_offset = |
| 2230 | R_REG(di->osh, &di->d64rxregs->status0) & D64_XS0_CD_MASK; |
| 2231 | idle = !NRXDACTIVE(di->rxin, di->rxout); |
| 2232 | va = di->rxp[B2I(cd_offset, dma64dd_t)]; |
| 2233 | } |
| 2234 | |
| 2235 | /* If DMA is IDLE, return NULL */ |
| 2236 | if (idle) { |
| 2237 | DMA_TRACE(("%s: DMA idle, return NULL\n", __func__)); |
| 2238 | va = NULL; |
| 2239 | } |
| 2240 | |
| 2241 | return va; |
| 2242 | } |
| 2243 | |
| 2244 | /* TX of unframed data |
| 2245 | * |
| 2246 | * Adds a DMA ring descriptor for the data pointed to by "buf". |
| 2247 | * This is for DMA of a buffer of data and is unlike other hnddma TX functions |
| 2248 | * that take a pointer to a "packet" |
 * Each call to this results in a single descriptor being added for "len" bytes of
| 2250 | * data starting at "buf", it doesn't handle chained buffers. |
| 2251 | */ |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 2252 | static int dma64_txunframed(dma_info_t *di, void *buf, uint len, bool commit) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2253 | { |
Greg Kroah-Hartman | 7d4df48 | 2010-10-07 17:04:47 -0700 | [diff] [blame] | 2254 | u16 txout; |
Greg Kroah-Hartman | 66cbd3a | 2010-10-08 11:05:47 -0700 | [diff] [blame] | 2255 | u32 flags = 0; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2256 | dmaaddr_t pa; /* phys addr */ |
| 2257 | |
| 2258 | txout = di->txout; |
| 2259 | |
| 2260 | /* return nonzero if out of tx descriptors */ |
| 2261 | if (NEXTTXD(txout) == di->txin) |
| 2262 | goto outoftxd; |
| 2263 | |
| 2264 | if (len == 0) |
| 2265 | return 0; |
| 2266 | |
| 2267 | pa = DMA_MAP(di->osh, buf, len, DMA_TX, NULL, &di->txp_dmah[txout]); |
| 2268 | |
| 2269 | flags = (D64_CTRL1_SOF | D64_CTRL1_IOC | D64_CTRL1_EOF); |
| 2270 | |
| 2271 | if (txout == (di->ntxd - 1)) |
| 2272 | flags |= D64_CTRL1_EOT; |
| 2273 | |
| 2274 | dma64_dd_upd(di, di->txd64, pa, txout, &flags, len); |
| 2275 | ASSERT(di->txp[txout] == NULL); |
| 2276 | |
| 2277 | /* save the buffer pointer - used by dma_getpos */ |
| 2278 | di->txp[txout] = buf; |
| 2279 | |
| 2280 | txout = NEXTTXD(txout); |
| 2281 | /* bump the tx descriptor index */ |
| 2282 | di->txout = txout; |
| 2283 | |
| 2284 | /* kick the chip */ |
| 2285 | if (commit) { |
| 2286 | W_REG(di->osh, &di->d64txregs->ptr, |
| 2287 | di->xmtptrbase + I2B(txout, dma64dd_t)); |
| 2288 | } |
| 2289 | |
| 2290 | /* tx flow control */ |
| 2291 | di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1; |
| 2292 | |
Jason Cooper | 90ea229 | 2010-09-14 09:45:32 -0400 | [diff] [blame] | 2293 | return 0; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2294 | |
| 2295 | outoftxd: |
| 2296 | DMA_ERROR(("%s: %s: out of txds !!!\n", di->name, __func__)); |
| 2297 | di->hnddma.txavail = 0; |
| 2298 | di->hnddma.txnobuf++; |
Jason Cooper | 90ea229 | 2010-09-14 09:45:32 -0400 | [diff] [blame] | 2299 | return -1; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2300 | } |
| 2301 | |
| 2302 | /* !! tx entry routine |
| 2303 | * WARNING: call must check the return value for error. |
| 2304 | * the error(toss frames) could be fatal and cause many subsequent hard to debug problems |
| 2305 | */ |
Arend van Spriel | c26b137 | 2010-11-23 14:06:23 +0100 | [diff] [blame] | 2306 | static int BCMFASTPATH dma64_txfast(dma_info_t *di, struct sk_buff *p0, |
| 2307 | bool commit) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2308 | { |
Arend van Spriel | c26b137 | 2010-11-23 14:06:23 +0100 | [diff] [blame] | 2309 | struct sk_buff *p, *next; |
Greg Kroah-Hartman | 580a0bd | 2010-10-05 11:09:48 -0700 | [diff] [blame] | 2310 | unsigned char *data; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2311 | uint len; |
Greg Kroah-Hartman | 7d4df48 | 2010-10-07 17:04:47 -0700 | [diff] [blame] | 2312 | u16 txout; |
Greg Kroah-Hartman | 66cbd3a | 2010-10-08 11:05:47 -0700 | [diff] [blame] | 2313 | u32 flags = 0; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2314 | dmaaddr_t pa; |
| 2315 | |
| 2316 | DMA_TRACE(("%s: dma_txfast\n", di->name)); |
| 2317 | |
| 2318 | txout = di->txout; |
| 2319 | |
| 2320 | /* |
| 2321 | * Walk the chain of packet buffers |
| 2322 | * allocating and initializing transmit descriptor entries. |
| 2323 | */ |
| 2324 | for (p = p0; p; p = next) { |
| 2325 | uint nsegs, j; |
| 2326 | hnddma_seg_map_t *map; |
| 2327 | |
Arend van Spriel | 54991ad | 2010-11-23 14:06:24 +0100 | [diff] [blame] | 2328 | data = p->data; |
| 2329 | len = p->len; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2330 | #ifdef BCM_DMAPAD |
| 2331 | len += PKTDMAPAD(di->osh, p); |
| 2332 | #endif /* BCM_DMAPAD */ |
Arend van Spriel | 54991ad | 2010-11-23 14:06:24 +0100 | [diff] [blame] | 2333 | next = p->next; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2334 | |
| 2335 | /* return nonzero if out of tx descriptors */ |
| 2336 | if (NEXTTXD(txout) == di->txin) |
| 2337 | goto outoftxd; |
| 2338 | |
| 2339 | if (len == 0) |
| 2340 | continue; |
| 2341 | |
| 2342 | /* get physical address of buffer start */ |
| 2343 | if (DMASGLIST_ENAB) |
Brett Rudley | 9249ede | 2010-11-30 20:09:49 -0800 | [diff] [blame^] | 2344 | memset(&di->txp_dmah[txout], 0, |
| 2345 | sizeof(hnddma_seg_map_t)); |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2346 | |
| 2347 | pa = DMA_MAP(di->osh, data, len, DMA_TX, p, |
| 2348 | &di->txp_dmah[txout]); |
| 2349 | |
| 2350 | if (DMASGLIST_ENAB) { |
| 2351 | map = &di->txp_dmah[txout]; |
| 2352 | |
| 2353 | /* See if all the segments can be accounted for */ |
| 2354 | if (map->nsegs > |
| 2355 | (uint) (di->ntxd - NTXDACTIVE(di->txin, di->txout) - |
| 2356 | 1)) |
| 2357 | goto outoftxd; |
| 2358 | |
| 2359 | nsegs = map->nsegs; |
| 2360 | } else |
| 2361 | nsegs = 1; |
| 2362 | |
| 2363 | for (j = 1; j <= nsegs; j++) { |
| 2364 | flags = 0; |
| 2365 | if (p == p0 && j == 1) |
| 2366 | flags |= D64_CTRL1_SOF; |
| 2367 | |
| 2368 | /* With a DMA segment list, Descriptor table is filled |
| 2369 | * using the segment list instead of looping over |
| 2370 | * buffers in multi-chain DMA. Therefore, EOF for SGLIST is when |
| 2371 | * end of segment list is reached. |
| 2372 | */ |
| 2373 | if ((!DMASGLIST_ENAB && next == NULL) || |
| 2374 | (DMASGLIST_ENAB && j == nsegs)) |
| 2375 | flags |= (D64_CTRL1_IOC | D64_CTRL1_EOF); |
| 2376 | if (txout == (di->ntxd - 1)) |
| 2377 | flags |= D64_CTRL1_EOT; |
| 2378 | |
| 2379 | if (DMASGLIST_ENAB) { |
| 2380 | len = map->segs[j - 1].length; |
| 2381 | pa = map->segs[j - 1].addr; |
| 2382 | } |
| 2383 | dma64_dd_upd(di, di->txd64, pa, txout, &flags, len); |
| 2384 | ASSERT(di->txp[txout] == NULL); |
| 2385 | |
| 2386 | txout = NEXTTXD(txout); |
| 2387 | } |
| 2388 | |
| 2389 | /* See above. No need to loop over individual buffers */ |
| 2390 | if (DMASGLIST_ENAB) |
| 2391 | break; |
| 2392 | } |
| 2393 | |
| 2394 | /* if last txd eof not set, fix it */ |
| 2395 | if (!(flags & D64_CTRL1_EOF)) |
| 2396 | W_SM(&di->txd64[PREVTXD(txout)].ctrl1, |
| 2397 | BUS_SWAP32(flags | D64_CTRL1_IOC | D64_CTRL1_EOF)); |
| 2398 | |
| 2399 | /* save the packet */ |
| 2400 | di->txp[PREVTXD(txout)] = p0; |
| 2401 | |
| 2402 | /* bump the tx descriptor index */ |
| 2403 | di->txout = txout; |
| 2404 | |
| 2405 | /* kick the chip */ |
| 2406 | if (commit) |
| 2407 | W_REG(di->osh, &di->d64txregs->ptr, |
| 2408 | di->xmtptrbase + I2B(txout, dma64dd_t)); |
| 2409 | |
| 2410 | /* tx flow control */ |
| 2411 | di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1; |
| 2412 | |
Jason Cooper | 90ea229 | 2010-09-14 09:45:32 -0400 | [diff] [blame] | 2413 | return 0; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2414 | |
| 2415 | outoftxd: |
| 2416 | DMA_ERROR(("%s: dma_txfast: out of txds !!!\n", di->name)); |
Greg Kroah-Hartman | 0f0881b | 2010-10-12 12:15:18 -0700 | [diff] [blame] | 2417 | PKTFREE(di->osh, p0, true); |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2418 | di->hnddma.txavail = 0; |
| 2419 | di->hnddma.txnobuf++; |
Jason Cooper | 90ea229 | 2010-09-14 09:45:32 -0400 | [diff] [blame] | 2420 | return -1; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2421 | } |
| 2422 | |
| 2423 | /* |
| 2424 | * Reclaim next completed txd (txds if using chained buffers) in the range |
| 2425 | * specified and return associated packet. |
 * If range is HNDDMA_RANGE_TRANSMITTED, reclaim descriptors that have been
 * transmitted as noted by the hardware "CurrDescr" pointer.
 * If range is HNDDMA_RANGE_TRANSFERED, reclaim descriptors that have been
 * transferred by the DMA as noted by the hardware "ActiveDescr" pointer.
| 2430 | * If range is HNDDMA_RANGE_ALL, reclaim all txd(s) posted to the ring and |
| 2431 | * return associated packet regardless of the value of hardware pointers. |
| 2432 | */ |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 2433 | static void *BCMFASTPATH dma64_getnexttxp(dma_info_t *di, txd_range_t range) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2434 | { |
Greg Kroah-Hartman | 7d4df48 | 2010-10-07 17:04:47 -0700 | [diff] [blame] | 2435 | u16 start, end, i; |
| 2436 | u16 active_desc; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2437 | void *txp; |
| 2438 | |
| 2439 | DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name, |
| 2440 | (range == HNDDMA_RANGE_ALL) ? "all" : |
| 2441 | ((range == |
| 2442 | HNDDMA_RANGE_TRANSMITTED) ? "transmitted" : |
| 2443 | "transfered"))); |
| 2444 | |
| 2445 | if (di->ntxd == 0) |
Jason Cooper | 90ea229 | 2010-09-14 09:45:32 -0400 | [diff] [blame] | 2446 | return NULL; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2447 | |
| 2448 | txp = NULL; |
| 2449 | |
| 2450 | start = di->txin; |
| 2451 | if (range == HNDDMA_RANGE_ALL) |
| 2452 | end = di->txout; |
| 2453 | else { |
| 2454 | dma64regs_t *dregs = di->d64txregs; |
| 2455 | |
| 2456 | end = |
Greg Kroah-Hartman | 7d4df48 | 2010-10-07 17:04:47 -0700 | [diff] [blame] | 2457 | (u16) (B2I |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2458 | (((R_REG(di->osh, &dregs->status0) & |
| 2459 | D64_XS0_CD_MASK) - |
| 2460 | di->xmtptrbase) & D64_XS0_CD_MASK, dma64dd_t)); |
| 2461 | |
| 2462 | if (range == HNDDMA_RANGE_TRANSFERED) { |
| 2463 | active_desc = |
Greg Kroah-Hartman | 7d4df48 | 2010-10-07 17:04:47 -0700 | [diff] [blame] | 2464 | (u16) (R_REG(di->osh, &dregs->status1) & |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2465 | D64_XS1_AD_MASK); |
| 2466 | active_desc = |
| 2467 | (active_desc - di->xmtptrbase) & D64_XS0_CD_MASK; |
| 2468 | active_desc = B2I(active_desc, dma64dd_t); |
| 2469 | if (end != active_desc) |
| 2470 | end = PREVTXD(active_desc); |
| 2471 | } |
| 2472 | } |
| 2473 | |
| 2474 | if ((start == 0) && (end > di->txout)) |
| 2475 | goto bogus; |
| 2476 | |
| 2477 | for (i = start; i != end && !txp; i = NEXTTXD(i)) { |
| 2478 | dmaaddr_t pa; |
| 2479 | hnddma_seg_map_t *map = NULL; |
| 2480 | uint size, j, nsegs; |
| 2481 | |
| 2482 | PHYSADDRLOSET(pa, |
| 2483 | (BUS_SWAP32(R_SM(&di->txd64[i].addrlow)) - |
| 2484 | di->dataoffsetlow)); |
| 2485 | PHYSADDRHISET(pa, |
| 2486 | (BUS_SWAP32(R_SM(&di->txd64[i].addrhigh)) - |
| 2487 | di->dataoffsethigh)); |
| 2488 | |
| 2489 | if (DMASGLIST_ENAB) { |
| 2490 | map = &di->txp_dmah[i]; |
| 2491 | size = map->origsize; |
| 2492 | nsegs = map->nsegs; |
| 2493 | } else { |
| 2494 | size = |
| 2495 | (BUS_SWAP32(R_SM(&di->txd64[i].ctrl2)) & |
| 2496 | D64_CTRL2_BC_MASK); |
| 2497 | nsegs = 1; |
| 2498 | } |
| 2499 | |
| 2500 | for (j = nsegs; j > 0; j--) { |
| 2501 | W_SM(&di->txd64[i].addrlow, 0xdeadbeef); |
| 2502 | W_SM(&di->txd64[i].addrhigh, 0xdeadbeef); |
| 2503 | |
| 2504 | txp = di->txp[i]; |
| 2505 | di->txp[i] = NULL; |
| 2506 | if (j > 1) |
| 2507 | i = NEXTTXD(i); |
| 2508 | } |
| 2509 | |
| 2510 | DMA_UNMAP(di->osh, pa, size, DMA_TX, txp, map); |
| 2511 | } |
| 2512 | |
| 2513 | di->txin = i; |
| 2514 | |
| 2515 | /* tx flow control */ |
| 2516 | di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1; |
| 2517 | |
Jason Cooper | 90ea229 | 2010-09-14 09:45:32 -0400 | [diff] [blame] | 2518 | return txp; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2519 | |
| 2520 | bogus: |
| 2521 | DMA_NONE(("dma_getnexttxp: bogus curr: start %d end %d txout %d force %d\n", start, end, di->txout, forceall)); |
Jason Cooper | 90ea229 | 2010-09-14 09:45:32 -0400 | [diff] [blame] | 2522 | return NULL; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2523 | } |
| 2524 | |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 2525 | static void *BCMFASTPATH dma64_getnextrxp(dma_info_t *di, bool forceall) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2526 | { |
| 2527 | uint i, curr; |
| 2528 | void *rxp; |
| 2529 | dmaaddr_t pa; |
| 2530 | |
| 2531 | /* if forcing, dma engine must be disabled */ |
| 2532 | ASSERT(!forceall || !dma64_rxenabled(di)); |
| 2533 | |
| 2534 | i = di->rxin; |
| 2535 | |
| 2536 | /* return if no packets posted */ |
| 2537 | if (i == di->rxout) |
Jason Cooper | 90ea229 | 2010-09-14 09:45:32 -0400 | [diff] [blame] | 2538 | return NULL; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2539 | |
| 2540 | curr = |
| 2541 | B2I(((R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_CD_MASK) - |
| 2542 | di->rcvptrbase) & D64_RS0_CD_MASK, dma64dd_t); |
| 2543 | |
| 2544 | /* ignore curr if forceall */ |
| 2545 | if (!forceall && (i == curr)) |
Jason Cooper | 90ea229 | 2010-09-14 09:45:32 -0400 | [diff] [blame] | 2546 | return NULL; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2547 | |
| 2548 | /* get the packet pointer that corresponds to the rx descriptor */ |
| 2549 | rxp = di->rxp[i]; |
| 2550 | ASSERT(rxp); |
| 2551 | di->rxp[i] = NULL; |
| 2552 | |
| 2553 | PHYSADDRLOSET(pa, |
| 2554 | (BUS_SWAP32(R_SM(&di->rxd64[i].addrlow)) - |
| 2555 | di->dataoffsetlow)); |
| 2556 | PHYSADDRHISET(pa, |
| 2557 | (BUS_SWAP32(R_SM(&di->rxd64[i].addrhigh)) - |
| 2558 | di->dataoffsethigh)); |
| 2559 | |
| 2560 | /* clear this packet from the descriptor ring */ |
| 2561 | DMA_UNMAP(di->osh, pa, di->rxbufsize, DMA_RX, rxp, &di->rxp_dmah[i]); |
| 2562 | |
| 2563 | W_SM(&di->rxd64[i].addrlow, 0xdeadbeef); |
| 2564 | W_SM(&di->rxd64[i].addrhigh, 0xdeadbeef); |
| 2565 | |
| 2566 | di->rxin = NEXTRXD(i); |
| 2567 | |
Jason Cooper | 90ea229 | 2010-09-14 09:45:32 -0400 | [diff] [blame] | 2568 | return rxp; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2569 | } |
| 2570 | |
Brett Rudley | e69284f | 2010-11-16 15:45:48 -0800 | [diff] [blame] | 2571 | static bool _dma64_addrext(struct osl_info *osh, dma64regs_t * dma64regs) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2572 | { |
Greg Kroah-Hartman | 66cbd3a | 2010-10-08 11:05:47 -0700 | [diff] [blame] | 2573 | u32 w; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2574 | OR_REG(osh, &dma64regs->control, D64_XC_AE); |
| 2575 | w = R_REG(osh, &dma64regs->control); |
| 2576 | AND_REG(osh, &dma64regs->control, ~D64_XC_AE); |
Jason Cooper | 90ea229 | 2010-09-14 09:45:32 -0400 | [diff] [blame] | 2577 | return (w & D64_XC_AE) == D64_XC_AE; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2578 | } |
| 2579 | |
| 2580 | /* |
| 2581 | * Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin). |
| 2582 | */ |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 2583 | static void dma64_txrotate(dma_info_t *di) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2584 | { |
Greg Kroah-Hartman | 7d4df48 | 2010-10-07 17:04:47 -0700 | [diff] [blame] | 2585 | u16 ad; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2586 | uint nactive; |
| 2587 | uint rot; |
Greg Kroah-Hartman | 7d4df48 | 2010-10-07 17:04:47 -0700 | [diff] [blame] | 2588 | u16 old, new; |
Greg Kroah-Hartman | 66cbd3a | 2010-10-08 11:05:47 -0700 | [diff] [blame] | 2589 | u32 w; |
Greg Kroah-Hartman | 7d4df48 | 2010-10-07 17:04:47 -0700 | [diff] [blame] | 2590 | u16 first, last; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2591 | |
| 2592 | ASSERT(dma64_txsuspendedidle(di)); |
| 2593 | |
| 2594 | nactive = _dma_txactive(di); |
Greg Kroah-Hartman | 7d4df48 | 2010-10-07 17:04:47 -0700 | [diff] [blame] | 2595 | ad = (u16) (B2I |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2596 | ((((R_REG(di->osh, &di->d64txregs->status1) & |
| 2597 | D64_XS1_AD_MASK) |
| 2598 | - di->xmtptrbase) & D64_XS1_AD_MASK), dma64dd_t)); |
| 2599 | rot = TXD(ad - di->txin); |
| 2600 | |
| 2601 | ASSERT(rot < di->ntxd); |
| 2602 | |
| 2603 | /* full-ring case is a lot harder - don't worry about this */ |
| 2604 | if (rot >= (di->ntxd - nactive)) { |
| 2605 | DMA_ERROR(("%s: dma_txrotate: ring full - punt\n", di->name)); |
| 2606 | return; |
| 2607 | } |
| 2608 | |
| 2609 | first = di->txin; |
| 2610 | last = PREVTXD(di->txout); |
| 2611 | |
| 2612 | /* move entries starting at last and moving backwards to first */ |
| 2613 | for (old = last; old != PREVTXD(first); old = PREVTXD(old)) { |
| 2614 | new = TXD(old + rot); |
| 2615 | |
| 2616 | /* |
| 2617 | * Move the tx dma descriptor. |
| 2618 | * EOT is set only in the last entry in the ring. |
| 2619 | */ |
| 2620 | w = BUS_SWAP32(R_SM(&di->txd64[old].ctrl1)) & ~D64_CTRL1_EOT; |
| 2621 | if (new == (di->ntxd - 1)) |
| 2622 | w |= D64_CTRL1_EOT; |
| 2623 | W_SM(&di->txd64[new].ctrl1, BUS_SWAP32(w)); |
| 2624 | |
| 2625 | w = BUS_SWAP32(R_SM(&di->txd64[old].ctrl2)); |
| 2626 | W_SM(&di->txd64[new].ctrl2, BUS_SWAP32(w)); |
| 2627 | |
| 2628 | W_SM(&di->txd64[new].addrlow, R_SM(&di->txd64[old].addrlow)); |
| 2629 | W_SM(&di->txd64[new].addrhigh, R_SM(&di->txd64[old].addrhigh)); |
| 2630 | |
| 2631 | /* zap the old tx dma descriptor address field */ |
| 2632 | W_SM(&di->txd64[old].addrlow, BUS_SWAP32(0xdeadbeef)); |
| 2633 | W_SM(&di->txd64[old].addrhigh, BUS_SWAP32(0xdeadbeef)); |
| 2634 | |
| 2635 | /* move the corresponding txp[] entry */ |
| 2636 | ASSERT(di->txp[new] == NULL); |
| 2637 | di->txp[new] = di->txp[old]; |
| 2638 | |
| 2639 | /* Move the map */ |
| 2640 | if (DMASGLIST_ENAB) { |
| 2641 | bcopy(&di->txp_dmah[old], &di->txp_dmah[new], |
| 2642 | sizeof(hnddma_seg_map_t)); |
Brett Rudley | 9249ede | 2010-11-30 20:09:49 -0800 | [diff] [blame^] | 2643 | memset(&di->txp_dmah[old], 0, sizeof(hnddma_seg_map_t)); |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2644 | } |
| 2645 | |
| 2646 | di->txp[old] = NULL; |
| 2647 | } |
| 2648 | |
| 2649 | /* update txin and txout */ |
| 2650 | di->txin = ad; |
| 2651 | di->txout = TXD(di->txout + rot); |
| 2652 | di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1; |
| 2653 | |
| 2654 | /* kick the chip */ |
| 2655 | W_REG(di->osh, &di->d64txregs->ptr, |
| 2656 | di->xmtptrbase + I2B(di->txout, dma64dd_t)); |
| 2657 | } |
| 2658 | |
Jason Cooper | 7cc4a4c | 2010-09-14 09:45:30 -0400 | [diff] [blame] | 2659 | uint dma_addrwidth(si_t *sih, void *dmaregs) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2660 | { |
| 2661 | dma32regs_t *dma32regs; |
Brett Rudley | e69284f | 2010-11-16 15:45:48 -0800 | [diff] [blame] | 2662 | struct osl_info *osh; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2663 | |
| 2664 | osh = si_osh(sih); |
| 2665 | |
| 2666 | /* Perform 64-bit checks only if we want to advertise 64-bit (> 32bit) capability) */ |
| 2667 | /* DMA engine is 64-bit capable */ |
| 2668 | if ((si_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64) { |
| 2669 | /* backplane are 64-bit capable */ |
| 2670 | if (si_backplane64(sih)) |
| 2671 | /* If bus is System Backplane or PCIE then we can access 64-bits */ |
Brett Rudley | fa7a1db | 2010-11-23 15:30:02 -0800 | [diff] [blame] | 2672 | if ((sih->bustype == SI_BUS) || |
| 2673 | ((sih->bustype == PCI_BUS) && |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2674 | (sih->buscoretype == PCIE_CORE_ID))) |
Jason Cooper | 90ea229 | 2010-09-14 09:45:32 -0400 | [diff] [blame] | 2675 | return DMADDRWIDTH_64; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2676 | |
Greg Kroah-Hartman | 0f0881b | 2010-10-12 12:15:18 -0700 | [diff] [blame] | 2677 | /* DMA64 is always 32-bit capable, AE is always true */ |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2678 | ASSERT(_dma64_addrext(osh, (dma64regs_t *) dmaregs)); |
| 2679 | |
Jason Cooper | 90ea229 | 2010-09-14 09:45:32 -0400 | [diff] [blame] | 2680 | return DMADDRWIDTH_32; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2681 | } |
| 2682 | |
| 2683 | /* Start checking for 32-bit / 30-bit addressing */ |
| 2684 | dma32regs = (dma32regs_t *) dmaregs; |
| 2685 | |
| 2686 | /* For System Backplane, PCIE bus or addrext feature, 32-bits ok */ |
Brett Rudley | fa7a1db | 2010-11-23 15:30:02 -0800 | [diff] [blame] | 2687 | if ((sih->bustype == SI_BUS) || |
| 2688 | ((sih->bustype == PCI_BUS) |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2689 | && sih->buscoretype == PCIE_CORE_ID) |
| 2690 | || (_dma32_addrext(osh, dma32regs))) |
Jason Cooper | 90ea229 | 2010-09-14 09:45:32 -0400 | [diff] [blame] | 2691 | return DMADDRWIDTH_32; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2692 | |
| 2693 | /* Fallthru */ |
Jason Cooper | 90ea229 | 2010-09-14 09:45:32 -0400 | [diff] [blame] | 2694 | return DMADDRWIDTH_30; |
Henry Ptasinski | a9533e7 | 2010-09-08 21:04:42 -0700 | [diff] [blame] | 2695 | } |