/*
 * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* DXE - DMA transfer engine
 * There are two channels (high and low priority) for TX and two for RX.
 * Data packets are transferred through the low-priority channels,
 * management packets through the high-priority ones.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/interrupt.h>
#include "wcn36xx.h"
#include "txrx.h"

void *wcn36xx_dxe_get_next_bd(struct wcn36xx *wcn, bool is_low)
{
	struct wcn36xx_dxe_ch *ch = is_low ?
		&wcn->dxe_tx_l_ch :
		&wcn->dxe_tx_h_ch;

	return ch->head_blk_ctl->bd_cpu_addr;
}

static void wcn36xx_dxe_write_register(struct wcn36xx *wcn, int addr, int data)
{
	wcn36xx_dbg(WCN36XX_DBG_DXE,
		    "wcn36xx_dxe_write_register: addr=%x, data=%x\n",
		    addr, data);

	writel(data, wcn->mmio + addr);
}

#define wcn36xx_dxe_write_register_x(wcn, reg, reg_data)		\
do {									\
	if (wcn->chip_version == WCN36XX_CHIP_3680)			\
		wcn36xx_dxe_write_register(wcn, reg ## _3680, reg_data); \
	else								\
		wcn36xx_dxe_write_register(wcn, reg ## _3660, reg_data); \
} while (0)

static void wcn36xx_dxe_read_register(struct wcn36xx *wcn, int addr, int *data)
{
	*data = readl(wcn->mmio + addr);

	wcn36xx_dbg(WCN36XX_DBG_DXE,
		    "wcn36xx_dxe_read_register: addr=%x, data=%x\n",
		    addr, *data);
}

static void wcn36xx_dxe_free_ctl_block(struct wcn36xx_dxe_ch *ch)
{
	struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl, *next;
	int i;

	for (i = 0; i < ch->desc_num && ctl; i++) {
		next = ctl->next;
		kfree(ctl);
		ctl = next;
	}
}

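/*
 * The control blocks of a channel form a circular singly linked list:
 * the last block allocated points back to the head. Both the driver's
 * head/tail bookkeeping and the TX reap loop walk this ring by simply
 * following ->next, without any index arithmetic.
 */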
static int wcn36xx_dxe_allocate_ctl_block(struct wcn36xx_dxe_ch *ch)
{
	struct wcn36xx_dxe_ctl *prev_ctl = NULL;
	struct wcn36xx_dxe_ctl *cur_ctl = NULL;
	int i;

	for (i = 0; i < ch->desc_num; i++) {
		cur_ctl = kzalloc(sizeof(*cur_ctl), GFP_KERNEL);
		if (!cur_ctl)
			goto out_fail;

		cur_ctl->ctl_blk_order = i;
		if (i == 0) {
			ch->head_blk_ctl = cur_ctl;
			ch->tail_blk_ctl = cur_ctl;
		} else if (i == ch->desc_num - 1) {
			prev_ctl->next = cur_ctl;
			cur_ctl->next = ch->head_blk_ctl;
		} else {
			prev_ctl->next = cur_ctl;
		}
		prev_ctl = cur_ctl;
	}

	return 0;

out_fail:
	wcn36xx_dxe_free_ctl_block(ch);
	return -ENOMEM;
}
int wcn36xx_dxe_alloc_ctl_blks(struct wcn36xx *wcn)
{
	int ret;

	wcn->dxe_tx_l_ch.ch_type = WCN36XX_DXE_CH_TX_L;
	wcn->dxe_tx_h_ch.ch_type = WCN36XX_DXE_CH_TX_H;
	wcn->dxe_rx_l_ch.ch_type = WCN36XX_DXE_CH_RX_L;
	wcn->dxe_rx_h_ch.ch_type = WCN36XX_DXE_CH_RX_H;

	wcn->dxe_tx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_L;
	wcn->dxe_tx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_H;
	wcn->dxe_rx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_L;
	wcn->dxe_rx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_H;

	wcn->dxe_tx_l_ch.dxe_wq = WCN36XX_DXE_WQ_TX_L;
	wcn->dxe_tx_h_ch.dxe_wq = WCN36XX_DXE_WQ_TX_H;

	wcn->dxe_tx_l_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_L_BD;
	wcn->dxe_tx_h_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_H_BD;

	wcn->dxe_tx_l_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_L_SKB;
	wcn->dxe_tx_h_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_H_SKB;

	wcn->dxe_tx_l_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_L;
	wcn->dxe_tx_h_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_H;

	wcn->dxe_tx_l_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_L;
	wcn->dxe_tx_h_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_H;

	/* DXE control block allocation */
	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_l_ch);
	if (ret)
		goto out_err;
	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_h_ch);
	if (ret)
		goto out_err;
	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_l_ch);
	if (ret)
		goto out_err;
	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_h_ch);
	if (ret)
		goto out_err;

	/* Initialize SMSM state: clear TX enable, set TX rings empty */
	ret = wcn->ctrl_ops->smsm_change_state(
		WCN36XX_SMSM_WLAN_TX_ENABLE,
		WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY);

	return 0;

out_err:
	wcn36xx_err("Failed to allocate DXE control blocks\n");
	wcn36xx_dxe_free_ctl_blks(wcn);
	return -ENOMEM;
}

void wcn36xx_dxe_free_ctl_blks(struct wcn36xx *wcn)
{
	wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_l_ch);
	wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_h_ch);
	wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_l_ch);
	wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_h_ch);
}

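/*
 * The hardware descriptors for a channel live in one coherent DMA
 * allocation. Each control block is bound to one descriptor (CPU and
 * physical address), and the descriptors are chained through their
 * phy_next_l field with the last one pointing back to the first, so
 * the DXE engine sees the same circular ring the driver walks.
 */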
static int wcn36xx_dxe_init_descs(struct wcn36xx_dxe_ch *wcn_ch)
{
	struct wcn36xx_dxe_desc *cur_dxe = NULL;
	struct wcn36xx_dxe_desc *prev_dxe = NULL;
	struct wcn36xx_dxe_ctl *cur_ctl = NULL;
	size_t size;
	int i;

	size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc);
	wcn_ch->cpu_addr = dma_alloc_coherent(NULL, size, &wcn_ch->dma_addr,
					      GFP_KERNEL);
	if (!wcn_ch->cpu_addr)
		return -ENOMEM;

	memset(wcn_ch->cpu_addr, 0, size);

	cur_dxe = (struct wcn36xx_dxe_desc *)wcn_ch->cpu_addr;
	cur_ctl = wcn_ch->head_blk_ctl;

	for (i = 0; i < wcn_ch->desc_num; i++) {
		cur_ctl->desc = cur_dxe;
		cur_ctl->desc_phy_addr = wcn_ch->dma_addr +
			i * sizeof(struct wcn36xx_dxe_desc);

		switch (wcn_ch->ch_type) {
		case WCN36XX_DXE_CH_TX_L:
			cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_L;
			cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_L;
			break;
		case WCN36XX_DXE_CH_TX_H:
			cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_H;
			cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_H;
			break;
		case WCN36XX_DXE_CH_RX_L:
			cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_L;
			cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_L;
			break;
		case WCN36XX_DXE_CH_RX_H:
			cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_H;
			cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_H;
			break;
		}
		if (i == 0) {
			cur_dxe->phy_next_l = 0;
		} else if (i < wcn_ch->desc_num - 1) {
			prev_dxe->phy_next_l = cur_ctl->desc_phy_addr;
		} else {
			prev_dxe->phy_next_l = cur_ctl->desc_phy_addr;
			cur_dxe->phy_next_l =
				wcn_ch->head_blk_ctl->desc_phy_addr;
		}
		cur_ctl = cur_ctl->next;
		prev_dxe = cur_dxe;
		cur_dxe++;
	}

	return 0;
}

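/*
 * TX descriptors are used in pairs: the even-numbered one in each pair
 * carries the buffer descriptor (BD) header from the preallocated
 * memory pool, the odd-numbered one carries the frame payload itself.
 * Only the even descriptors therefore get a bd_cpu_addr/bd_phy_addr.
 */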
static void wcn36xx_dxe_init_tx_bd(struct wcn36xx_dxe_ch *ch,
				   struct wcn36xx_dxe_mem_pool *pool)
{
	int i, chunk_size = pool->chunk_size;
	dma_addr_t bd_phy_addr = pool->phy_addr;
	void *bd_cpu_addr = pool->virt_addr;
	struct wcn36xx_dxe_ctl *cur = ch->head_blk_ctl;

	for (i = 0; i < ch->desc_num; i++) {
		/* Only every second dxe needs a bd pointer,
		 * the others will point to the skb data.
		 */
		if (!(i & 1)) {
			cur->bd_phy_addr = bd_phy_addr;
			cur->bd_cpu_addr = bd_cpu_addr;
			bd_phy_addr += chunk_size;
			bd_cpu_addr += chunk_size;
		} else {
			cur->bd_phy_addr = 0;
			cur->bd_cpu_addr = NULL;
		}
		cur = cur->next;
	}
}

static int wcn36xx_dxe_enable_ch_int(struct wcn36xx *wcn, u16 wcn_ch)
{
	int reg_data = 0;

	wcn36xx_dxe_read_register(wcn,
				  WCN36XX_DXE_INT_MASK_REG,
				  &reg_data);

	reg_data |= wcn_ch;

	wcn36xx_dxe_write_register(wcn,
				   WCN36XX_DXE_INT_MASK_REG,
				   (int)reg_data);
	return 0;
}

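/*
 * Attach a fresh WCN36XX_PKT_SIZE skb to an RX descriptor and map it
 * for DMA; the DXE engine writes the received frame straight into this
 * buffer. Called both at ring setup and after every reaped RX frame,
 * so the ring never runs out of buffers.
 */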
static int wcn36xx_dxe_fill_skb(struct wcn36xx_dxe_ctl *ctl)
{
	struct wcn36xx_dxe_desc *dxe = ctl->desc;
	struct sk_buff *skb;

	skb = alloc_skb(WCN36XX_PKT_SIZE, GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	dxe->dst_addr_l = dma_map_single(NULL,
					 skb_tail_pointer(skb),
					 WCN36XX_PKT_SIZE,
					 DMA_FROM_DEVICE);
	ctl->skb = skb;

	return 0;
}

static int wcn36xx_dxe_ch_alloc_skb(struct wcn36xx *wcn,
				    struct wcn36xx_dxe_ch *wcn_ch)
{
	int i;
	struct wcn36xx_dxe_ctl *cur_ctl = NULL;

	cur_ctl = wcn_ch->head_blk_ctl;

	for (i = 0; i < wcn_ch->desc_num; i++) {
		wcn36xx_dxe_fill_skb(cur_ctl);
		cur_ctl = cur_ctl->next;
	}

	return 0;
}

static void wcn36xx_dxe_ch_free_skbs(struct wcn36xx *wcn,
				     struct wcn36xx_dxe_ch *wcn_ch)
{
	struct wcn36xx_dxe_ctl *cur = wcn_ch->head_blk_ctl;
	int i;

	for (i = 0; i < wcn_ch->desc_num; i++) {
		kfree_skb(cur->skb);
		cur = cur->next;
	}
}

void wcn36xx_dxe_tx_ack_ind(struct wcn36xx *wcn, u32 status)
{
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&wcn->dxe_lock, flags);
	skb = wcn->tx_ack_skb;
	wcn->tx_ack_skb = NULL;
	spin_unlock_irqrestore(&wcn->dxe_lock, flags);

	if (!skb) {
		wcn36xx_warn("Spurious TX complete indication\n");
		return;
	}

	info = IEEE80211_SKB_CB(skb);

	if (status == 1)
		info->flags |= IEEE80211_TX_STAT_ACK;

	wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ack status: %d\n", status);

	ieee80211_tx_status_irqsafe(wcn->hw, skb);
	ieee80211_wake_queues(wcn->hw);
}

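/*
 * Reap completed TX descriptors: walk from the ring tail towards the
 * head, stopping at the first descriptor whose VALID bit is still set
 * (i.e. one the hardware has not finished with yet), unmapping and
 * releasing the skb attached to each completed slot on the way.
 */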
static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch)
{
	struct wcn36xx_dxe_ctl *ctl = ch->tail_blk_ctl;
	struct ieee80211_tx_info *info;
	unsigned long flags;

	/*
	 * Use a do-while loop: when the ring is completely full, head and
	 * tail point to the same element, so a plain while loop would not
	 * iterate at all.
	 */
	do {
		if (ctl->desc->ctrl & WCN36XX_DXE_CTRL_VALID_MASK)
			break;
		if (ctl->skb) {
			dma_unmap_single(NULL, ctl->desc->src_addr_l,
					 ctl->skb->len, DMA_TO_DEVICE);
			info = IEEE80211_SKB_CB(ctl->skb);
			if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) {
				/* No TX status requested, so free the frame
				 * now; otherwise keep it until the TX status
				 * indication arrives.
				 */
				ieee80211_free_txskb(wcn->hw, ctl->skb);
			}
			spin_lock_irqsave(&ctl->skb_lock, flags);
			if (wcn->queues_stopped) {
				wcn->queues_stopped = false;
				ieee80211_wake_queues(wcn->hw);
			}
			spin_unlock_irqrestore(&ctl->skb_lock, flags);

			ctl->skb = NULL;
		}
		ctl = ctl->next;
	} while (ctl != ch->head_blk_ctl &&
		 !(ctl->desc->ctrl & WCN36XX_DXE_CTRL_VALID_MASK));

	ch->tail_blk_ctl = ctl;
}


static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev)
{
	struct wcn36xx *wcn = (struct wcn36xx *)dev;
	int int_src, int_reason;

	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);

	if (int_src & WCN36XX_INT_MASK_CHAN_TX_H) {
		wcn36xx_dxe_read_register(wcn,
					  WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_H,
					  &int_reason);

		/* TODO: Check int_reason */

		wcn36xx_dxe_write_register(wcn,
					   WCN36XX_DXE_0_INT_CLR,
					   WCN36XX_INT_MASK_CHAN_TX_H);

		wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_ED_CLR,
					   WCN36XX_INT_MASK_CHAN_TX_H);
		wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready high\n");
		reap_tx_dxes(wcn, &wcn->dxe_tx_h_ch);
	}

	if (int_src & WCN36XX_INT_MASK_CHAN_TX_L) {
		wcn36xx_dxe_read_register(wcn,
					  WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_L,
					  &int_reason);
		/* TODO: Check int_reason */

		wcn36xx_dxe_write_register(wcn,
					   WCN36XX_DXE_0_INT_CLR,
					   WCN36XX_INT_MASK_CHAN_TX_L);

		wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_ED_CLR,
					   WCN36XX_INT_MASK_CHAN_TX_L);
		wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready low\n");
		reap_tx_dxes(wcn, &wcn->dxe_tx_l_ch);
	}

	return IRQ_HANDLED;
}

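/*
 * The RX interrupt is masked while the rings are drained so the
 * handler is not re-entered for frames it is already processing; it is
 * re-enabled once wcn36xx_dxe_rx_frame() has consumed everything.
 */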
static irqreturn_t wcn36xx_irq_rx_ready(int irq, void *dev)
{
	struct wcn36xx *wcn = (struct wcn36xx *)dev;

	disable_irq_nosync(wcn->rx_irq);
	wcn36xx_dxe_rx_frame(wcn);
	enable_irq(wcn->rx_irq);
	return IRQ_HANDLED;
}

static int wcn36xx_dxe_request_irqs(struct wcn36xx *wcn)
{
	int ret;

	ret = request_irq(wcn->tx_irq, wcn36xx_irq_tx_complete,
			  IRQF_TRIGGER_HIGH, "wcn36xx_tx", wcn);
	if (ret) {
		wcn36xx_err("failed to alloc tx irq\n");
		goto out_err;
	}

	ret = request_irq(wcn->rx_irq, wcn36xx_irq_rx_ready, IRQF_TRIGGER_HIGH,
			  "wcn36xx_rx", wcn);
	if (ret) {
		wcn36xx_err("failed to alloc rx irq\n");
		goto out_txirq;
	}

	enable_irq_wake(wcn->rx_irq);

	return 0;

out_txirq:
	free_irq(wcn->tx_irq, wcn);
out_err:
	return ret;
}

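/*
 * Pass completed RX frames up the stack. For every descriptor the
 * hardware has released (VALID bit cleared), detach the filled skb,
 * immediately rearm the slot with a fresh buffer via
 * wcn36xx_dxe_fill_skb(), re-enable the DXE channel, and only then
 * hand the old skb to mac80211, so the ring is never left empty.
 */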
static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn,
				     struct wcn36xx_dxe_ch *ch)
{
	struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl;
	struct wcn36xx_dxe_desc *dxe = ctl->desc;
	dma_addr_t dma_addr;
	struct sk_buff *skb;

	while (!(dxe->ctrl & WCN36XX_DXE_CTRL_VALID_MASK)) {
		skb = ctl->skb;
		dma_addr = dxe->dst_addr_l;
		wcn36xx_dxe_fill_skb(ctl);

		switch (ch->ch_type) {
		case WCN36XX_DXE_CH_RX_L:
			dxe->ctrl = WCN36XX_DXE_CTRL_RX_L;
			wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR,
						   WCN36XX_DXE_INT_CH1_MASK);
			break;
		case WCN36XX_DXE_CH_RX_H:
			dxe->ctrl = WCN36XX_DXE_CTRL_RX_H;
			wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR,
						   WCN36XX_DXE_INT_CH3_MASK);
			break;
		default:
			wcn36xx_warn("Unknown channel\n");
		}

		dma_unmap_single(NULL, dma_addr, WCN36XX_PKT_SIZE,
				 DMA_FROM_DEVICE);
		wcn36xx_rx_skb(wcn, skb);
		ctl = ctl->next;
		dxe = ctl->desc;
	}

	ch->head_blk_ctl = ctl;

	return 0;
}

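/*
 * Top half of RX processing: read the raw interrupt source register,
 * acknowledge the pending RX channel interrupts and drain the matching
 * rings. Called from the RX interrupt handler with that IRQ masked.
 */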
void wcn36xx_dxe_rx_frame(struct wcn36xx *wcn)
{
	int int_src;

	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);

	/* RX_LOW_PRI */
	if (int_src & WCN36XX_DXE_INT_CH1_MASK) {
		wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR,
					   WCN36XX_DXE_INT_CH1_MASK);
		wcn36xx_rx_handle_packets(wcn, &wcn->dxe_rx_l_ch);
	}

	/* RX_HIGH_PRI */
	if (int_src & WCN36XX_DXE_INT_CH3_MASK) {
		/* Clear all pending interrupts for this channel */
		wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR,
					   WCN36XX_DXE_INT_CH3_MASK);
		wcn36xx_rx_handle_packets(wcn, &wcn->dxe_rx_h_ch);
	}

	if (!int_src)
		wcn36xx_warn("No DXE interrupt pending\n");
}

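/*
 * The BD pools are coherent DMA areas carved into one chunk per TX
 * descriptor pair. The chunk size rounds WCN36XX_BD_CHUNK_SIZE up;
 * the exact expression is inherited from the vendor driver,
 * presumably to satisfy an alignment requirement of the DXE engine.
 */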
int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn)
{
	size_t s;
	void *cpu_addr;

	/* Allocate BD headers for MGMT frames */

	/* Where does this rounding come from? Ask QC */
	wcn->mgmt_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
		16 - (WCN36XX_BD_CHUNK_SIZE % 8);

	s = wcn->mgmt_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_H;
	cpu_addr = dma_alloc_coherent(NULL, s, &wcn->mgmt_mem_pool.phy_addr,
				      GFP_KERNEL);
	if (!cpu_addr)
		goto out_err;

	wcn->mgmt_mem_pool.virt_addr = cpu_addr;
	memset(cpu_addr, 0, s);

	/* Allocate BD headers for DATA frames */

	/* Where does this rounding come from? Ask QC */
	wcn->data_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
		16 - (WCN36XX_BD_CHUNK_SIZE % 8);

	s = wcn->data_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_L;
	cpu_addr = dma_alloc_coherent(NULL, s, &wcn->data_mem_pool.phy_addr,
				      GFP_KERNEL);
	if (!cpu_addr)
		goto out_err;

	wcn->data_mem_pool.virt_addr = cpu_addr;
	memset(cpu_addr, 0, s);

	return 0;

out_err:
	wcn36xx_dxe_free_mem_pools(wcn);
	wcn36xx_err("Failed to allocate BD mempool\n");
	return -ENOMEM;
}

void wcn36xx_dxe_free_mem_pools(struct wcn36xx *wcn)
{
	if (wcn->mgmt_mem_pool.virt_addr)
		dma_free_coherent(NULL, wcn->mgmt_mem_pool.chunk_size *
				  WCN36XX_DXE_CH_DESC_NUMB_TX_H,
				  wcn->mgmt_mem_pool.virt_addr,
				  wcn->mgmt_mem_pool.phy_addr);

	if (wcn->data_mem_pool.virt_addr)
		dma_free_coherent(NULL, wcn->data_mem_pool.chunk_size *
				  WCN36XX_DXE_CH_DESC_NUMB_TX_L,
				  wcn->data_mem_pool.virt_addr,
				  wcn->data_mem_pool.phy_addr);
}

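/*
 * Submitting a frame takes two consecutive descriptors: the first
 * carries the preallocated BD header (the caller fills the BD obtained
 * from wcn36xx_dxe_get_next_bd()), the second the skb payload. Both
 * are marked valid before the channel is kicked, and the ring head
 * then advances past the pair.
 */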
int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
			 struct wcn36xx_vif *vif_priv,
			 struct sk_buff *skb,
			 bool is_low)
{
	struct wcn36xx_dxe_ctl *ctl = NULL;
	struct wcn36xx_dxe_desc *desc = NULL;
	struct wcn36xx_dxe_ch *ch = NULL;
	unsigned long flags;

	ch = is_low ? &wcn->dxe_tx_l_ch : &wcn->dxe_tx_h_ch;

	ctl = ch->head_blk_ctl;

	spin_lock_irqsave(&ctl->next->skb_lock, flags);

	/*
	 * If the next descriptor still holds an skb we have caught up with
	 * the tail of the ring, i.e. the ring is full. Stop the queues to
	 * let mac80211 back off until the ring has an empty slot again.
	 */
	if (ctl->next->skb) {
		ieee80211_stop_queues(wcn->hw);
		wcn->queues_stopped = true;
		spin_unlock_irqrestore(&ctl->next->skb_lock, flags);
		return -EBUSY;
	}
	spin_unlock_irqrestore(&ctl->next->skb_lock, flags);

	ctl->skb = NULL;
	desc = ctl->desc;

	/* Set source address of the BD we send */
	desc->src_addr_l = ctl->bd_phy_addr;

	desc->dst_addr_l = ch->dxe_wq;
	desc->fr_len = sizeof(struct wcn36xx_tx_bd);
	desc->ctrl = ch->ctrl_bd;

	wcn36xx_dbg(WCN36XX_DBG_DXE, "DXE TX\n");

	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC1 >>> ",
			 (char *)desc, sizeof(*desc));
	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP,
			 "BD   >>> ", (char *)ctl->bd_cpu_addr,
			 sizeof(struct wcn36xx_tx_bd));

	/* Set source address of the SKB we send */
	ctl = ctl->next;
	ctl->skb = skb;
	desc = ctl->desc;
	if (ctl->bd_cpu_addr) {
		wcn36xx_err("bd_cpu_addr must be NULL for skb DXE\n");
		return -EINVAL;
	}

	desc->src_addr_l = dma_map_single(NULL,
					  ctl->skb->data,
					  ctl->skb->len,
					  DMA_TO_DEVICE);

	desc->dst_addr_l = ch->dxe_wq;
	desc->fr_len = ctl->skb->len;

	/* Set the dxe descriptor to VALID */
	desc->ctrl = ch->ctrl_skb;

	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC2 >>> ",
			 (char *)desc, sizeof(*desc));
	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "SKB   >>> ",
			 (char *)ctl->skb->data, ctl->skb->len);

	/* Move the head of the ring to the next empty descriptor */
	ch->head_blk_ctl = ctl->next;

	/*
	 * When connected, the chip can be in sleep mode while we try to send
	 * a data frame, and a register write will not wake it up. Notify the
	 * chip about the new frame through the SMSM bus instead.
	 */
	if (is_low && vif_priv->pw_state == WCN36XX_BMPS) {
		wcn->ctrl_ops->smsm_change_state(
			  0,
			  WCN36XX_SMSM_WLAN_TX_ENABLE);
	} else {
		/* Indicate End Of Packet and generate an interrupt on
		 * descriptor done.
		 */
		wcn36xx_dxe_write_register(wcn,
			ch->reg_ctrl, ch->def_ctrl);
	}

	return 0;
}

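/*
 * Bring-up order for the DXE engine: reset the block, route its
 * interrupts, then for each of the four channels program the
 * descriptor ring head into the NEXT register, set the work-queue
 * addresses, preallocate RX buffers where needed and unmask the
 * channel interrupt, before finally requesting the TX/RX IRQs.
 */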
int wcn36xx_dxe_init(struct wcn36xx *wcn)
{
	int reg_data = 0, ret;

	reg_data = WCN36XX_DXE_REG_RESET;
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CSR_RESET, reg_data);

	/* Select the interrupt path */
	reg_data = WCN36XX_DXE_CCU_INT;
	wcn36xx_dxe_write_register_x(wcn, WCN36XX_DXE_REG_CCU_INT, reg_data);

	/***************************************/
	/* Init descriptors for TX LOW channel */
	/***************************************/
	wcn36xx_dxe_init_descs(&wcn->dxe_tx_l_ch);
	wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_l_ch, &wcn->data_mem_pool);

	/* Write channel head to the NEXT register */
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_L,
		wcn->dxe_tx_l_ch.head_blk_ctl->desc_phy_addr);

	/* Program DMA destination addr for TX LOW */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_CH_DEST_ADDR_TX_L,
		WCN36XX_DXE_WQ_TX_L);

	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);

	/* Enable channel interrupts */
	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_L);

	/****************************************/
	/* Init descriptors for TX HIGH channel */
	/****************************************/
	wcn36xx_dxe_init_descs(&wcn->dxe_tx_h_ch);
	wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_h_ch, &wcn->mgmt_mem_pool);

	/* Write channel head to the NEXT register */
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_H,
		wcn->dxe_tx_h_ch.head_blk_ctl->desc_phy_addr);

	/* Program DMA destination addr for TX HIGH */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_CH_DEST_ADDR_TX_H,
		WCN36XX_DXE_WQ_TX_H);

	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);

	/* Enable channel interrupts */
	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_H);

	/***************************************/
	/* Init descriptors for RX LOW channel */
	/***************************************/
	wcn36xx_dxe_init_descs(&wcn->dxe_rx_l_ch);

	/* For RX we need to preallocate buffers */
	wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_l_ch);

	/* Write channel head to the NEXT register */
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_L,
		wcn->dxe_rx_l_ch.head_blk_ctl->desc_phy_addr);

	/* Write DMA source address */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_CH_SRC_ADDR_RX_L,
		WCN36XX_DXE_WQ_RX_L);

	/* Program preallocated destination address */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_CH_DEST_ADDR_RX_L,
		wcn->dxe_rx_l_ch.head_blk_ctl->desc->phy_next_l);

	/* Enable default control registers */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_REG_CTL_RX_L,
		WCN36XX_DXE_CH_DEFAULT_CTL_RX_L);

	/* Enable channel interrupts */
	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_L);

	/****************************************/
	/* Init descriptors for RX HIGH channel */
	/****************************************/
	wcn36xx_dxe_init_descs(&wcn->dxe_rx_h_ch);

	/* For RX we need to preallocate buffers */
	wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_h_ch);

	/* Write channel head to the NEXT register */
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_H,
		wcn->dxe_rx_h_ch.head_blk_ctl->desc_phy_addr);

	/* Write DMA source address */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_CH_SRC_ADDR_RX_H,
		WCN36XX_DXE_WQ_RX_H);

	/* Program preallocated destination address */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_CH_DEST_ADDR_RX_H,
		wcn->dxe_rx_h_ch.head_blk_ctl->desc->phy_next_l);

	/* Enable default control registers */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_REG_CTL_RX_H,
		WCN36XX_DXE_CH_DEFAULT_CTL_RX_H);

	/* Enable channel interrupts */
	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_H);

	ret = wcn36xx_dxe_request_irqs(wcn);
	if (ret < 0)
		goto out_err;

	return 0;

out_err:
	return ret;
}

void wcn36xx_dxe_deinit(struct wcn36xx *wcn)
{
	free_irq(wcn->tx_irq, wcn);
	free_irq(wcn->rx_irq, wcn);

	if (wcn->tx_ack_skb) {
		ieee80211_tx_status_irqsafe(wcn->hw, wcn->tx_ack_skb);
		wcn->tx_ack_skb = NULL;
	}

	wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_l_ch);
	wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_h_ch);
}