/*
 * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* DXE - DMA transfer engine
 * We have two channels (high priority and low priority) for TX and two
 * channels for RX. Data packets are transferred through the low-priority
 * channels and management packets through the high-priority channels.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/interrupt.h>
#include "wcn36xx.h"
#include "txrx.h"

void *wcn36xx_dxe_get_next_bd(struct wcn36xx *wcn, bool is_low)
{
	struct wcn36xx_dxe_ch *ch = is_low ?
		&wcn->dxe_tx_l_ch :
		&wcn->dxe_tx_h_ch;

	return ch->head_blk_ctl->bd_cpu_addr;
}
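
/* A minimal sketch of a hypothetical caller (presumably the txrx layer),
 * pairing this helper with wcn36xx_dxe_tx_frame() below; the BD-filling
 * step is assumed, not part of this file:
 *
 *	struct wcn36xx_tx_bd *bd = wcn36xx_dxe_get_next_bd(wcn, is_low);
 *	... fill bd fields (queue id, rates, ...) ...
 *	wcn36xx_dxe_tx_frame(wcn, vif_priv, skb, is_low);
 */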

static void wcn36xx_dxe_write_register(struct wcn36xx *wcn, int addr, int data)
{
	wcn36xx_dbg(WCN36XX_DBG_DXE,
		    "wcn36xx_dxe_write_register: addr=%x, data=%x\n",
		    addr, data);

	writel(data, wcn->mmio + addr);
}
46
Pontus Fuchsf2ed5d22014-02-12 19:04:45 +000047#define wcn36xx_dxe_write_register_x(wcn, reg, reg_data) \
48do { \
49 if (wcn->chip_version == WCN36XX_CHIP_3680) \
50 wcn36xx_dxe_write_register(wcn, reg ## _3680, reg_data); \
51 else \
52 wcn36xx_dxe_write_register(wcn, reg ## _3660, reg_data); \
53} while (0) \
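
/* Example (matches the use in wcn36xx_dxe_init() below): the _x variant
 * expands to the chip-specific register name, so
 *
 *	wcn36xx_dxe_write_register_x(wcn, WCN36XX_DXE_REG_CCU_INT, reg_data);
 *
 * writes WCN36XX_DXE_REG_CCU_INT_3680 on a 3680 and
 * WCN36XX_DXE_REG_CCU_INT_3660 otherwise.
 */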

static void wcn36xx_dxe_read_register(struct wcn36xx *wcn, int addr, int *data)
{
	*data = readl(wcn->mmio + addr);

	wcn36xx_dbg(WCN36XX_DBG_DXE,
		    "wcn36xx_dxe_read_register: addr=%x, data=%x\n",
		    addr, *data);
}

static void wcn36xx_dxe_free_ctl_block(struct wcn36xx_dxe_ch *ch)
{
	struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl, *next;
	int i;

	for (i = 0; i < ch->desc_num && ctl; i++) {
		next = ctl->next;
		kfree(ctl);
		ctl = next;
	}
}

static int wcn36xx_dxe_allocate_ctl_block(struct wcn36xx_dxe_ch *ch)
{
	struct wcn36xx_dxe_ctl *prev_ctl = NULL;
	struct wcn36xx_dxe_ctl *cur_ctl = NULL;
	int i;

	for (i = 0; i < ch->desc_num; i++) {
		cur_ctl = kzalloc(sizeof(*cur_ctl), GFP_KERNEL);
		if (!cur_ctl)
			goto out_fail;

		spin_lock_init(&cur_ctl->skb_lock);
		cur_ctl->ctl_blk_order = i;
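		/* Link the blocks into a circular list: block 0 becomes both
		 * head and tail, and the last block points back to the head,
		 * so the ring can be walked without explicit wrap-around.
		 */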
		if (i == 0) {
			ch->head_blk_ctl = cur_ctl;
			ch->tail_blk_ctl = cur_ctl;
		} else if (i == ch->desc_num - 1) {
			prev_ctl->next = cur_ctl;
			cur_ctl->next = ch->head_blk_ctl;
		} else {
			prev_ctl->next = cur_ctl;
		}
		prev_ctl = cur_ctl;
	}

	return 0;

out_fail:
	wcn36xx_dxe_free_ctl_block(ch);
	return -ENOMEM;
}

int wcn36xx_dxe_alloc_ctl_blks(struct wcn36xx *wcn)
{
	int ret;

	wcn->dxe_tx_l_ch.ch_type = WCN36XX_DXE_CH_TX_L;
	wcn->dxe_tx_h_ch.ch_type = WCN36XX_DXE_CH_TX_H;
	wcn->dxe_rx_l_ch.ch_type = WCN36XX_DXE_CH_RX_L;
	wcn->dxe_rx_h_ch.ch_type = WCN36XX_DXE_CH_RX_H;

	wcn->dxe_tx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_L;
	wcn->dxe_tx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_H;
	wcn->dxe_rx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_L;
	wcn->dxe_rx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_H;

	wcn->dxe_tx_l_ch.dxe_wq = WCN36XX_DXE_WQ_TX_L;
	wcn->dxe_tx_h_ch.dxe_wq = WCN36XX_DXE_WQ_TX_H;

	wcn->dxe_tx_l_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_L_BD;
	wcn->dxe_tx_h_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_H_BD;

	wcn->dxe_tx_l_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_L_SKB;
	wcn->dxe_tx_h_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_H_SKB;

	wcn->dxe_tx_l_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_L;
	wcn->dxe_tx_h_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_H;

	wcn->dxe_tx_l_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_L;
	wcn->dxe_tx_h_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_H;

	/* DXE control block allocation */
	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_l_ch);
	if (ret)
		goto out_err;
	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_h_ch);
	if (ret)
		goto out_err;
	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_l_ch);
	if (ret)
		goto out_err;
	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_h_ch);
	if (ret)
		goto out_err;

	/* Initialize SMSM state: clear TX_ENABLE, set TX_RINGS_EMPTY */
	ret = wcn->ctrl_ops->smsm_change_state(
		WCN36XX_SMSM_WLAN_TX_ENABLE,
		WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY);

	return 0;

out_err:
	wcn36xx_err("Failed to allocate DXE control blocks\n");
	wcn36xx_dxe_free_ctl_blks(wcn);
	return -ENOMEM;
}

void wcn36xx_dxe_free_ctl_blks(struct wcn36xx *wcn)
{
	wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_l_ch);
	wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_h_ch);
	wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_l_ch);
	wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_h_ch);
}

static int wcn36xx_dxe_init_descs(struct wcn36xx_dxe_ch *wcn_ch)
{
	struct wcn36xx_dxe_desc *cur_dxe = NULL;
	struct wcn36xx_dxe_desc *prev_dxe = NULL;
	struct wcn36xx_dxe_ctl *cur_ctl = NULL;
	size_t size;
	int i;

	size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc);
	wcn_ch->cpu_addr = dma_alloc_coherent(NULL, size, &wcn_ch->dma_addr,
					      GFP_KERNEL);
	if (!wcn_ch->cpu_addr)
		return -ENOMEM;

	memset(wcn_ch->cpu_addr, 0, size);

	cur_dxe = (struct wcn36xx_dxe_desc *)wcn_ch->cpu_addr;
	cur_ctl = wcn_ch->head_blk_ctl;

	for (i = 0; i < wcn_ch->desc_num; i++) {
		cur_ctl->desc = cur_dxe;
		cur_ctl->desc_phy_addr = wcn_ch->dma_addr +
			i * sizeof(struct wcn36xx_dxe_desc);

		switch (wcn_ch->ch_type) {
		case WCN36XX_DXE_CH_TX_L:
			cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_L;
			cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_L;
			break;
		case WCN36XX_DXE_CH_TX_H:
			cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_H;
			cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_H;
			break;
		case WCN36XX_DXE_CH_RX_L:
			cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_L;
			cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_L;
			break;
		case WCN36XX_DXE_CH_RX_H:
			cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_H;
			cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_H;
			break;
		}
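		/* Chain the descriptors through their physical addresses:
		 * each descriptor's phy_next_l points at its successor and
		 * the last one wraps back to the head, mirroring the
		 * circular control-block list above.
		 */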
		if (i == 0) {
			cur_dxe->phy_next_l = 0;
		} else if ((0 < i) && (i < wcn_ch->desc_num - 1)) {
			prev_dxe->phy_next_l =
				cur_ctl->desc_phy_addr;
		} else if (i == (wcn_ch->desc_num - 1)) {
			prev_dxe->phy_next_l =
				cur_ctl->desc_phy_addr;
			cur_dxe->phy_next_l =
				wcn_ch->head_blk_ctl->desc_phy_addr;
		}
		cur_ctl = cur_ctl->next;
		prev_dxe = cur_dxe;
		cur_dxe++;
	}

	return 0;
}

static void wcn36xx_dxe_init_tx_bd(struct wcn36xx_dxe_ch *ch,
				   struct wcn36xx_dxe_mem_pool *pool)
{
	int i, chunk_size = pool->chunk_size;
	dma_addr_t bd_phy_addr = pool->phy_addr;
	void *bd_cpu_addr = pool->virt_addr;
	struct wcn36xx_dxe_ctl *cur = ch->head_blk_ctl;

	for (i = 0; i < ch->desc_num; i++) {
		/* Only every second dxe needs a bd pointer;
		 * the others will point to the skb data directly.
		 */
		if (!(i & 1)) {
			cur->bd_phy_addr = bd_phy_addr;
			cur->bd_cpu_addr = bd_cpu_addr;
			bd_phy_addr += chunk_size;
			bd_cpu_addr += chunk_size;
		} else {
			cur->bd_phy_addr = 0;
			cur->bd_cpu_addr = NULL;
		}
		cur = cur->next;
	}
}

static int wcn36xx_dxe_enable_ch_int(struct wcn36xx *wcn, u16 wcn_ch)
{
	int reg_data = 0;

	wcn36xx_dxe_read_register(wcn,
				  WCN36XX_DXE_INT_MASK_REG,
				  &reg_data);

	reg_data |= wcn_ch;

	wcn36xx_dxe_write_register(wcn,
				   WCN36XX_DXE_INT_MASK_REG,
				   (int)reg_data);
	return 0;
}

static int wcn36xx_dxe_fill_skb(struct wcn36xx_dxe_ctl *ctl)
{
	struct wcn36xx_dxe_desc *dxe = ctl->desc;
	struct sk_buff *skb;

	skb = alloc_skb(WCN36XX_PKT_SIZE, GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

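	/* Map the skb's (empty) data area for the device to DMA a frame
	 * into; the descriptor's destination address is the mapped bus
	 * address.
	 */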
	dxe->dst_addr_l = dma_map_single(NULL,
					 skb_tail_pointer(skb),
					 WCN36XX_PKT_SIZE,
					 DMA_FROM_DEVICE);
	ctl->skb = skb;

	return 0;
}

static int wcn36xx_dxe_ch_alloc_skb(struct wcn36xx *wcn,
				    struct wcn36xx_dxe_ch *wcn_ch)
{
	int i;
	struct wcn36xx_dxe_ctl *cur_ctl = NULL;

	cur_ctl = wcn_ch->head_blk_ctl;

	for (i = 0; i < wcn_ch->desc_num; i++) {
		wcn36xx_dxe_fill_skb(cur_ctl);
		cur_ctl = cur_ctl->next;
	}

	return 0;
}

static void wcn36xx_dxe_ch_free_skbs(struct wcn36xx *wcn,
				     struct wcn36xx_dxe_ch *wcn_ch)
{
	struct wcn36xx_dxe_ctl *cur = wcn_ch->head_blk_ctl;
	int i;

	for (i = 0; i < wcn_ch->desc_num; i++) {
		kfree_skb(cur->skb);
		cur = cur->next;
	}
}

void wcn36xx_dxe_tx_ack_ind(struct wcn36xx *wcn, u32 status)
{
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;
	unsigned long flags;

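	/* Take the ack skb out of wcn under the lock so a racing TX path
	 * cannot hand us a new one while we report this status.
	 */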
	spin_lock_irqsave(&wcn->dxe_lock, flags);
	skb = wcn->tx_ack_skb;
	wcn->tx_ack_skb = NULL;
	spin_unlock_irqrestore(&wcn->dxe_lock, flags);

	if (!skb) {
		wcn36xx_warn("Spurious TX complete indication\n");
		return;
	}

	info = IEEE80211_SKB_CB(skb);

	if (status == 1)
		info->flags |= IEEE80211_TX_STAT_ACK;

	wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ack status: %d\n", status);

	ieee80211_tx_status_irqsafe(wcn->hw, skb);
	ieee80211_wake_queues(wcn->hw);
}

static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch)
{
	struct wcn36xx_dxe_ctl *ctl = ch->tail_blk_ctl;
	struct ieee80211_tx_info *info;
	unsigned long flags;

	/*
	 * Make at least one pass of the do-while, because when the ring is
	 * completely full, head and tail point to the same element and a
	 * while-do loop would not execute at all.
	 */
	do {
		if (ctl->desc->ctrl & WCN36XX_DXE_CTRL_VALID_MASK)
			break;
		if (ctl->skb) {
			dma_unmap_single(NULL, ctl->desc->src_addr_l,
					 ctl->skb->len, DMA_TO_DEVICE);
			info = IEEE80211_SKB_CB(ctl->skb);
			if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) {
				/* No TX status was requested, so the frame
				 * can be freed right away; otherwise it is
				 * kept until the TX status indication comes.
				 */
				ieee80211_free_txskb(wcn->hw, ctl->skb);
			}
			spin_lock_irqsave(&ctl->skb_lock, flags);
			if (wcn->queues_stopped) {
				wcn->queues_stopped = false;
				ieee80211_wake_queues(wcn->hw);
			}
			spin_unlock_irqrestore(&ctl->skb_lock, flags);

			ctl->skb = NULL;
		}
		ctl = ctl->next;
	} while (ctl != ch->head_blk_ctl &&
		 !(ctl->desc->ctrl & WCN36XX_DXE_CTRL_VALID_MASK));

	ch->tail_blk_ctl = ctl;
}

static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev)
{
	struct wcn36xx *wcn = (struct wcn36xx *)dev;
	int int_src, int_reason;

	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);

	if (int_src & WCN36XX_INT_MASK_CHAN_TX_H) {
		wcn36xx_dxe_read_register(wcn,
					  WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_H,
					  &int_reason);

		/* TODO: Check int_reason */

		wcn36xx_dxe_write_register(wcn,
					   WCN36XX_DXE_0_INT_CLR,
					   WCN36XX_INT_MASK_CHAN_TX_H);

		wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_ED_CLR,
					   WCN36XX_INT_MASK_CHAN_TX_H);
		wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready high\n");
		reap_tx_dxes(wcn, &wcn->dxe_tx_h_ch);
	}

	if (int_src & WCN36XX_INT_MASK_CHAN_TX_L) {
		wcn36xx_dxe_read_register(wcn,
					  WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_L,
					  &int_reason);
		/* TODO: Check int_reason */

		wcn36xx_dxe_write_register(wcn,
					   WCN36XX_DXE_0_INT_CLR,
					   WCN36XX_INT_MASK_CHAN_TX_L);

		wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_ED_CLR,
					   WCN36XX_INT_MASK_CHAN_TX_L);
		wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready low\n");
		reap_tx_dxes(wcn, &wcn->dxe_tx_l_ch);
	}

	return IRQ_HANDLED;
}

static irqreturn_t wcn36xx_irq_rx_ready(int irq, void *dev)
{
	struct wcn36xx *wcn = (struct wcn36xx *)dev;

	disable_irq_nosync(wcn->rx_irq);
	wcn36xx_dxe_rx_frame(wcn);
	enable_irq(wcn->rx_irq);
	return IRQ_HANDLED;
}

static int wcn36xx_dxe_request_irqs(struct wcn36xx *wcn)
{
	int ret;

	ret = request_irq(wcn->tx_irq, wcn36xx_irq_tx_complete,
			  IRQF_TRIGGER_HIGH, "wcn36xx_tx", wcn);
	if (ret) {
		wcn36xx_err("failed to alloc tx irq\n");
		goto out_err;
	}

	ret = request_irq(wcn->rx_irq, wcn36xx_irq_rx_ready, IRQF_TRIGGER_HIGH,
			  "wcn36xx_rx", wcn);
	if (ret) {
		wcn36xx_err("failed to alloc rx irq\n");
		goto out_txirq;
	}

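	/* Mark the RX interrupt as a wakeup source so incoming frames can
	 * wake a suspended host.
	 */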
	enable_irq_wake(wcn->rx_irq);

	return 0;

out_txirq:
	free_irq(wcn->tx_irq, wcn);
out_err:
	return ret;
}

static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn,
				     struct wcn36xx_dxe_ch *ch)
{
	struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl;
	struct wcn36xx_dxe_desc *dxe = ctl->desc;
	dma_addr_t dma_addr;
	struct sk_buff *skb;

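	/* Walk descriptors the hardware has cleared (VALID bit unset):
	 * detach the filled skb, immediately refill the slot with a fresh
	 * buffer, then hand the old skb up the stack.
	 */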
	while (!(dxe->ctrl & WCN36XX_DXE_CTRL_VALID_MASK)) {
		skb = ctl->skb;
		dma_addr = dxe->dst_addr_l;
		wcn36xx_dxe_fill_skb(ctl);

		switch (ch->ch_type) {
		case WCN36XX_DXE_CH_RX_L:
			dxe->ctrl = WCN36XX_DXE_CTRL_RX_L;
			wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR,
						   WCN36XX_DXE_INT_CH1_MASK);
			break;
		case WCN36XX_DXE_CH_RX_H:
			dxe->ctrl = WCN36XX_DXE_CTRL_RX_H;
			wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR,
						   WCN36XX_DXE_INT_CH3_MASK);
			break;
		default:
			wcn36xx_warn("Unknown channel\n");
		}

		dma_unmap_single(NULL, dma_addr, WCN36XX_PKT_SIZE,
				 DMA_FROM_DEVICE);
		wcn36xx_rx_skb(wcn, skb);
		ctl = ctl->next;
		dxe = ctl->desc;
	}

	ch->head_blk_ctl = ctl;

	return 0;
}

void wcn36xx_dxe_rx_frame(struct wcn36xx *wcn)
{
	int int_src;

	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);

	/* RX_LOW_PRI */
	if (int_src & WCN36XX_DXE_INT_CH1_MASK) {
		wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR,
					   WCN36XX_DXE_INT_CH1_MASK);
		wcn36xx_rx_handle_packets(wcn, &(wcn->dxe_rx_l_ch));
	}

	/* RX_HIGH_PRI */
	if (int_src & WCN36XX_DXE_INT_CH3_MASK) {
		/* Clean up all the INT within this channel */
		wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR,
					   WCN36XX_DXE_INT_CH3_MASK);
		wcn36xx_rx_handle_packets(wcn, &(wcn->dxe_rx_h_ch));
	}

	if (!int_src)
		wcn36xx_warn("No DXE interrupt pending\n");
}

int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn)
{
	size_t s;
	void *cpu_addr;

	/* Allocate BD headers for MGMT frames */

	/* Where this comes from, ask QC */
	wcn->mgmt_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
		16 - (WCN36XX_BD_CHUNK_SIZE % 8);

	s = wcn->mgmt_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_H;
	cpu_addr = dma_alloc_coherent(NULL, s, &wcn->mgmt_mem_pool.phy_addr,
				      GFP_KERNEL);
	if (!cpu_addr)
		goto out_err;

	wcn->mgmt_mem_pool.virt_addr = cpu_addr;
	memset(cpu_addr, 0, s);

	/* Allocate BD headers for DATA frames */

	/* Where this comes from, ask QC */
	wcn->data_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
		16 - (WCN36XX_BD_CHUNK_SIZE % 8);

	s = wcn->data_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_L;
	cpu_addr = dma_alloc_coherent(NULL, s, &wcn->data_mem_pool.phy_addr,
				      GFP_KERNEL);
	if (!cpu_addr)
		goto out_err;

	wcn->data_mem_pool.virt_addr = cpu_addr;
	memset(cpu_addr, 0, s);

	return 0;

out_err:
	wcn36xx_dxe_free_mem_pools(wcn);
	wcn36xx_err("Failed to allocate BD mempool\n");
	return -ENOMEM;
}

void wcn36xx_dxe_free_mem_pools(struct wcn36xx *wcn)
{
	if (wcn->mgmt_mem_pool.virt_addr)
		dma_free_coherent(NULL, wcn->mgmt_mem_pool.chunk_size *
				  WCN36XX_DXE_CH_DESC_NUMB_TX_H,
				  wcn->mgmt_mem_pool.virt_addr,
				  wcn->mgmt_mem_pool.phy_addr);

	if (wcn->data_mem_pool.virt_addr)
		dma_free_coherent(NULL, wcn->data_mem_pool.chunk_size *
				  WCN36XX_DXE_CH_DESC_NUMB_TX_L,
				  wcn->data_mem_pool.virt_addr,
				  wcn->data_mem_pool.phy_addr);
}

int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
			 struct wcn36xx_vif *vif_priv,
			 struct sk_buff *skb,
			 bool is_low)
{
	struct wcn36xx_dxe_ctl *ctl = NULL;
	struct wcn36xx_dxe_desc *desc = NULL;
	struct wcn36xx_dxe_ch *ch = NULL;
	unsigned long flags;

	ch = is_low ? &wcn->dxe_tx_l_ch : &wcn->dxe_tx_h_ch;

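	/* Each frame consumes two consecutive descriptors: the current one
	 * carries the BD (buffer descriptor) header, the next one the skb
	 * payload (see wcn36xx_dxe_init_tx_bd() above).
	 */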
	ctl = ch->head_blk_ctl;

	spin_lock_irqsave(&ctl->next->skb_lock, flags);

	/*
	 * If the skb is not NULL, we have reached the tail of the ring and
	 * the ring is full. Stop the queues to let mac80211 back off until
	 * the ring has an empty slot again.
	 */
	if (ctl->next->skb) {
		ieee80211_stop_queues(wcn->hw);
		wcn->queues_stopped = true;
		spin_unlock_irqrestore(&ctl->next->skb_lock, flags);
		return -EBUSY;
	}
	spin_unlock_irqrestore(&ctl->next->skb_lock, flags);

	ctl->skb = NULL;
	desc = ctl->desc;

	/* Set source address of the BD we send */
	desc->src_addr_l = ctl->bd_phy_addr;

	desc->dst_addr_l = ch->dxe_wq;
	desc->fr_len = sizeof(struct wcn36xx_tx_bd);
	desc->ctrl = ch->ctrl_bd;

	wcn36xx_dbg(WCN36XX_DBG_DXE, "DXE TX\n");

	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC1 >>> ",
			 (char *)desc, sizeof(*desc));
	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP,
			 "BD >>> ", (char *)ctl->bd_cpu_addr,
			 sizeof(struct wcn36xx_tx_bd));

	/* Set source address of the SKB we send */
	ctl = ctl->next;
	ctl->skb = skb;
	desc = ctl->desc;
	if (ctl->bd_cpu_addr) {
		wcn36xx_err("bd_cpu_addr must be NULL for skb DXE\n");
		return -EINVAL;
	}

	desc->src_addr_l = dma_map_single(NULL,
					  ctl->skb->data,
					  ctl->skb->len,
					  DMA_TO_DEVICE);

	desc->dst_addr_l = ch->dxe_wq;
	desc->fr_len = ctl->skb->len;

	/* Set the dxe descriptor to VALID */
	desc->ctrl = ch->ctrl_skb;

	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC2 >>> ",
			 (char *)desc, sizeof(*desc));
	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "SKB >>> ",
			 (char *)ctl->skb->data, ctl->skb->len);

	/* Move the head of the ring to the next empty descriptor */
	ch->head_blk_ctl = ctl->next;

	/*
	 * When connected and trying to send a data frame, the chip can be in
	 * sleep mode and writing to the register will not wake it up. Instead,
	 * notify the chip about the new frame through the SMSM bus.
	 */
	if (is_low && vif_priv->pw_state == WCN36XX_BMPS) {
		wcn->ctrl_ops->smsm_change_state(
			0,
			WCN36XX_SMSM_WLAN_TX_ENABLE);
	} else {
		/* Indicate End Of Packet and generate an interrupt on
		 * descriptor done.
		 */
		wcn36xx_dxe_write_register(wcn,
					   ch->reg_ctrl, ch->def_ctrl);
	}

	return 0;
}

int wcn36xx_dxe_init(struct wcn36xx *wcn)
{
	int reg_data = 0, ret;

	reg_data = WCN36XX_DXE_REG_RESET;
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CSR_RESET, reg_data);

	/* Setting interrupt path */
	reg_data = WCN36XX_DXE_CCU_INT;
	wcn36xx_dxe_write_register_x(wcn, WCN36XX_DXE_REG_CCU_INT, reg_data);

	/***************************************/
	/* Init descriptors for TX LOW channel */
	/***************************************/
	wcn36xx_dxe_init_descs(&wcn->dxe_tx_l_ch);
	wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_l_ch, &wcn->data_mem_pool);

	/* Write channel head to a NEXT register */
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_L,
				   wcn->dxe_tx_l_ch.head_blk_ctl->desc_phy_addr);

	/* Program DMA destination addr for TX LOW */
	wcn36xx_dxe_write_register(wcn,
				   WCN36XX_DXE_CH_DEST_ADDR_TX_L,
				   WCN36XX_DXE_WQ_TX_L);

	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);
	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_L);

	/****************************************/
	/* Init descriptors for TX HIGH channel */
	/****************************************/
	wcn36xx_dxe_init_descs(&wcn->dxe_tx_h_ch);
	wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_h_ch, &wcn->mgmt_mem_pool);

	/* Write channel head to a NEXT register */
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_H,
				   wcn->dxe_tx_h_ch.head_blk_ctl->desc_phy_addr);

	/* Program DMA destination addr for TX HIGH */
	wcn36xx_dxe_write_register(wcn,
				   WCN36XX_DXE_CH_DEST_ADDR_TX_H,
				   WCN36XX_DXE_WQ_TX_H);

	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);

	/* Enable channel interrupts */
	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_H);

	/***************************************/
	/* Init descriptors for RX LOW channel */
	/***************************************/
	wcn36xx_dxe_init_descs(&wcn->dxe_rx_l_ch);

	/* For RX we need to preallocate buffers */
	wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_l_ch);

	/* Write channel head to a NEXT register */
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_L,
				   wcn->dxe_rx_l_ch.head_blk_ctl->desc_phy_addr);

	/* Write DMA source address */
	wcn36xx_dxe_write_register(wcn,
				   WCN36XX_DXE_CH_SRC_ADDR_RX_L,
				   WCN36XX_DXE_WQ_RX_L);

	/* Program preallocated destination address */
	wcn36xx_dxe_write_register(wcn,
				   WCN36XX_DXE_CH_DEST_ADDR_RX_L,
				   wcn->dxe_rx_l_ch.head_blk_ctl->desc->phy_next_l);

	/* Enable default control registers */
	wcn36xx_dxe_write_register(wcn,
				   WCN36XX_DXE_REG_CTL_RX_L,
				   WCN36XX_DXE_CH_DEFAULT_CTL_RX_L);

	/* Enable channel interrupts */
	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_L);

	/****************************************/
	/* Init descriptors for RX HIGH channel */
	/****************************************/
	wcn36xx_dxe_init_descs(&wcn->dxe_rx_h_ch);

	/* For RX we need to preallocate buffers */
	wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_h_ch);

	/* Write channel head to a NEXT register */
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_H,
				   wcn->dxe_rx_h_ch.head_blk_ctl->desc_phy_addr);

	/* Write DMA source address */
	wcn36xx_dxe_write_register(wcn,
				   WCN36XX_DXE_CH_SRC_ADDR_RX_H,
				   WCN36XX_DXE_WQ_RX_H);

	/* Program preallocated destination address */
	wcn36xx_dxe_write_register(wcn,
				   WCN36XX_DXE_CH_DEST_ADDR_RX_H,
				   wcn->dxe_rx_h_ch.head_blk_ctl->desc->phy_next_l);

	/* Enable default control registers */
	wcn36xx_dxe_write_register(wcn,
				   WCN36XX_DXE_REG_CTL_RX_H,
				   WCN36XX_DXE_CH_DEFAULT_CTL_RX_H);

	/* Enable channel interrupts */
	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_H);

	ret = wcn36xx_dxe_request_irqs(wcn);
	if (ret < 0)
		goto out_err;

	return 0;

out_err:
	return ret;
}

void wcn36xx_dxe_deinit(struct wcn36xx *wcn)
{
	free_irq(wcn->tx_irq, wcn);
	free_irq(wcn->rx_irq, wcn);

	if (wcn->tx_ack_skb) {
		ieee80211_tx_status_irqsafe(wcn->hw, wcn->tx_ack_skb);
		wcn->tx_ack_skb = NULL;
	}

	wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_l_ch);
	wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_h_ch);
}