blob: 0e6f46fa6318bb9b71597995840413721b088b73 [file] [log] [blame]
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001/*
Houston Hoffmana2cdf222015-10-20 16:03:06 -07002 * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003 *
4 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5 *
6 *
7 * Permission to use, copy, modify, and/or distribute this software for
8 * any purpose with or without fee is hereby granted, provided that the
9 * above copyright notice and this permission notice appear in all
10 * copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19 * PERFORMANCE OF THIS SOFTWARE.
20 */
21
22/*
23 * This file was originally distributed by Qualcomm Atheros, Inc.
24 * under proprietary terms before Copyright ownership was assigned
25 * to the Linux Foundation.
26 */
27
28/**
29 * DOC: wma_data.c
30 * This file contains tx/rx and data path related functions.
31 */
32
33/* Header files */
34
35#include "wma.h"
36#include "wma_api.h"
37#include "cds_api.h"
38#include "wmi_unified_api.h"
39#include "wlan_qct_sys.h"
40#include "wni_api.h"
41#include "ani_global.h"
42#include "wmi_unified.h"
43#include "wni_cfg.h"
44#include "cfg_api.h"
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080045#include "wlan_tgt_def_config.h"
46
Nirav Shahcbc6d722016-03-01 16:24:53 +053047#include "qdf_nbuf.h"
Anurag Chouhan6d760662016-02-20 16:05:43 +053048#include "qdf_types.h"
Anurag Chouhan600c3a02016-03-01 10:33:54 +053049#include "qdf_mem.h"
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080050#include "ol_txrx_peer_find.h"
51
52#include "wma_types.h"
53#include "lim_api.h"
54#include "lim_session_utils.h"
55
56#include "cds_utils.h"
57
58#if !defined(REMOVE_PKT_LOG)
59#include "pktlog_ac.h"
60#endif /* REMOVE_PKT_LOG */
61
62#include "dbglog_host.h"
63#include "csr_api.h"
64#include "ol_fw.h"
65
66#include "dfs.h"
67#include "wma_internal.h"
68
/* One entry of a rate-search table: a PHY rate expressed in Mbps x 10
 * and the table-specific flag/index used to encode that rate into the
 * firmware rate word (see the per-table comments below).
 */
typedef struct {
	int32_t rate;	/* rate in Mbps x 10 (e.g. 540 == 54 Mbps) */
	uint8_t flag;	/* rate code / MCS index; meaning is table specific */
} wma_search_rate_t;
73
#define WMA_MAX_OFDM_CCK_RATE_TBL_SIZE 12
/* Legacy rate table, sorted in descending rate order (required by
 * wma_bin_search_rate). In ofdm_cck_rate_tbl->flag, if bit 7 is 1 it's CCK,
 * otherwise it is OFDM. The lower bits carry the ofdm/cck index for
 * encoding the rate.
 */
static wma_search_rate_t ofdm_cck_rate_tbl[WMA_MAX_OFDM_CCK_RATE_TBL_SIZE] = {
	{540, 4},		/* 4: OFDM 54 Mbps */
	{480, 0},		/* 0: OFDM 48 Mbps */
	{360, 5},		/* 5: OFDM 36 Mbps */
	{240, 1},		/* 1: OFDM 24 Mbps */
	{180, 6},		/* 6: OFDM 18 Mbps */
	{120, 2},		/* 2: OFDM 12 Mbps */
	{110, (1 << 7)},	/* 0: CCK 11 Mbps Long */
	{90, 7},		/* 7: OFDM 9 Mbps */
	{60, 3},		/* 3: OFDM 6 Mbps */
	{55, ((1 << 7) | 1)},	/* 1: CCK 5.5 Mbps Long */
	{20, ((1 << 7) | 2)},	/* 2: CCK 2 Mbps Long */
	{10, ((1 << 7) | 3)}	/* 3: CCK 1 Mbps Long */
};
92
#define WMA_MAX_VHT20_RATE_TBL_SIZE 9
/* VHT 20 MHz single-stream rate tables, descending rate order.
 * In vht20_400ns_rate_tbl flag carries the mcs index for encoding the rate.
 */
static wma_search_rate_t vht20_400ns_rate_tbl[WMA_MAX_VHT20_RATE_TBL_SIZE] = {
	{867, 8},		/* MCS8 1SS short GI */
	{722, 7},		/* MCS7 1SS short GI */
	{650, 6},		/* MCS6 1SS short GI */
	{578, 5},		/* MCS5 1SS short GI */
	{433, 4},		/* MCS4 1SS short GI */
	{289, 3},		/* MCS3 1SS short GI */
	{217, 2},		/* MCS2 1SS short GI */
	{144, 1},		/* MCS1 1SS short GI */
	{72, 0}			/* MCS0 1SS short GI */
};

/* In vht20_800ns_rate_tbl flag carries the mcs index for encoding the rate */
static wma_search_rate_t vht20_800ns_rate_tbl[WMA_MAX_VHT20_RATE_TBL_SIZE] = {
	{780, 8},		/* MCS8 1SS long GI */
	{650, 7},		/* MCS7 1SS long GI */
	{585, 6},		/* MCS6 1SS long GI */
	{520, 5},		/* MCS5 1SS long GI */
	{390, 4},		/* MCS4 1SS long GI */
	{260, 3},		/* MCS3 1SS long GI */
	{195, 2},		/* MCS2 1SS long GI */
	{130, 1},		/* MCS1 1SS long GI */
	{65, 0}			/* MCS0 1SS long GI */
};
119
#define WMA_MAX_VHT40_RATE_TBL_SIZE 10
/* VHT 40 MHz single-stream rate tables, descending rate order.
 * In vht40_400ns_rate_tbl flag carries the mcs index for encoding the rate.
 */
static wma_search_rate_t vht40_400ns_rate_tbl[WMA_MAX_VHT40_RATE_TBL_SIZE] = {
	{2000, 9},		/* MCS9 1SS short GI */
	{1800, 8},		/* MCS8 1SS short GI */
	{1500, 7},		/* MCS7 1SS short GI */
	{1350, 6},		/* MCS6 1SS short GI */
	{1200, 5},		/* MCS5 1SS short GI */
	{900, 4},		/* MCS4 1SS short GI */
	{600, 3},		/* MCS3 1SS short GI */
	{450, 2},		/* MCS2 1SS short GI */
	{300, 1},		/* MCS1 1SS short GI */
	{150, 0},		/* MCS0 1SS short GI */
};

/* Long-GI (800 ns) variant of the VHT40 table above */
static wma_search_rate_t vht40_800ns_rate_tbl[WMA_MAX_VHT40_RATE_TBL_SIZE] = {
	{1800, 9},		/* MCS9 1SS long GI */
	{1620, 8},		/* MCS8 1SS long GI */
	{1350, 7},		/* MCS7 1SS long GI */
	{1215, 6},		/* MCS6 1SS long GI */
	{1080, 5},		/* MCS5 1SS long GI */
	{810, 4},		/* MCS4 1SS long GI */
	{540, 3},		/* MCS3 1SS long GI */
	{405, 2},		/* MCS2 1SS long GI */
	{270, 1},		/* MCS1 1SS long GI */
	{135, 0}		/* MCS0 1SS long GI */
};
147
#define WMA_MAX_VHT80_RATE_TBL_SIZE 10
/* VHT 80 MHz single-stream rate tables, descending rate order;
 * flag carries the mcs index for encoding the rate.
 */
static wma_search_rate_t vht80_400ns_rate_tbl[WMA_MAX_VHT80_RATE_TBL_SIZE] = {
	{4333, 9},		/* MCS9 1SS short GI */
	{3900, 8},		/* MCS8 1SS short GI */
	{3250, 7},		/* MCS7 1SS short GI */
	{2925, 6},		/* MCS6 1SS short GI */
	{2600, 5},		/* MCS5 1SS short GI */
	{1950, 4},		/* MCS4 1SS short GI */
	{1300, 3},		/* MCS3 1SS short GI */
	{975, 2},		/* MCS2 1SS short GI */
	{650, 1},		/* MCS1 1SS short GI */
	{325, 0}		/* MCS0 1SS short GI */
};

/* Long-GI (800 ns) variant of the VHT80 table above */
static wma_search_rate_t vht80_800ns_rate_tbl[WMA_MAX_VHT80_RATE_TBL_SIZE] = {
	{3900, 9},		/* MCS9 1SS long GI */
	{3510, 8},		/* MCS8 1SS long GI */
	{2925, 7},		/* MCS7 1SS long GI */
	{2633, 6},		/* MCS6 1SS long GI */
	{2340, 5},		/* MCS5 1SS long GI */
	{1755, 4},		/* MCS4 1SS long GI */
	{1170, 3},		/* MCS3 1SS long GI */
	{878, 2},		/* MCS2 1SS long GI */
	{585, 1},		/* MCS1 1SS long GI */
	{293, 0}		/* MCS0 1SS long GI */
};
174
#define WMA_MAX_HT20_RATE_TBL_SIZE 8
/* HT 20 MHz single-stream rate tables, descending rate order;
 * flag carries the mcs index for encoding the rate.
 */
static wma_search_rate_t ht20_400ns_rate_tbl[WMA_MAX_HT20_RATE_TBL_SIZE] = {
	{722, 7},		/* MCS7 1SS short GI */
	{650, 6},		/* MCS6 1SS short GI */
	{578, 5},		/* MCS5 1SS short GI */
	{433, 4},		/* MCS4 1SS short GI */
	{289, 3},		/* MCS3 1SS short GI */
	{217, 2},		/* MCS2 1SS short GI */
	{144, 1},		/* MCS1 1SS short GI */
	{72, 0}			/* MCS0 1SS short GI */
};

/* Long-GI (800 ns) variant of the HT20 table above */
static wma_search_rate_t ht20_800ns_rate_tbl[WMA_MAX_HT20_RATE_TBL_SIZE] = {
	{650, 7},		/* MCS7 1SS long GI */
	{585, 6},		/* MCS6 1SS long GI */
	{520, 5},		/* MCS5 1SS long GI */
	{390, 4},		/* MCS4 1SS long GI */
	{260, 3},		/* MCS3 1SS long GI */
	{195, 2},		/* MCS2 1SS long GI */
	{130, 1},		/* MCS1 1SS long GI */
	{65, 0}			/* MCS0 1SS long GI */
};
197
#define WMA_MAX_HT40_RATE_TBL_SIZE 8
/* HT 40 MHz single-stream rate tables, descending rate order;
 * flag carries the mcs index for encoding the rate.
 */
static wma_search_rate_t ht40_400ns_rate_tbl[WMA_MAX_HT40_RATE_TBL_SIZE] = {
	{1500, 7},		/* MCS7 1SS short GI */
	{1350, 6},		/* MCS6 1SS short GI */
	{1200, 5},		/* MCS5 1SS short GI */
	{900, 4},		/* MCS4 1SS short GI */
	{600, 3},		/* MCS3 1SS short GI */
	{450, 2},		/* MCS2 1SS short GI */
	{300, 1},		/* MCS1 1SS short GI */
	{150, 0}		/* MCS0 1SS short GI */
};

/* Long-GI (800 ns) variant of the HT40 table above */
static wma_search_rate_t ht40_800ns_rate_tbl[WMA_MAX_HT40_RATE_TBL_SIZE] = {
	{1350, 7},		/* MCS7 1SS long GI */
	{1215, 6},		/* MCS6 1SS long GI */
	{1080, 5},		/* MCS5 1SS long GI */
	{810, 4},		/* MCS4 1SS long GI */
	{540, 3},		/* MCS3 1SS long GI */
	{405, 2},		/* MCS2 1SS long GI */
	{270, 1},		/* MCS1 1SS long GI */
	{135, 0}		/* MCS0 1SS long GI */
};
220
221/**
222 * wma_bin_search_rate() - binary search function to find rate
223 * @tbl: rate table
224 * @tbl_size: table size
225 * @mbpsx10_rate: return mbps rate
226 * @ret_flag: return flag
227 *
228 * Return: none
229 */
230static void wma_bin_search_rate(wma_search_rate_t *tbl, int32_t tbl_size,
231 int32_t *mbpsx10_rate, uint8_t *ret_flag)
232{
233 int32_t upper, lower, mid;
234
235 /* the table is descenting. index holds the largest value and the
236 * bottom index holds the smallest value */
237
238 upper = 0; /* index 0 */
239 lower = tbl_size - 1; /* last index */
240
241 if (*mbpsx10_rate >= tbl[upper].rate) {
242 /* use the largest rate */
243 *mbpsx10_rate = tbl[upper].rate;
244 *ret_flag = tbl[upper].flag;
245 return;
246 } else if (*mbpsx10_rate <= tbl[lower].rate) {
247 /* use the smallest rate */
248 *mbpsx10_rate = tbl[lower].rate;
249 *ret_flag = tbl[lower].flag;
250 return;
251 }
252 /* now we do binery search to get the floor value */
253 while (lower - upper > 1) {
254 mid = (upper + lower) >> 1;
255 if (*mbpsx10_rate == tbl[mid].rate) {
256 /* found the exact match */
257 *mbpsx10_rate = tbl[mid].rate;
258 *ret_flag = tbl[mid].flag;
259 return;
260 } else {
261 /* not found. if mid's rate is larger than input move
262 * upper to mid. If mid's rate is larger than input
263 * move lower to mid.
264 */
265 if (*mbpsx10_rate > tbl[mid].rate)
266 lower = mid;
267 else
268 upper = mid;
269 }
270 }
271 /* after the bin search the index is the ceiling of rate */
272 *mbpsx10_rate = tbl[upper].rate;
273 *ret_flag = tbl[upper].flag;
274 return;
275}
276
277/**
278 * wma_fill_ofdm_cck_mcast_rate() - fill ofdm cck mcast rate
279 * @mbpsx10_rate: mbps rates
280 * @nss: nss
281 * @rate: rate
282 *
Anurag Chouhanf04e84f2016-03-03 10:12:12 +0530283 * Return: QDF status
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800284 */
Anurag Chouhanfb54ab02016-02-18 18:00:46 +0530285static QDF_STATUS wma_fill_ofdm_cck_mcast_rate(int32_t mbpsx10_rate,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800286 uint8_t nss, uint8_t *rate)
287{
288 uint8_t idx = 0;
289 wma_bin_search_rate(ofdm_cck_rate_tbl, WMA_MAX_OFDM_CCK_RATE_TBL_SIZE,
290 &mbpsx10_rate, &idx);
291
292 /* if bit 7 is set it uses CCK */
293 if (idx & 0x80)
294 *rate |= (1 << 6) | (idx & 0xF); /* set bit 6 to 1 for CCK */
295 else
296 *rate |= (idx & 0xF);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +0530297 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800298}
299
/**
 * wma_set_ht_vht_mcast_rate() - pick SGI or LGI search result
 * @shortgi: non-zero selects the short-GI result, zero the long-GI result
 * @mbpsx10_rate: requested rate in Mbps x 10 (unused; kept for interface)
 * @sgi_idx: short-GI MCS index
 * @sgi_rate: short-GI rate
 * @lgi_idx: long-GI MCS index
 * @lgi_rate: long-GI rate
 * @premable: preamble code placed into bits 6-7 of the rate word
 * @rate: out: encoded rate word (preamble | MCS low nibble)
 * @streaming_rate: out: selected rate
 *
 * Return: none
 */
static void wma_set_ht_vht_mcast_rate(uint32_t shortgi, int32_t mbpsx10_rate,
				      uint8_t sgi_idx, int32_t sgi_rate,
				      uint8_t lgi_idx, int32_t lgi_rate,
				      uint8_t premable, uint8_t *rate,
				      int32_t *streaming_rate)
{
	uint8_t mcs = shortgi ? sgi_idx : lgi_idx;

	*rate |= (premable << 6) | (mcs & 0xF);
	*streaming_rate = shortgi ? sgi_rate : lgi_rate;
}
328
329/**
330 * wma_fill_ht20_mcast_rate() - fill ht20 mcast rate
331 * @shortgi: short gaurd interval
332 * @mbpsx10_rate: mbps rates
333 * @nss: nss
334 * @rate: rate
335 * @streaming_rate: streaming rate
336 *
Anurag Chouhanf04e84f2016-03-03 10:12:12 +0530337 * Return: QDF status
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800338 */
Anurag Chouhanfb54ab02016-02-18 18:00:46 +0530339static QDF_STATUS wma_fill_ht20_mcast_rate(uint32_t shortgi,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800340 int32_t mbpsx10_rate, uint8_t nss,
341 uint8_t *rate,
342 int32_t *streaming_rate)
343{
344 uint8_t sgi_idx = 0, lgi_idx = 0;
345 int32_t sgi_rate, lgi_rate;
346 if (nss == 1)
347 mbpsx10_rate = mbpsx10_rate >> 1;
348
349 sgi_rate = mbpsx10_rate;
350 lgi_rate = mbpsx10_rate;
351 if (shortgi)
352 wma_bin_search_rate(ht20_400ns_rate_tbl,
353 WMA_MAX_HT20_RATE_TBL_SIZE, &sgi_rate,
354 &sgi_idx);
355 else
356 wma_bin_search_rate(ht20_800ns_rate_tbl,
357 WMA_MAX_HT20_RATE_TBL_SIZE, &lgi_rate,
358 &lgi_idx);
359
360 wma_set_ht_vht_mcast_rate(shortgi, mbpsx10_rate, sgi_idx, sgi_rate,
361 lgi_idx, lgi_rate, 2, rate, streaming_rate);
362 if (nss == 1)
363 *streaming_rate = *streaming_rate << 1;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +0530364 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800365}
366
367/**
368 * wma_fill_ht40_mcast_rate() - fill ht40 mcast rate
369 * @shortgi: short gaurd interval
370 * @mbpsx10_rate: mbps rates
371 * @nss: nss
372 * @rate: rate
373 * @streaming_rate: streaming rate
374 *
Anurag Chouhanf04e84f2016-03-03 10:12:12 +0530375 * Return: QDF status
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800376 */
Anurag Chouhanfb54ab02016-02-18 18:00:46 +0530377static QDF_STATUS wma_fill_ht40_mcast_rate(uint32_t shortgi,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800378 int32_t mbpsx10_rate, uint8_t nss,
379 uint8_t *rate,
380 int32_t *streaming_rate)
381{
382 uint8_t sgi_idx = 0, lgi_idx = 0;
383 int32_t sgi_rate, lgi_rate;
384
385 /* for 2x2 divide the rate by 2 */
386 if (nss == 1)
387 mbpsx10_rate = mbpsx10_rate >> 1;
388
389 sgi_rate = mbpsx10_rate;
390 lgi_rate = mbpsx10_rate;
391 if (shortgi)
392 wma_bin_search_rate(ht40_400ns_rate_tbl,
393 WMA_MAX_HT40_RATE_TBL_SIZE, &sgi_rate,
394 &sgi_idx);
395 else
396 wma_bin_search_rate(ht40_800ns_rate_tbl,
397 WMA_MAX_HT40_RATE_TBL_SIZE, &lgi_rate,
398 &lgi_idx);
399
400 wma_set_ht_vht_mcast_rate(shortgi, mbpsx10_rate, sgi_idx, sgi_rate,
401 lgi_idx, lgi_rate, 2, rate, streaming_rate);
402
Anurag Chouhanfb54ab02016-02-18 18:00:46 +0530403 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800404}
405
406/**
407 * wma_fill_vht20_mcast_rate() - fill vht20 mcast rate
408 * @shortgi: short gaurd interval
409 * @mbpsx10_rate: mbps rates
410 * @nss: nss
411 * @rate: rate
412 * @streaming_rate: streaming rate
413 *
Anurag Chouhanf04e84f2016-03-03 10:12:12 +0530414 * Return: QDF status
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800415 */
Anurag Chouhanfb54ab02016-02-18 18:00:46 +0530416static QDF_STATUS wma_fill_vht20_mcast_rate(uint32_t shortgi,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800417 int32_t mbpsx10_rate, uint8_t nss,
418 uint8_t *rate,
419 int32_t *streaming_rate)
420{
421 uint8_t sgi_idx = 0, lgi_idx = 0;
422 int32_t sgi_rate, lgi_rate;
423
424 /* for 2x2 divide the rate by 2 */
425 if (nss == 1)
426 mbpsx10_rate = mbpsx10_rate >> 1;
427
428 sgi_rate = mbpsx10_rate;
429 lgi_rate = mbpsx10_rate;
430 if (shortgi)
431 wma_bin_search_rate(vht20_400ns_rate_tbl,
432 WMA_MAX_VHT20_RATE_TBL_SIZE, &sgi_rate,
433 &sgi_idx);
434 else
435 wma_bin_search_rate(vht20_800ns_rate_tbl,
436 WMA_MAX_VHT20_RATE_TBL_SIZE, &lgi_rate,
437 &lgi_idx);
438
439 wma_set_ht_vht_mcast_rate(shortgi, mbpsx10_rate, sgi_idx, sgi_rate,
440 lgi_idx, lgi_rate, 3, rate, streaming_rate);
441 if (nss == 1)
442 *streaming_rate = *streaming_rate << 1;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +0530443 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800444}
445
446/**
447 * wma_fill_vht40_mcast_rate() - fill vht40 mcast rate
448 * @shortgi: short gaurd interval
449 * @mbpsx10_rate: mbps rates
450 * @nss: nss
451 * @rate: rate
452 * @streaming_rate: streaming rate
453 *
Anurag Chouhanf04e84f2016-03-03 10:12:12 +0530454 * Return: QDF status
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800455 */
Anurag Chouhanfb54ab02016-02-18 18:00:46 +0530456static QDF_STATUS wma_fill_vht40_mcast_rate(uint32_t shortgi,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800457 int32_t mbpsx10_rate, uint8_t nss,
458 uint8_t *rate,
459 int32_t *streaming_rate)
460{
461 uint8_t sgi_idx = 0, lgi_idx = 0;
462 int32_t sgi_rate, lgi_rate;
463
464 /* for 2x2 divide the rate by 2 */
465 if (nss == 1)
466 mbpsx10_rate = mbpsx10_rate >> 1;
467
468 sgi_rate = mbpsx10_rate;
469 lgi_rate = mbpsx10_rate;
470 if (shortgi)
471 wma_bin_search_rate(vht40_400ns_rate_tbl,
472 WMA_MAX_VHT40_RATE_TBL_SIZE, &sgi_rate,
473 &sgi_idx);
474 else
475 wma_bin_search_rate(vht40_800ns_rate_tbl,
476 WMA_MAX_VHT40_RATE_TBL_SIZE, &lgi_rate,
477 &lgi_idx);
478
479 wma_set_ht_vht_mcast_rate(shortgi, mbpsx10_rate,
480 sgi_idx, sgi_rate, lgi_idx, lgi_rate,
481 3, rate, streaming_rate);
482 if (nss == 1)
483 *streaming_rate = *streaming_rate << 1;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +0530484 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800485}
486
487/**
488 * wma_fill_vht80_mcast_rate() - fill vht80 mcast rate
489 * @shortgi: short gaurd interval
490 * @mbpsx10_rate: mbps rates
491 * @nss: nss
492 * @rate: rate
493 * @streaming_rate: streaming rate
494 *
Anurag Chouhanf04e84f2016-03-03 10:12:12 +0530495 * Return: QDF status
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800496 */
Anurag Chouhanfb54ab02016-02-18 18:00:46 +0530497static QDF_STATUS wma_fill_vht80_mcast_rate(uint32_t shortgi,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800498 int32_t mbpsx10_rate, uint8_t nss,
499 uint8_t *rate,
500 int32_t *streaming_rate)
501{
502 uint8_t sgi_idx = 0, lgi_idx = 0;
503 int32_t sgi_rate, lgi_rate;
504
505 /* for 2x2 divide the rate by 2 */
506 if (nss == 1)
507 mbpsx10_rate = mbpsx10_rate >> 1;
508
509 sgi_rate = mbpsx10_rate;
510 lgi_rate = mbpsx10_rate;
511 if (shortgi)
512 wma_bin_search_rate(vht80_400ns_rate_tbl,
513 WMA_MAX_VHT80_RATE_TBL_SIZE, &sgi_rate,
514 &sgi_idx);
515 else
516 wma_bin_search_rate(vht80_800ns_rate_tbl,
517 WMA_MAX_VHT80_RATE_TBL_SIZE, &lgi_rate,
518 &lgi_idx);
519
520 wma_set_ht_vht_mcast_rate(shortgi, mbpsx10_rate, sgi_idx, sgi_rate,
521 lgi_idx, lgi_rate, 3, rate, streaming_rate);
522 if (nss == 1)
523 *streaming_rate = *streaming_rate << 1;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +0530524 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800525}
526
527/**
528 * wma_fill_ht_mcast_rate() - fill ht mcast rate
529 * @shortgi: short gaurd interval
530 * @chwidth: channel width
531 * @chanmode: channel mode
532 * @mhz: frequency
533 * @mbpsx10_rate: mbps rates
534 * @nss: nss
535 * @rate: rate
536 * @streaming_rate: streaming rate
537 *
Anurag Chouhanf04e84f2016-03-03 10:12:12 +0530538 * Return: QDF status
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800539 */
Anurag Chouhanfb54ab02016-02-18 18:00:46 +0530540static QDF_STATUS wma_fill_ht_mcast_rate(uint32_t shortgi,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800541 uint32_t chwidth, int32_t mbpsx10_rate,
542 uint8_t nss, WLAN_PHY_MODE chanmode,
543 uint8_t *rate,
544 int32_t *streaming_rate)
545{
546 int32_t ret = 0;
547
548 *streaming_rate = 0;
549 if (chwidth == 0)
550 ret = wma_fill_ht20_mcast_rate(shortgi, mbpsx10_rate,
551 nss, rate, streaming_rate);
552 else if (chwidth == 1)
553 ret = wma_fill_ht40_mcast_rate(shortgi, mbpsx10_rate,
554 nss, rate, streaming_rate);
555 else
556 WMA_LOGE("%s: Error, Invalid chwidth enum %d", __func__,
557 chwidth);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +0530558 return (*streaming_rate != 0) ? QDF_STATUS_SUCCESS : QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800559}
560
561/**
562 * wma_fill_vht_mcast_rate() - fill vht mcast rate
563 * @shortgi: short gaurd interval
564 * @chwidth: channel width
565 * @chanmode: channel mode
566 * @mhz: frequency
567 * @mbpsx10_rate: mbps rates
568 * @nss: nss
569 * @rate: rate
570 * @streaming_rate: streaming rate
571 *
Anurag Chouhanf04e84f2016-03-03 10:12:12 +0530572 * Return: QDF status
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800573 */
Anurag Chouhanfb54ab02016-02-18 18:00:46 +0530574static QDF_STATUS wma_fill_vht_mcast_rate(uint32_t shortgi,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800575 uint32_t chwidth,
576 int32_t mbpsx10_rate, uint8_t nss,
577 WLAN_PHY_MODE chanmode,
578 uint8_t *rate,
579 int32_t *streaming_rate)
580{
581 int32_t ret = 0;
582
583 *streaming_rate = 0;
584 if (chwidth == 0)
585 ret = wma_fill_vht20_mcast_rate(shortgi, mbpsx10_rate, nss,
586 rate, streaming_rate);
587 else if (chwidth == 1)
588 ret = wma_fill_vht40_mcast_rate(shortgi, mbpsx10_rate, nss,
589 rate, streaming_rate);
590 else if (chwidth == 2)
591 ret = wma_fill_vht80_mcast_rate(shortgi, mbpsx10_rate, nss,
592 rate, streaming_rate);
593 else
594 WMA_LOGE("%s: chwidth enum %d not supported",
595 __func__, chwidth);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +0530596 return (*streaming_rate != 0) ? QDF_STATUS_SUCCESS : QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800597}
598
/* Rates at or below this (Mbps x 10) are always sent 1x1 */
#define WMA_MCAST_1X1_CUT_OFF_RATE 2000
/**
 * wma_encode_mc_rate() - encode the multicast rate word
 * @shortgi: short guard interval enabled
 * @chwidth: channel width
 * @chanmode: channel mode (passed through; only logged here)
 * @mhz: channel frequency in MHz
 * @mbpsx10_rate: requested rate in Mbps x 10, with optional user-nss
 *                request encoded in bits 28-30 (see below)
 * @nss: max supported nss (0 - 1x1; 1 - 2x2; 2 - 3x3)
 * @rate: out: encoded rate word (nss in bits 4-5, preamble/MCS below)
 *
 * Return: QDF status
 */
static QDF_STATUS wma_encode_mc_rate(uint32_t shortgi, uint32_t chwidth,
				     WLAN_PHY_MODE chanmode, A_UINT32 mhz,
				     int32_t mbpsx10_rate, uint8_t nss,
				     uint8_t *rate)
{
	int32_t ret = 0;

	/* nss input value: 0 - 1x1; 1 - 2x2; 2 - 3x3
	 * the phymode selection is based on following assumption:
	 * (1) if the app specifically requested 1x1 or 2x2 we honor it
	 * (2) if mbpsx10_rate <= 540: always use BG
	 * (3) 540 < mbpsx10_rate <= 2000: use 1x1 HT/VHT
	 * (4) 2000 < mbpsx10_rate: use 2x2 HT/VHT
	 */
	WMA_LOGE("%s: Input: nss = %d, chanmode = %d, "
		 "mbpsx10 = 0x%x, chwidth = %d, shortgi = %d",
		 __func__, nss, chanmode, mbpsx10_rate, chwidth, shortgi);
	if ((mbpsx10_rate & 0x40000000) && nss > 0) {
		/* bit 30 indicates user inputed nss,
		 * bit 28 and 29 used to encode nss
		 */
		uint8_t user_nss = (mbpsx10_rate & 0x30000000) >> 28;

		/* honor the user request but never exceed our capability */
		nss = (user_nss < nss) ? user_nss : nss;
		/* zero out bits 28 - 30 to recover the actual rate */
		mbpsx10_rate &= ~0x70000000;
	} else if (mbpsx10_rate <= WMA_MCAST_1X1_CUT_OFF_RATE) {
		/* if the input rate is less or equal to the
		 * 1x1 cutoff rate we use 1x1 only
		 */
		nss = 0;
	}
	/* encode NSS bits (bit 4, bit 5) */
	*rate = (nss & 0x3) << 4;
	/* if mcast input rate exceeds the ofdm/cck max rate 54mpbs
	 * we try to choose best ht/vht mcs rate
	 */
	if (540 < mbpsx10_rate) {
		/* cannot use ofdm/cck, choose closest ht/vht mcs rate */
		uint8_t rate_ht = *rate;
		uint8_t rate_vht = *rate;
		int32_t stream_rate_ht = 0;
		int32_t stream_rate_vht = 0;
		int32_t stream_rate = 0;

		ret = wma_fill_ht_mcast_rate(shortgi, chwidth, mbpsx10_rate,
					     nss, chanmode, &rate_ht,
					     &stream_rate_ht);
		if (ret != QDF_STATUS_SUCCESS) {
			/* no usable HT rate; remember that for the
			 * HT-vs-VHT selection below
			 */
			stream_rate_ht = 0;
		}
		if (mhz < WMA_2_4_GHZ_MAX_FREQ) {
			/* not in 5 GHZ frequency: HT only, skip VHT */
			*rate = rate_ht;
			stream_rate = stream_rate_ht;
			goto ht_vht_done;
		}
		/* capable doing 11AC mcast so that search vht tables */
		ret = wma_fill_vht_mcast_rate(shortgi, chwidth, mbpsx10_rate,
					      nss, chanmode, &rate_vht,
					      &stream_rate_vht);
		if (ret != QDF_STATUS_SUCCESS) {
			/* VHT search failed: fall back to HT if we got one */
			if (stream_rate_ht != 0)
				ret = QDF_STATUS_SUCCESS;
			*rate = rate_ht;
			stream_rate = stream_rate_ht;
			goto ht_vht_done;
		}
		if (stream_rate_ht == 0) {
			/* only vht rate available */
			*rate = rate_vht;
			stream_rate = stream_rate_vht;
		} else {
			/* set ht as default first */
			*rate = rate_ht;
			stream_rate = stream_rate_ht;
			if (stream_rate < mbpsx10_rate) {
				/* HT undershoots: prefer VHT if it meets
				 * the request or at least beats HT
				 */
				if (mbpsx10_rate <= stream_rate_vht ||
				    stream_rate < stream_rate_vht) {
					*rate = rate_vht;
					stream_rate = stream_rate_vht;
				}
			} else {
				/* HT meets the request: prefer VHT only if
				 * it also meets it with a tighter rate
				 */
				if (stream_rate_vht >= mbpsx10_rate &&
				    stream_rate_vht < stream_rate) {
					*rate = rate_vht;
					stream_rate = stream_rate_vht;
				}
			}
		}
ht_vht_done:
		WMA_LOGE("%s: NSS = %d, ucast_chanmode = %d, "
			 "freq = %d, input_rate = %d, chwidth = %d "
			 "rate = 0x%x, streaming_rate = %d",
			 __func__, nss, chanmode, mhz,
			 mbpsx10_rate, chwidth, *rate, stream_rate);
	} else {
		if (mbpsx10_rate > 0)
			ret = wma_fill_ofdm_cck_mcast_rate(mbpsx10_rate,
							   nss, rate);
		else
			*rate = 0xFF;	/* no rate requested: disable */

		WMA_LOGE("%s: NSS = %d, ucast_chanmode = %d, "
			 "input_rate = %d, rate = 0x%x",
			 __func__, nss, chanmode, mbpsx10_rate, *rate);
	}
	return ret;
}
721
722/**
723 * wma_set_bss_rate_flags() - set rate flags based on BSS capability
724 * @iface: txrx_node ctx
725 * @add_bss: add_bss params
726 *
727 * Return: none
728 */
729void wma_set_bss_rate_flags(struct wma_txrx_node *iface,
730 tpAddBssParams add_bss)
731{
732 iface->rate_flags = 0;
733
734 if (add_bss->vhtCapable) {
735 if (add_bss->ch_width == CH_WIDTH_80P80MHZ)
736 iface->rate_flags |= eHAL_TX_RATE_VHT80;
737 if (add_bss->ch_width == CH_WIDTH_160MHZ)
738 iface->rate_flags |= eHAL_TX_RATE_VHT80;
739 if (add_bss->ch_width == CH_WIDTH_80MHZ)
740 iface->rate_flags |= eHAL_TX_RATE_VHT80;
741 else if (add_bss->ch_width)
742 iface->rate_flags |= eHAL_TX_RATE_VHT40;
743 else
744 iface->rate_flags |= eHAL_TX_RATE_VHT20;
745 }
746 /* avoid to conflict with htCapable flag */
747 else if (add_bss->htCapable) {
748 if (add_bss->ch_width)
749 iface->rate_flags |= eHAL_TX_RATE_HT40;
750 else
751 iface->rate_flags |= eHAL_TX_RATE_HT20;
752 }
753
754 if (add_bss->staContext.fShortGI20Mhz ||
755 add_bss->staContext.fShortGI40Mhz)
756 iface->rate_flags |= eHAL_TX_RATE_SGI;
757
758 if (!add_bss->htCapable && !add_bss->vhtCapable)
759 iface->rate_flags = eHAL_TX_RATE_LEGACY;
760}
761
762/**
763 * wmi_unified_send_txbf() - set txbf parameter to fw
764 * @wma: wma handle
765 * @params: txbf parameters
766 *
767 * Return: 0 for success or error code
768 */
769int32_t wmi_unified_send_txbf(tp_wma_handle wma, tpAddStaParams params)
770{
771 wmi_vdev_txbf_en txbf_en;
772
773 /* This is set when Other partner is Bformer
774 * and we are capable bformee(enabled both in ini and fw)
775 */
776 txbf_en.sutxbfee = params->vhtTxBFCapable;
777 txbf_en.mutxbfee = params->vhtTxMUBformeeCapable;
778 txbf_en.sutxbfer = params->enable_su_tx_bformer;
779 txbf_en.mutxbfer = 0;
780
781 /* When MU TxBfee is set, SU TxBfee must be set by default */
782 if (txbf_en.mutxbfee)
783 txbf_en.sutxbfee = txbf_en.mutxbfee;
784
785 WMA_LOGD("txbf_en.sutxbfee %d txbf_en.mutxbfee %d, sutxbfer %d",
786 txbf_en.sutxbfee, txbf_en.mutxbfee, txbf_en.sutxbfer);
787
Govind Singhd76a5b02016-03-08 15:12:14 +0530788 return wma_vdev_set_param(wma->wmi_handle,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800789 params->smesessionId,
790 WMI_VDEV_PARAM_TXBF,
791 *((A_UINT8 *) &txbf_en));
792}
793
/**
 * wma_data_tx_ack_work_handler() - process data tx ack in work context
 * @ack_work: struct wma_tx_ack_work_ctx allocated by the completion
 *            handler; freed here
 *
 * Invokes the UMAC-registered data OTA ack callback with the tx status,
 * then clears the pending callback/nbuf bookkeeping on the wma handle
 * and frees the work context.
 *
 * Return: none
 */
static void wma_data_tx_ack_work_handler(void *ack_work)
{
	struct wma_tx_ack_work_ctx *work;
	tp_wma_handle wma_handle;
	pWMAAckFnTxComp ack_cb;

	/* bail out early if the driver is going down */
	if (cds_is_load_or_unload_in_progress()) {
		WMA_LOGE("%s: Driver load/unload in progress", __func__);
		return;
	}

	work = (struct wma_tx_ack_work_ctx *)ack_work;

	wma_handle = work->wma_handle;
	ack_cb = wma_handle->umac_data_ota_ack_cb;

	/* a non-zero status is an error; log it louder */
	if (work->status)
		WMA_LOGE("Data Tx Ack Cb Status %d", work->status);
	else
		WMA_LOGD("Data Tx Ack Cb Status %d", work->status);

	/* Call the Ack Cb registered by UMAC; it takes 1 on success,
	 * 0 on failure (inverse of work->status)
	 */
	if (ack_cb)
		ack_cb((tpAniSirGlobal) (wma_handle->mac_context),
		       work->status ? 0 : 1);
	else
		WMA_LOGE("Data Tx Ack Cb is NULL");

	/* the pending ack is consumed: clear bookkeeping and free the
	 * work context allocated in wma_data_tx_ack_comp_hdlr()
	 */
	wma_handle->umac_data_ota_ack_cb = NULL;
	wma_handle->last_umac_data_nbuf = NULL;
	qdf_mem_free(work);
	wma_handle->ack_work_ctx = NULL;
}
833
834/**
835 * wma_data_tx_ack_comp_hdlr() - handles tx data ack completion
836 * @context: context with which the handler is registered
837 * @netbuf: tx data nbuf
838 * @err: status of tx completion
839 *
840 * This is the cb registered with TxRx for
841 * Ack Complete
842 *
843 * Return: none
844 */
845void
Nirav Shahcbc6d722016-03-01 16:24:53 +0530846wma_data_tx_ack_comp_hdlr(void *wma_context, qdf_nbuf_t netbuf, int32_t status)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800847{
848 ol_txrx_pdev_handle pdev;
849 tp_wma_handle wma_handle = (tp_wma_handle) wma_context;
850
851 if (NULL == wma_handle) {
852 WMA_LOGE("%s: Invalid WMA Handle", __func__);
853 return;
854 }
855
Anurag Chouhan6d760662016-02-20 16:05:43 +0530856 pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800857
858 if (NULL == pdev) {
859 WMA_LOGE("%s: Failed to get pdev", __func__);
860 return;
861 }
862
863 /*
864 * if netBuf does not match with pending nbuf then just free the
865 * netbuf and do not call ack cb
866 */
867 if (wma_handle->last_umac_data_nbuf != netbuf) {
868 if (wma_handle->umac_data_ota_ack_cb) {
869 WMA_LOGE("%s: nbuf does not match but umac_data_ota_ack_cb is not null",
870 __func__);
871 } else {
872 WMA_LOGE("%s: nbuf does not match and umac_data_ota_ack_cb is also null",
873 __func__);
874 }
875 goto free_nbuf;
876 }
877
878 if (wma_handle && wma_handle->umac_data_ota_ack_cb) {
879 struct wma_tx_ack_work_ctx *ack_work;
880
Anurag Chouhan600c3a02016-03-01 10:33:54 +0530881 ack_work = qdf_mem_malloc(sizeof(struct wma_tx_ack_work_ctx));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800882 wma_handle->ack_work_ctx = ack_work;
883 if (ack_work) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800884 ack_work->wma_handle = wma_handle;
885 ack_work->sub_type = 0;
886 ack_work->status = status;
887
Anurag Chouhan42958bb2016-02-19 15:43:11 +0530888 qdf_create_work(0, &ack_work->ack_cmp_work,
Krishna Kumaar Natarajan9f421702015-11-10 14:56:16 -0800889 wma_data_tx_ack_work_handler,
890 ack_work);
Anurag Chouhan42958bb2016-02-19 15:43:11 +0530891 qdf_sched_work(0, &ack_work->ack_cmp_work);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800892 }
893 }
894
895free_nbuf:
896 /* unmap and freeing the tx buf as txrx is not taking care */
Nirav Shahcbc6d722016-03-01 16:24:53 +0530897 qdf_nbuf_unmap_single(pdev->osdev, netbuf, QDF_DMA_TO_DEVICE);
898 qdf_nbuf_free(netbuf);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800899}
900
901/**
902 * wma_update_txrx_chainmask() - update txrx chainmask
903 * @num_rf_chains: number rf chains
904 * @cmd_value: command value
905 *
906 * Return: none
907 */
908void wma_update_txrx_chainmask(int num_rf_chains, int *cmd_value)
909{
910 if (*cmd_value > WMA_MAX_RF_CHAINS(num_rf_chains)) {
911 WMA_LOGE("%s: Chainmask value exceeds the maximum"
912 " supported range setting it to"
913 " maximum value. Requested value %d"
914 " Updated value %d", __func__, *cmd_value,
915 WMA_MAX_RF_CHAINS(num_rf_chains));
916 *cmd_value = WMA_MAX_RF_CHAINS(num_rf_chains);
917 } else if (*cmd_value < WMA_MIN_RF_CHAINS) {
918 WMA_LOGE("%s: Chainmask value is less than the minimum"
919 " supported range setting it to"
920 " minimum value. Requested value %d"
921 " Updated value %d", __func__, *cmd_value,
922 WMA_MIN_RF_CHAINS);
923 *cmd_value = WMA_MIN_RF_CHAINS;
924 }
925}
926
/**
 * wma_peer_state_change_event_handler() - peer state change event handler
 * @handle: wma handle
 * @event_buff: event buffer
 * @len: length of buffer
 *
 * This event handler unpauses vdev if peer state change to AUTHORIZED STATE
 * (STA mode only, and only when legacy tx flow control is compiled in).
 *
 * Return: 0 for success or error code
 */
int wma_peer_state_change_event_handler(void *handle,
					uint8_t *event_buff,
					uint32_t len)
{
	WMI_PEER_STATE_EVENTID_param_tlvs *param_buf;
	wmi_peer_state_event_fixed_param *event;
	ol_txrx_vdev_handle vdev;
	tp_wma_handle wma_handle = (tp_wma_handle) handle;

	if (!event_buff) {
		WMA_LOGE("%s: Received NULL event ptr from FW", __func__);
		return -EINVAL;
	}
	/* NOTE(review): this check can never fire — param_buf is a cast of
	 * event_buff, which was just verified non-NULL above
	 */
	param_buf = (WMI_PEER_STATE_EVENTID_param_tlvs *) event_buff;
	if (!param_buf) {
		WMA_LOGE("%s: Received NULL buf ptr from FW", __func__);
		return -ENOMEM;
	}

	event = param_buf->fixed_param;
	/* resolve the vdev this peer event belongs to */
	vdev = wma_find_vdev_by_id(wma_handle, event->vdev_id);
	if (NULL == vdev) {
		WMA_LOGP("%s: Couldn't find vdev for vdev_id: %d",
			 __func__, event->vdev_id);
		return -EINVAL;
	}

	if (vdev->opmode == wlan_op_mode_sta
	    && event->state == WMI_PEER_STATE_AUTHORIZED) {
		/*
		 * set event so that hdd
		 * can procced and unpause tx queue
		 */
#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
		if (!wma_handle->peer_authorized_cb) {
			WMA_LOGE("%s: peer authorized cb not registered",
				 __func__);
			return -EINVAL;
		}
		wma_handle->peer_authorized_cb(vdev->vdev_id);
#endif
	}

	return 0;
}
982
983/**
984 * wma_set_enable_disable_mcc_adaptive_scheduler() -enable/disable mcc scheduler
985 * @mcc_adaptive_scheduler: enable/disable
986 *
987 * This function enable/disable mcc adaptive scheduler in fw.
988 *
Anurag Chouhanfb54ab02016-02-18 18:00:46 +0530989 * Return: QDF_STATUS_SUCCESS for sucess or error code
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800990 */
Anurag Chouhanfb54ab02016-02-18 18:00:46 +0530991QDF_STATUS wma_set_enable_disable_mcc_adaptive_scheduler(uint32_t
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800992 mcc_adaptive_scheduler)
993{
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800994 tp_wma_handle wma = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800995
Anurag Chouhan6d760662016-02-20 16:05:43 +0530996 wma = cds_get_context(QDF_MODULE_ID_WMA);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800997 if (NULL == wma) {
998 WMA_LOGE("%s : Failed to get wma", __func__);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +0530999 return QDF_STATUS_E_FAULT;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001000 }
1001
Himanshu Agarwal17dea6e2016-03-09 12:11:22 +05301002 return wmi_unified_set_enable_disable_mcc_adaptive_scheduler_cmd(
1003 wma->wmi_handle, mcc_adaptive_scheduler);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001004}
1005
1006/**
1007 * wma_set_mcc_channel_time_latency() -set MCC channel time latency
1008 * @wma: wma handle
1009 * @mcc_channel: mcc channel
1010 * @mcc_channel_time_latency: MCC channel time latency.
1011 *
1012 * Currently used to set time latency for an MCC vdev/adapter using operating
1013 * channel of it and channel number. The info is provided run time using
1014 * iwpriv command: iwpriv <wlan0 | p2p0> setMccLatency <latency in ms>.
1015 *
Anurag Chouhanf04e84f2016-03-03 10:12:12 +05301016 * Return: QDF status
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001017 */
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301018QDF_STATUS wma_set_mcc_channel_time_latency
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001019 (tp_wma_handle wma,
1020 uint32_t mcc_channel, uint32_t mcc_channel_time_latency)
1021{
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001022 uint32_t cfg_val = 0;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001023 struct sAniSirGlobal *pMac = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001024 uint32_t channel1 = mcc_channel;
1025 uint32_t chan1_freq = cds_chan_to_freq(channel1);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001026
1027 if (!wma) {
1028 WMA_LOGE("%s:NULL wma ptr. Exiting", __func__);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05301029 QDF_ASSERT(0);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301030 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001031 }
Anurag Chouhan6d760662016-02-20 16:05:43 +05301032 pMac = cds_get_context(QDF_MODULE_ID_PE);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001033 if (!pMac) {
1034 WMA_LOGE("%s:NULL pMac ptr. Exiting", __func__);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05301035 QDF_ASSERT(0);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301036 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001037 }
1038
1039 /* First step is to confirm if MCC is active */
1040 if (!lim_is_in_mcc(pMac)) {
1041 WMA_LOGE("%s: MCC is not active. Exiting", __func__);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05301042 QDF_ASSERT(0);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301043 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001044 }
1045 /* Confirm MCC adaptive scheduler feature is disabled */
1046 if (wlan_cfg_get_int(pMac, WNI_CFG_ENABLE_MCC_ADAPTIVE_SCHED,
Himanshu Agarwal17dea6e2016-03-09 12:11:22 +05301047 &cfg_val) == eSIR_SUCCESS) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001048 if (cfg_val == WNI_CFG_ENABLE_MCC_ADAPTIVE_SCHED_STAMAX) {
1049 WMA_LOGD("%s: Can't set channel latency while MCC "
1050 "ADAPTIVE SCHED is enabled. Exit", __func__);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301051 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001052 }
1053 } else {
1054 WMA_LOGE("%s: Failed to get value for MCC_ADAPTIVE_SCHED, "
1055 "Exit w/o setting latency", __func__);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05301056 QDF_ASSERT(0);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301057 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001058 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001059
Himanshu Agarwal17dea6e2016-03-09 12:11:22 +05301060 return wmi_unified_set_mcc_channel_time_latency_cmd(wma->wmi_handle,
1061 chan1_freq,
1062 mcc_channel_time_latency);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001063}
1064
1065/**
1066 * wma_set_mcc_channel_time_quota() -set MCC channel time quota
1067 * @wma: wma handle
1068 * @adapter_1_chan_number: adapter 1 channel number
1069 * @adapter_1_quota: adapter 1 quota
1070 * @adapter_2_chan_number: adapter 2 channel number
1071 *
1072 * Currently used to set time quota for 2 MCC vdevs/adapters using (operating
1073 * channel, quota) for each mode . The info is provided run time using
1074 * iwpriv command: iwpriv <wlan0 | p2p0> setMccQuota <quota in ms>.
1075 * Note: the quota provided in command is for the same mode in cmd. HDD
1076 * checks if MCC mode is active, gets the second mode and its operating chan.
1077 * Quota for the 2nd role is calculated as 100 - quota of first mode.
1078 *
Anurag Chouhanf04e84f2016-03-03 10:12:12 +05301079 * Return: QDF status
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001080 */
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301081QDF_STATUS wma_set_mcc_channel_time_quota
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001082 (tp_wma_handle wma,
1083 uint32_t adapter_1_chan_number,
1084 uint32_t adapter_1_quota, uint32_t adapter_2_chan_number)
1085{
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001086 uint32_t cfg_val = 0;
1087 struct sAniSirGlobal *pMac = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001088 uint32_t chan1_freq = cds_chan_to_freq(adapter_1_chan_number);
1089 uint32_t chan2_freq = cds_chan_to_freq(adapter_2_chan_number);
1090
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001091 if (!wma) {
1092 WMA_LOGE("%s:NULL wma ptr. Exiting", __func__);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05301093 QDF_ASSERT(0);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301094 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001095 }
Anurag Chouhan6d760662016-02-20 16:05:43 +05301096 pMac = cds_get_context(QDF_MODULE_ID_PE);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001097 if (!pMac) {
1098 WMA_LOGE("%s:NULL pMac ptr. Exiting", __func__);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05301099 QDF_ASSERT(0);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301100 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001101 }
1102
1103 /* First step is to confirm if MCC is active */
1104 if (!lim_is_in_mcc(pMac)) {
1105 WMA_LOGD("%s: MCC is not active. Exiting", __func__);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05301106 QDF_ASSERT(0);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301107 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001108 }
1109
1110 /* Confirm MCC adaptive scheduler feature is disabled */
1111 if (wlan_cfg_get_int(pMac, WNI_CFG_ENABLE_MCC_ADAPTIVE_SCHED,
Himanshu Agarwal17dea6e2016-03-09 12:11:22 +05301112 &cfg_val) == eSIR_SUCCESS) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001113 if (cfg_val == WNI_CFG_ENABLE_MCC_ADAPTIVE_SCHED_STAMAX) {
1114 WMA_LOGD("%s: Can't set channel quota while "
1115 "MCC_ADAPTIVE_SCHED is enabled. Exit",
1116 __func__);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301117 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001118 }
1119 } else {
1120 WMA_LOGE("%s: Failed to retrieve "
1121 "WNI_CFG_ENABLE_MCC_ADAPTIVE_SCHED. Exit", __func__);
Anurag Chouhanb2dc16f2016-02-25 11:47:37 +05301122 QDF_ASSERT(0);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301123 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001124 }
1125
Himanshu Agarwal17dea6e2016-03-09 12:11:22 +05301126 return wmi_unified_set_mcc_channel_time_quota_cmd(wma->wmi_handle,
1127 chan1_freq,
1128 adapter_1_quota,
1129 chan2_freq);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001130}
1131
1132/**
1133 * wma_set_linkstate() - set wma linkstate
1134 * @wma: wma handle
1135 * @params: link state params
1136 *
1137 * Return: none
1138 */
1139void wma_set_linkstate(tp_wma_handle wma, tpLinkStateParams params)
1140{
1141 ol_txrx_pdev_handle pdev;
1142 ol_txrx_vdev_handle vdev;
1143 ol_txrx_peer_handle peer;
1144 uint8_t vdev_id, peer_id;
1145 bool roam_synch_in_progress = false;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301146 QDF_STATUS status;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001147
1148 params->status = true;
1149 WMA_LOGD("%s: state %d selfmac %pM", __func__,
1150 params->state, params->selfMacAddr);
1151 if ((params->state != eSIR_LINK_PREASSOC_STATE) &&
1152 (params->state != eSIR_LINK_DOWN_STATE)) {
1153 WMA_LOGD("%s: unsupported link state %d",
1154 __func__, params->state);
1155 goto out;
1156 }
1157
Anurag Chouhan6d760662016-02-20 16:05:43 +05301158 pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001159
1160 if (NULL == pdev) {
1161 WMA_LOGE("%s: Unable to get TXRX context", __func__);
1162 goto out;
1163 }
1164
1165 vdev = wma_find_vdev_by_addr(wma, params->selfMacAddr, &vdev_id);
1166 if (!vdev) {
1167 WMA_LOGP("%s: vdev not found for addr: %pM",
1168 __func__, params->selfMacAddr);
1169 goto out;
1170 }
1171
1172 if (wma_is_vdev_in_ap_mode(wma, vdev_id)) {
1173 WMA_LOGD("%s: Ignoring set link req in ap mode", __func__);
1174 goto out;
1175 }
1176
1177 if (params->state == eSIR_LINK_PREASSOC_STATE) {
Varun Reddy Yeturud5939f82015-12-24 18:14:02 -08001178 if (wma_is_roam_synch_in_progress(wma, vdev_id))
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001179 roam_synch_in_progress = true;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001180 status = wma_create_peer(wma, pdev, vdev, params->bssid,
1181 WMI_PEER_TYPE_DEFAULT, vdev_id,
1182 roam_synch_in_progress);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301183 if (status != QDF_STATUS_SUCCESS)
Varun Reddy Yeturud5939f82015-12-24 18:14:02 -08001184 WMA_LOGE("%s: Unable to create peer", __func__);
1185 if (roam_synch_in_progress)
1186 return;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001187 } else {
1188 WMA_LOGD("%s, vdev_id: %d, pausing tx_ll_queue for VDEV_STOP",
1189 __func__, vdev_id);
1190 ol_txrx_vdev_pause(wma->interfaces[vdev_id].handle,
1191 OL_TXQ_PAUSE_REASON_VDEV_STOP);
1192 wma->interfaces[vdev_id].pause_bitmap |= (1 << PAUSE_TYPE_HOST);
1193 if (wmi_unified_vdev_stop_send(wma->wmi_handle, vdev_id)) {
1194 WMA_LOGP("%s: %d Failed to send vdev stop",
1195 __func__, __LINE__);
1196 }
1197 peer = ol_txrx_find_peer_by_addr(pdev, params->bssid, &peer_id);
1198 if (peer) {
1199 WMA_LOGP("%s: Deleting peer %pM vdev id %d",
1200 __func__, params->bssid, vdev_id);
1201 wma_remove_peer(wma, params->bssid, vdev_id, peer,
1202 roam_synch_in_progress);
1203 }
1204 }
1205out:
1206 wma_send_msg(wma, WMA_SET_LINK_STATE_RSP, (void *)params, 0);
1207}
1208
1209/**
1210 * wma_unpause_vdev - unpause all vdev
1211 * @wma: wma handle
1212 *
1213 * unpause all vdev aftter resume/coming out of wow mode
1214 *
1215 * Return: none
1216 */
1217void wma_unpause_vdev(tp_wma_handle wma)
1218{
1219 int8_t vdev_id;
1220 struct wma_txrx_node *iface;
1221
1222 for (vdev_id = 0; vdev_id < wma->max_bssid; vdev_id++) {
1223 if (!wma->interfaces[vdev_id].handle)
1224 continue;
1225
1226#if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL) || defined(QCA_LL_TX_FLOW_CONTROL_V2)
1227 /* When host resume, by default, unpause all active vdev */
1228 if (wma->interfaces[vdev_id].pause_bitmap) {
1229 ol_txrx_vdev_unpause(wma->interfaces[vdev_id].handle,
1230 0xffffffff);
1231 wma->interfaces[vdev_id].pause_bitmap = 0;
1232 }
1233#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
1234
1235 iface = &wma->interfaces[vdev_id];
1236 iface->conn_state = false;
1237 }
1238}
1239
1240/**
1241 * wma_process_rate_update_indate() - rate update indication
1242 * @wma: wma handle
1243 * @pRateUpdateParams: Rate update params
1244 *
1245 * This function update rate & short GI interval to fw based on params
1246 * send by SME.
1247 *
Anurag Chouhanf04e84f2016-03-03 10:12:12 +05301248 * Return: QDF status
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001249 */
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301250QDF_STATUS wma_process_rate_update_indicate(tp_wma_handle wma,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001251 tSirRateUpdateInd *
1252 pRateUpdateParams)
1253{
1254 int32_t ret = 0;
1255 uint8_t vdev_id = 0;
1256 void *pdev;
1257 int32_t mbpsx10_rate = -1;
1258 uint32_t paramId;
1259 uint8_t rate = 0;
1260 uint32_t short_gi;
1261 struct wma_txrx_node *intr = wma->interfaces;
Govind Singhd76a5b02016-03-08 15:12:14 +05301262 QDF_STATUS status;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001263
1264 /* Get the vdev id */
Srinivas Girigowdaafede182015-11-18 22:36:12 -08001265 pdev = wma_find_vdev_by_addr(wma, pRateUpdateParams->bssid.bytes,
1266 &vdev_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001267 if (!pdev) {
1268 WMA_LOGE("vdev handle is invalid for %pM",
Srinivas Girigowdaafede182015-11-18 22:36:12 -08001269 pRateUpdateParams->bssid.bytes);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301270 qdf_mem_free(pRateUpdateParams);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301271 return QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001272 }
1273 short_gi = intr[vdev_id].config.shortgi;
1274 if (short_gi == 0)
1275 short_gi = (intr[vdev_id].rate_flags & eHAL_TX_RATE_SGI) ?
1276 true : false;
1277 /* first check if reliable TX mcast rate is used. If not check the bcast.
1278 * Then is mcast. Mcast rate is saved in mcastDataRate24GHz
1279 */
1280 if (pRateUpdateParams->reliableMcastDataRateTxFlag > 0) {
1281 mbpsx10_rate = pRateUpdateParams->reliableMcastDataRate;
1282 paramId = WMI_VDEV_PARAM_MCAST_DATA_RATE;
1283 if (pRateUpdateParams->
1284 reliableMcastDataRateTxFlag & eHAL_TX_RATE_SGI)
1285 short_gi = 1; /* upper layer specified short GI */
1286 } else if (pRateUpdateParams->bcastDataRate > -1) {
1287 mbpsx10_rate = pRateUpdateParams->bcastDataRate;
1288 paramId = WMI_VDEV_PARAM_BCAST_DATA_RATE;
1289 } else {
1290 mbpsx10_rate = pRateUpdateParams->mcastDataRate24GHz;
1291 paramId = WMI_VDEV_PARAM_MCAST_DATA_RATE;
1292 if (pRateUpdateParams->
1293 mcastDataRate24GHzTxFlag & eHAL_TX_RATE_SGI)
1294 short_gi = 1; /* upper layer specified short GI */
1295 }
1296 WMA_LOGE("%s: dev_id = %d, dev_type = %d, dev_mode = %d, "
1297 "mac = %pM, config.shortgi = %d, rate_flags = 0x%x",
1298 __func__, vdev_id, intr[vdev_id].type,
Srinivas Girigowdaafede182015-11-18 22:36:12 -08001299 pRateUpdateParams->dev_mode, pRateUpdateParams->bssid.bytes,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001300 intr[vdev_id].config.shortgi, intr[vdev_id].rate_flags);
1301 ret = wma_encode_mc_rate(short_gi, intr[vdev_id].config.chwidth,
1302 intr[vdev_id].chanmode, intr[vdev_id].mhz,
1303 mbpsx10_rate, pRateUpdateParams->nss, &rate);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301304 if (ret != QDF_STATUS_SUCCESS) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001305 WMA_LOGE("%s: Error, Invalid input rate value", __func__);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301306 qdf_mem_free(pRateUpdateParams);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001307 return ret;
1308 }
Govind Singhd76a5b02016-03-08 15:12:14 +05301309 status = wma_vdev_set_param(wma->wmi_handle, vdev_id,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001310 WMI_VDEV_PARAM_SGI, short_gi);
Govind Singhd76a5b02016-03-08 15:12:14 +05301311 if (QDF_IS_STATUS_ERROR(status)) {
1312 WMA_LOGE("%s: Failed to Set WMI_VDEV_PARAM_SGI (%d), status = %d",
1313 __func__, short_gi, status);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301314 qdf_mem_free(pRateUpdateParams);
Govind Singhd76a5b02016-03-08 15:12:14 +05301315 return status;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001316 }
Govind Singhd76a5b02016-03-08 15:12:14 +05301317 status = wma_vdev_set_param(wma->wmi_handle,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001318 vdev_id, paramId, rate);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301319 qdf_mem_free(pRateUpdateParams);
Govind Singhd76a5b02016-03-08 15:12:14 +05301320 if (QDF_IS_STATUS_ERROR(status)) {
1321 WMA_LOGE("%s: Failed to Set rate, status = %d", __func__, status);
1322 return status;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001323 }
1324
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301325 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001326}
1327
1328/**
1329 * wma_mgmt_tx_ack_work_handler() - mgmt tx ack work queue
1330 * @ack_work: work structure
1331 *
1332 * Return: none
1333 */
Krishna Kumaar Natarajan9f421702015-11-10 14:56:16 -08001334static void wma_mgmt_tx_ack_work_handler(void *ack_work)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001335{
1336 struct wma_tx_ack_work_ctx *work;
1337 tp_wma_handle wma_handle;
1338 pWMAAckFnTxComp ack_cb;
1339
Rajeev Kumarfec3dbe2016-01-19 15:23:52 -08001340 if (cds_is_load_or_unload_in_progress()) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001341 WMA_LOGE("%s: Driver load/unload in progress", __func__);
1342 return;
1343 }
1344
Krishna Kumaar Natarajan9f421702015-11-10 14:56:16 -08001345 work = (struct wma_tx_ack_work_ctx *)ack_work;
1346
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001347 wma_handle = work->wma_handle;
1348 ack_cb = wma_handle->umac_ota_ack_cb[work->sub_type];
1349
1350 WMA_LOGD("Tx Ack Cb SubType %d Status %d",
1351 work->sub_type, work->status);
1352
1353 /* Call the Ack Cb registered by UMAC */
1354 ack_cb((tpAniSirGlobal) (wma_handle->mac_context),
1355 work->status ? 0 : 1);
1356
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301357 qdf_mem_free(work);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001358 wma_handle->ack_work_ctx = NULL;
1359}
1360
1361/**
1362 * wma_mgmt_tx_comp_conf_ind() - Post mgmt tx complete indication to PE.
1363 * @wma_handle: Pointer to WMA handle
1364 * @sub_type: Tx mgmt frame sub type
1365 * @status: Mgmt frame tx status
1366 *
1367 * This function sends mgmt complition confirmation to PE for deauth
1368 * and deassoc frames.
1369 *
1370 * Return: none
1371 */
1372static void
1373wma_mgmt_tx_comp_conf_ind(tp_wma_handle wma_handle, uint8_t sub_type,
1374 int32_t status)
1375{
1376 int32_t tx_comp_status;
1377
1378 tx_comp_status = status ? 0 : 1;
1379 if (sub_type == SIR_MAC_MGMT_DISASSOC) {
1380 wma_send_msg(wma_handle, WMA_DISASSOC_TX_COMP, NULL,
1381 tx_comp_status);
1382 } else if (sub_type == SIR_MAC_MGMT_DEAUTH) {
1383 wma_send_msg(wma_handle, WMA_DEAUTH_TX_COMP, NULL,
1384 tx_comp_status);
1385 }
1386}
1387
1388/**
1389 * wma_mgmt_tx_ack_comp_hdlr() - handles tx ack mgmt completion
1390 * @context: context with which the handler is registered
1391 * @netbuf: tx mgmt nbuf
1392 * @status: status of tx completion
1393 *
1394 * This is callback registered with TxRx for
1395 * Ack Complete.
1396 *
1397 * Return: none
1398 */
1399static void
Nirav Shahcbc6d722016-03-01 16:24:53 +05301400wma_mgmt_tx_ack_comp_hdlr(void *wma_context, qdf_nbuf_t netbuf, int32_t status)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001401{
Nirav Shahcbc6d722016-03-01 16:24:53 +05301402 tpSirMacFrameCtl pFc = (tpSirMacFrameCtl) (qdf_nbuf_data(netbuf));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001403 tp_wma_handle wma_handle = (tp_wma_handle) wma_context;
1404
1405 if (wma_handle && wma_handle->umac_ota_ack_cb[pFc->subType]) {
1406 if ((pFc->subType == SIR_MAC_MGMT_DISASSOC) ||
1407 (pFc->subType == SIR_MAC_MGMT_DEAUTH)) {
1408 wma_mgmt_tx_comp_conf_ind(wma_handle,
1409 (uint8_t) pFc->subType,
1410 status);
1411 } else {
1412 struct wma_tx_ack_work_ctx *ack_work;
1413
1414 ack_work =
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301415 qdf_mem_malloc(sizeof(struct wma_tx_ack_work_ctx));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001416
1417 if (ack_work) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001418 ack_work->wma_handle = wma_handle;
1419 ack_work->sub_type = pFc->subType;
1420 ack_work->status = status;
1421
Anurag Chouhan42958bb2016-02-19 15:43:11 +05301422 qdf_create_work(0, &ack_work->ack_cmp_work,
Krishna Kumaar Natarajan9f421702015-11-10 14:56:16 -08001423 wma_mgmt_tx_ack_work_handler,
1424 ack_work);
1425
Anurag Chouhan42958bb2016-02-19 15:43:11 +05301426 qdf_sched_work(0, &ack_work->ack_cmp_work);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001427 }
1428 }
1429 }
1430}
1431
1432/**
1433 * wma_mgmt_tx_dload_comp_hldr() - handles tx mgmt completion
1434 * @context: context with which the handler is registered
1435 * @netbuf: tx mgmt nbuf
1436 * @status: status of tx completion
1437 *
1438 * This function calls registered download callback while sending mgmt packet.
1439 *
1440 * Return: none
1441 */
1442static void
Nirav Shahcbc6d722016-03-01 16:24:53 +05301443wma_mgmt_tx_dload_comp_hldr(void *wma_context, qdf_nbuf_t netbuf,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001444 int32_t status)
1445{
Anurag Chouhance0dc992016-02-16 18:18:03 +05301446 QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001447
1448 tp_wma_handle wma_handle = (tp_wma_handle) wma_context;
1449 void *mac_context = wma_handle->mac_context;
1450
1451 WMA_LOGD("Tx Complete Status %d", status);
1452
1453 if (!wma_handle->tx_frm_download_comp_cb) {
1454 WMA_LOGE("Tx Complete Cb not registered by umac");
1455 return;
1456 }
1457
1458 /* Call Tx Mgmt Complete Callback registered by umac */
1459 wma_handle->tx_frm_download_comp_cb(mac_context, netbuf, 0);
1460
1461 /* Reset Callback */
1462 wma_handle->tx_frm_download_comp_cb = NULL;
1463
1464 /* Set the Tx Mgmt Complete Event */
Anurag Chouhance0dc992016-02-16 18:18:03 +05301465 qdf_status = qdf_event_set(&wma_handle->tx_frm_download_comp_event);
1466 if (!QDF_IS_STATUS_SUCCESS(qdf_status))
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001467 WMA_LOGP("%s: Event Set failed - tx_frm_comp_event", __func__);
1468}
1469
1470/**
1471 * wma_tx_attach() - attach tx related callbacks
1472 * @pwmaCtx: wma context
1473 *
1474 * attaches tx fn with underlying layer.
1475 *
Anurag Chouhanf04e84f2016-03-03 10:12:12 +05301476 * Return: QDF status
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001477 */
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301478QDF_STATUS wma_tx_attach(tp_wma_handle wma_handle)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001479{
1480 /* Get the Vos Context */
1481 p_cds_contextType cds_handle =
1482 (p_cds_contextType) (wma_handle->cds_context);
1483
1484 /* Get the txRx Pdev handle */
1485 ol_txrx_pdev_handle txrx_pdev =
1486 (ol_txrx_pdev_handle) (cds_handle->pdev_txrx_ctx);
1487
1488 /* Register for Tx Management Frames */
1489 ol_txrx_mgmt_tx_cb_set(txrx_pdev, GENERIC_NODOWLOAD_ACK_COMP_INDEX,
1490 NULL, wma_mgmt_tx_ack_comp_hdlr, wma_handle);
1491
1492 ol_txrx_mgmt_tx_cb_set(txrx_pdev, GENERIC_DOWNLD_COMP_NOACK_COMP_INDEX,
1493 wma_mgmt_tx_dload_comp_hldr, NULL, wma_handle);
1494
1495 ol_txrx_mgmt_tx_cb_set(txrx_pdev, GENERIC_DOWNLD_COMP_ACK_COMP_INDEX,
1496 wma_mgmt_tx_dload_comp_hldr,
1497 wma_mgmt_tx_ack_comp_hdlr, wma_handle);
1498
1499 /* Store the Mac Context */
1500 wma_handle->mac_context = cds_handle->pMACContext;
1501
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301502 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001503}
1504
1505/**
1506 * wma_tx_detach() - detach tx related callbacks
1507 * @tp_wma_handle: wma context
1508 *
1509 * Deregister with TxRx for Tx Mgmt Download and Ack completion.
1510 *
Anurag Chouhanf04e84f2016-03-03 10:12:12 +05301511 * Return: QDF status
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001512 */
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301513QDF_STATUS wma_tx_detach(tp_wma_handle wma_handle)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001514{
1515 uint32_t frame_index = 0;
1516
1517 /* Get the Vos Context */
1518 p_cds_contextType cds_handle =
1519 (p_cds_contextType) (wma_handle->cds_context);
1520
1521 /* Get the txRx Pdev handle */
1522 ol_txrx_pdev_handle txrx_pdev =
1523 (ol_txrx_pdev_handle) (cds_handle->pdev_txrx_ctx);
1524
Himanshu Agarwale1086fa2015-10-19 18:05:15 +05301525 if (txrx_pdev) {
1526 /* Deregister with TxRx for Tx Mgmt completion call back */
1527 for (frame_index = 0; frame_index < FRAME_INDEX_MAX;
1528 frame_index++) {
1529 ol_txrx_mgmt_tx_cb_set(txrx_pdev, frame_index, NULL,
1530 NULL, txrx_pdev);
1531 }
1532 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001533 /* Destroy Tx Frame Complete event */
Anurag Chouhance0dc992016-02-16 18:18:03 +05301534 qdf_event_destroy(&wma_handle->tx_frm_download_comp_event);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001535
1536 /* Tx queue empty check event (dummy event) */
Anurag Chouhance0dc992016-02-16 18:18:03 +05301537 qdf_event_destroy(&wma_handle->tx_queue_empty_event);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001538
1539 /* Reset Tx Frm Callbacks */
1540 wma_handle->tx_frm_download_comp_cb = NULL;
1541
1542 /* Reset Tx Data Frame Ack Cb */
1543 wma_handle->umac_data_ota_ack_cb = NULL;
1544
1545 /* Reset last Tx Data Frame nbuf ptr */
1546 wma_handle->last_umac_data_nbuf = NULL;
1547
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301548 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001549}
1550
1551#if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL) || defined(QCA_LL_TX_FLOW_CONTROL_V2)
1552/**
1553 * wma_mcc_vdev_tx_pause_evt_handler() - pause event handler
1554 * @handle: wma handle
1555 * @event: event buffer
1556 * @len: data length
1557 *
1558 * This function handle pause event from fw and pause/unpause
1559 * vdev.
1560 *
1561 * Return: 0 for success or error code.
1562 */
1563int wma_mcc_vdev_tx_pause_evt_handler(void *handle, uint8_t *event,
1564 uint32_t len)
1565{
1566 tp_wma_handle wma = (tp_wma_handle) handle;
1567 WMI_TX_PAUSE_EVENTID_param_tlvs *param_buf;
1568 wmi_tx_pause_event_fixed_param *wmi_event;
1569 uint8_t vdev_id;
1570 A_UINT32 vdev_map;
1571
1572 param_buf = (WMI_TX_PAUSE_EVENTID_param_tlvs *) event;
1573 if (!param_buf) {
1574 WMA_LOGE("Invalid roam event buffer");
1575 return -EINVAL;
1576 }
1577
1578 if (wma_get_wow_bus_suspend(wma)) {
1579 WMA_LOGD(" Suspend is in progress: Pause/Unpause Tx is NoOp");
1580 return 0;
1581 }
1582
1583 wmi_event = param_buf->fixed_param;
1584 vdev_map = wmi_event->vdev_map;
1585 /* FW mapped vdev from ID
1586 * vdev_map = (1 << vdev_id)
1587 * So, host should unmap to ID */
1588 for (vdev_id = 0; vdev_map != 0; vdev_id++) {
1589 if (!(vdev_map & 0x1)) {
1590 /* No Vdev */
1591 } else {
1592 if (!wma->interfaces[vdev_id].handle) {
1593 WMA_LOGE("%s: invalid vdev ID %d", __func__,
1594 vdev_id);
1595 /* Test Next VDEV */
1596 vdev_map >>= 1;
1597 continue;
1598 }
1599
1600 /* PAUSE action, add bitmap */
1601 if (ACTION_PAUSE == wmi_event->action) {
1602 /*
1603 * Now only support per-dev pause so it is not
1604 * necessary to pause a paused queue again.
1605 */
1606 if (!wma->interfaces[vdev_id].pause_bitmap)
1607 ol_txrx_vdev_pause(
1608 wma->interfaces[vdev_id].
1609 handle,
1610 OL_TXQ_PAUSE_REASON_FW);
1611 wma->interfaces[vdev_id].pause_bitmap |=
1612 (1 << wmi_event->pause_type);
1613 }
1614 /* UNPAUSE action, clean bitmap */
1615 else if (ACTION_UNPAUSE == wmi_event->action) {
1616 /* Handle unpause only if already paused */
1617 if (wma->interfaces[vdev_id].pause_bitmap) {
1618 wma->interfaces[vdev_id].pause_bitmap &=
1619 ~(1 << wmi_event->pause_type);
1620
1621 if (!wma->interfaces[vdev_id].
1622 pause_bitmap) {
1623 /* PAUSE BIT MAP is cleared
1624 * UNPAUSE VDEV */
1625 ol_txrx_vdev_unpause(
1626 wma->interfaces[vdev_id]
1627 .handle,
1628 OL_TXQ_PAUSE_REASON_FW);
1629 }
1630 }
1631 } else {
1632 WMA_LOGE("Not Valid Action Type %d",
1633 wmi_event->action);
1634 }
1635
1636 WMA_LOGD
1637 ("vdev_id %d, pause_map 0x%x, pause type %d, action %d",
1638 vdev_id, wma->interfaces[vdev_id].pause_bitmap,
1639 wmi_event->pause_type, wmi_event->action);
1640 }
1641 /* Test Next VDEV */
1642 vdev_map >>= 1;
1643 }
1644
1645 return 0;
1646}
1647
1648#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
1649
1650/**
1651 * wma_process_init_thermal_info() - initialize thermal info
1652 * @wma: Pointer to WMA handle
1653 * @pThermalParams: Pointer to thermal mitigation parameters
1654 *
1655 * This function initializes the thermal management table in WMA,
1656 * sends down the initial temperature thresholds to the firmware
1657 * and configures the throttle period in the tx rx module
1658 *
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301659 * Returns: QDF_STATUS_SUCCESS for success otherwise failure
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001660 */
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301661QDF_STATUS wma_process_init_thermal_info(tp_wma_handle wma,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001662 t_thermal_mgmt *pThermalParams)
1663{
1664 t_thermal_cmd_params thermal_params;
1665 ol_txrx_pdev_handle curr_pdev;
1666
1667 if (NULL == wma || NULL == pThermalParams) {
1668 WMA_LOGE("TM Invalid input");
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301669 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001670 }
1671
Anurag Chouhan6d760662016-02-20 16:05:43 +05301672 curr_pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001673 if (NULL == curr_pdev) {
1674 WMA_LOGE("%s: Failed to get pdev", __func__);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301675 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001676 }
1677
1678 WMA_LOGD("TM enable %d period %d", pThermalParams->thermalMgmtEnabled,
1679 pThermalParams->throttlePeriod);
1680
1681 wma->thermal_mgmt_info.thermalMgmtEnabled =
1682 pThermalParams->thermalMgmtEnabled;
1683 wma->thermal_mgmt_info.thermalLevels[0].minTempThreshold =
1684 pThermalParams->thermalLevels[0].minTempThreshold;
1685 wma->thermal_mgmt_info.thermalLevels[0].maxTempThreshold =
1686 pThermalParams->thermalLevels[0].maxTempThreshold;
1687 wma->thermal_mgmt_info.thermalLevels[1].minTempThreshold =
1688 pThermalParams->thermalLevels[1].minTempThreshold;
1689 wma->thermal_mgmt_info.thermalLevels[1].maxTempThreshold =
1690 pThermalParams->thermalLevels[1].maxTempThreshold;
1691 wma->thermal_mgmt_info.thermalLevels[2].minTempThreshold =
1692 pThermalParams->thermalLevels[2].minTempThreshold;
1693 wma->thermal_mgmt_info.thermalLevels[2].maxTempThreshold =
1694 pThermalParams->thermalLevels[2].maxTempThreshold;
1695 wma->thermal_mgmt_info.thermalLevels[3].minTempThreshold =
1696 pThermalParams->thermalLevels[3].minTempThreshold;
1697 wma->thermal_mgmt_info.thermalLevels[3].maxTempThreshold =
1698 pThermalParams->thermalLevels[3].maxTempThreshold;
1699 wma->thermal_mgmt_info.thermalCurrLevel = WLAN_WMA_THERMAL_LEVEL_0;
1700
1701 WMA_LOGD("TM level min max:\n"
1702 "0 %d %d\n"
1703 "1 %d %d\n"
1704 "2 %d %d\n"
1705 "3 %d %d",
1706 wma->thermal_mgmt_info.thermalLevels[0].minTempThreshold,
1707 wma->thermal_mgmt_info.thermalLevels[0].maxTempThreshold,
1708 wma->thermal_mgmt_info.thermalLevels[1].minTempThreshold,
1709 wma->thermal_mgmt_info.thermalLevels[1].maxTempThreshold,
1710 wma->thermal_mgmt_info.thermalLevels[2].minTempThreshold,
1711 wma->thermal_mgmt_info.thermalLevels[2].maxTempThreshold,
1712 wma->thermal_mgmt_info.thermalLevels[3].minTempThreshold,
1713 wma->thermal_mgmt_info.thermalLevels[3].maxTempThreshold);
1714
1715 if (wma->thermal_mgmt_info.thermalMgmtEnabled) {
1716 ol_tx_throttle_init_period(curr_pdev,
1717 pThermalParams->throttlePeriod);
1718
1719 /* Get the temperature thresholds to set in firmware */
1720 thermal_params.minTemp =
1721 wma->thermal_mgmt_info.thermalLevels[WLAN_WMA_THERMAL_LEVEL_0].minTempThreshold;
1722 thermal_params.maxTemp =
1723 wma->thermal_mgmt_info.thermalLevels[WLAN_WMA_THERMAL_LEVEL_0].maxTempThreshold;
1724 thermal_params.thermalEnable =
1725 wma->thermal_mgmt_info.thermalMgmtEnabled;
1726
1727 WMA_LOGE("TM sending the following to firmware: min %d max %d enable %d",
1728 thermal_params.minTemp, thermal_params.maxTemp,
1729 thermal_params.thermalEnable);
1730
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301731 if (QDF_STATUS_SUCCESS !=
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001732 wma_set_thermal_mgmt(wma, thermal_params)) {
1733 WMA_LOGE("Could not send thermal mgmt command to the firmware!");
1734 }
1735 }
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301736 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001737}
1738
1739/**
1740 * wma_set_thermal_level_ind() - send SME set thermal level indication message
1741 * @level: thermal level
1742 *
1743 * Send SME SET_THERMAL_LEVEL_IND message
1744 *
1745 * Returns: none
1746 */
1747static void wma_set_thermal_level_ind(u_int8_t level)
1748{
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301749 QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001750 cds_msg_t sme_msg = {0};
1751
1752 WMA_LOGI(FL("Thermal level: %d"), level);
1753
1754 sme_msg.type = eWNI_SME_SET_THERMAL_LEVEL_IND;
1755 sme_msg.bodyptr = NULL;
1756 sme_msg.bodyval = level;
1757
Anurag Chouhan6d760662016-02-20 16:05:43 +05301758 qdf_status = cds_mq_post_message(QDF_MODULE_ID_SME, &sme_msg);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301759 if (!QDF_IS_STATUS_SUCCESS(qdf_status))
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001760 WMA_LOGE(FL(
1761 "Fail to post set thermal level ind msg"));
1762}
1763
1764/**
1765 * wma_process_set_thermal_level() - sets thermal level
1766 * @wma: Pointer to WMA handle
1767 * @thermal_level : Thermal level
1768 *
1769 * This function sets the new thermal throttle level in the
1770 * txrx module and sends down the corresponding temperature
1771 * thresholds to the firmware
1772 *
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301773 * Returns: QDF_STATUS_SUCCESS for success otherwise failure
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001774 */
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301775QDF_STATUS wma_process_set_thermal_level(tp_wma_handle wma,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001776 uint8_t thermal_level)
1777{
1778 ol_txrx_pdev_handle curr_pdev;
1779
1780 if (NULL == wma) {
1781 WMA_LOGE("TM Invalid input");
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301782 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001783 }
1784
Anurag Chouhan6d760662016-02-20 16:05:43 +05301785 curr_pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001786 if (NULL == curr_pdev) {
1787 WMA_LOGE("%s: Failed to get pdev", __func__);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301788 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001789 }
1790
1791 WMA_LOGE("TM set level %d", thermal_level);
1792
1793 /* Check if thermal mitigation is enabled */
1794 if (!wma->thermal_mgmt_info.thermalMgmtEnabled) {
1795 WMA_LOGE("Thermal mgmt is not enabled, ignoring set level command");
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301796 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001797 }
1798
1799 if (thermal_level >= WLAN_WMA_MAX_THERMAL_LEVELS) {
1800 WMA_LOGE("Invalid thermal level set %d", thermal_level);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301801 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001802 }
1803
1804 if (thermal_level == wma->thermal_mgmt_info.thermalCurrLevel) {
1805 WMA_LOGD("Current level %d is same as the set level, ignoring",
1806 wma->thermal_mgmt_info.thermalCurrLevel);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301807 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001808 }
1809
1810 wma->thermal_mgmt_info.thermalCurrLevel = thermal_level;
1811
1812 ol_tx_throttle_set_level(curr_pdev, thermal_level);
1813
1814 /* Send SME SET_THERMAL_LEVEL_IND message */
1815 wma_set_thermal_level_ind(thermal_level);
1816
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301817 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001818}
1819
1820
1821/**
1822 * wma_set_thermal_mgmt() - set thermal mgmt command to fw
1823 * @wma_handle: Pointer to WMA handle
1824 * @thermal_info: Thermal command information
1825 *
1826 * This function sends the thermal management command
1827 * to the firmware
1828 *
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301829 * Return: QDF_STATUS_SUCCESS for success otherwise failure
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001830 */
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301831QDF_STATUS wma_set_thermal_mgmt(tp_wma_handle wma_handle,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001832 t_thermal_cmd_params thermal_info)
1833{
Himanshu Agarwal17dea6e2016-03-09 12:11:22 +05301834 struct thermal_cmd_params mgmt_thermal_info = {0};
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001835
Himanshu Agarwal17dea6e2016-03-09 12:11:22 +05301836 if (!wma_handle) {
1837 WMA_LOGE("%s:'wma_set_thermal_mgmt':invalid input", __func__);
1838 QDF_ASSERT(0);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301839 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001840 }
1841
Himanshu Agarwal17dea6e2016-03-09 12:11:22 +05301842 mgmt_thermal_info.min_temp = thermal_info.minTemp;
1843 mgmt_thermal_info.max_temp = thermal_info.maxTemp;
1844 mgmt_thermal_info.thermal_enable = thermal_info.thermalEnable;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001845
Himanshu Agarwal17dea6e2016-03-09 12:11:22 +05301846 return wmi_unified_set_thermal_mgmt_cmd(wma_handle->wmi_handle,
1847 &mgmt_thermal_info);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001848}
1849
1850/**
1851 * wma_thermal_mgmt_get_level() - returns throttle level
1852 * @handle: Pointer to WMA handle
1853 * @temp: temperature
1854 *
1855 * This function returns the thermal(throttle) level
1856 * given the temperature
1857 *
1858 * Return: thermal (throttle) level
1859 */
1860uint8_t wma_thermal_mgmt_get_level(void *handle, uint32_t temp)
1861{
1862 tp_wma_handle wma = (tp_wma_handle) handle;
1863 int i;
1864 uint8_t level;
1865
1866 level = i = wma->thermal_mgmt_info.thermalCurrLevel;
1867 while (temp < wma->thermal_mgmt_info.thermalLevels[i].minTempThreshold
1868 && i > 0) {
1869 i--;
1870 level = i;
1871 }
1872
1873 i = wma->thermal_mgmt_info.thermalCurrLevel;
1874 while (temp > wma->thermal_mgmt_info.thermalLevels[i].maxTempThreshold
1875 && i < (WLAN_WMA_MAX_THERMAL_LEVELS - 1)) {
1876 i++;
1877 level = i;
1878 }
1879
1880 WMA_LOGW("Change thermal level from %d -> %d\n",
1881 wma->thermal_mgmt_info.thermalCurrLevel, level);
1882
1883 return level;
1884}
1885
1886/**
1887 * wma_thermal_mgmt_evt_handler() - thermal mgmt event handler
1888 * @wma_handle: Pointer to WMA handle
1889 * @event: Thermal event information
1890 *
1891 * This function handles the thermal mgmt event from the firmware len
1892 *
1893 * Return: 0 for success otherwise failure
1894 */
1895int wma_thermal_mgmt_evt_handler(void *handle, uint8_t *event,
1896 uint32_t len)
1897{
1898 tp_wma_handle wma;
1899 wmi_thermal_mgmt_event_fixed_param *tm_event;
1900 uint8_t thermal_level;
1901 t_thermal_cmd_params thermal_params;
1902 WMI_THERMAL_MGMT_EVENTID_param_tlvs *param_buf;
1903 ol_txrx_pdev_handle curr_pdev;
1904
1905 if (NULL == event || NULL == handle) {
1906 WMA_LOGE("Invalid thermal mitigation event buffer");
1907 return -EINVAL;
1908 }
1909
1910 wma = (tp_wma_handle) handle;
1911
1912 if (NULL == wma) {
1913 WMA_LOGE("%s: Failed to get wma handle", __func__);
1914 return -EINVAL;
1915 }
1916
1917 param_buf = (WMI_THERMAL_MGMT_EVENTID_param_tlvs *) event;
1918
Anurag Chouhan6d760662016-02-20 16:05:43 +05301919 curr_pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001920 if (NULL == curr_pdev) {
1921 WMA_LOGE("%s: Failed to get pdev", __func__);
1922 return -EINVAL;
1923 }
1924
1925 /* Check if thermal mitigation is enabled */
1926 if (!wma->thermal_mgmt_info.thermalMgmtEnabled) {
1927 WMA_LOGE("Thermal mgmt is not enabled, ignoring event");
1928 return -EINVAL;
1929 }
1930
1931 tm_event = param_buf->fixed_param;
1932 WMA_LOGD("Thermal mgmt event received with temperature %d",
1933 tm_event->temperature_degreeC);
1934
1935 /* Get the thermal mitigation level for the reported temperature */
1936 thermal_level =
1937 wma_thermal_mgmt_get_level(handle, tm_event->temperature_degreeC);
1938 WMA_LOGD("Thermal mgmt level %d", thermal_level);
1939
1940 if (thermal_level == wma->thermal_mgmt_info.thermalCurrLevel) {
1941 WMA_LOGD("Current level %d is same as the set level, ignoring",
1942 wma->thermal_mgmt_info.thermalCurrLevel);
1943 return 0;
1944 }
1945
1946 wma->thermal_mgmt_info.thermalCurrLevel = thermal_level;
1947
1948 /* Inform txrx */
1949 ol_tx_throttle_set_level(curr_pdev, thermal_level);
1950
1951 /* Send SME SET_THERMAL_LEVEL_IND message */
1952 wma_set_thermal_level_ind(thermal_level);
1953
1954 /* Get the temperature thresholds to set in firmware */
1955 thermal_params.minTemp =
1956 wma->thermal_mgmt_info.thermalLevels[thermal_level].
1957 minTempThreshold;
1958 thermal_params.maxTemp =
1959 wma->thermal_mgmt_info.thermalLevels[thermal_level].
1960 maxTempThreshold;
1961 thermal_params.thermalEnable =
1962 wma->thermal_mgmt_info.thermalMgmtEnabled;
1963
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301964 if (QDF_STATUS_SUCCESS != wma_set_thermal_mgmt(wma, thermal_params)) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001965 WMA_LOGE("Could not send thermal mgmt command to the firmware!");
1966 return -EINVAL;
1967 }
1968
1969 return 0;
1970}
1971
1972/**
1973 * wma_decap_to_8023() - Decapsulate to 802.3 format
1974 * @msdu: skb buffer
1975 * @info: decapsulate info
1976 *
1977 * Return: none
1978 */
Nirav Shahcbc6d722016-03-01 16:24:53 +05301979static void wma_decap_to_8023(qdf_nbuf_t msdu, struct wma_decap_info_t *info)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001980{
1981 struct llc_snap_hdr_t *llc_hdr;
1982 uint16_t ether_type;
1983 uint16_t l2_hdr_space;
1984 struct ieee80211_qosframe_addr4 *wh;
1985 uint8_t local_buf[ETHERNET_HDR_LEN];
1986 uint8_t *buf;
1987 struct ethernet_hdr_t *ethr_hdr;
1988
Nirav Shahcbc6d722016-03-01 16:24:53 +05301989 buf = (uint8_t *) qdf_nbuf_data(msdu);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001990 llc_hdr = (struct llc_snap_hdr_t *)buf;
1991 ether_type = (llc_hdr->ethertype[0] << 8) | llc_hdr->ethertype[1];
1992 /* do llc remove if needed */
1993 l2_hdr_space = 0;
1994 if (IS_SNAP(llc_hdr)) {
1995 if (IS_BTEP(llc_hdr)) {
1996 /* remove llc */
1997 l2_hdr_space += sizeof(struct llc_snap_hdr_t);
1998 llc_hdr = NULL;
1999 } else if (IS_RFC1042(llc_hdr)) {
2000 if (!(ether_type == ETHERTYPE_AARP ||
2001 ether_type == ETHERTYPE_IPX)) {
2002 /* remove llc */
2003 l2_hdr_space += sizeof(struct llc_snap_hdr_t);
2004 llc_hdr = NULL;
2005 }
2006 }
2007 }
2008 if (l2_hdr_space > ETHERNET_HDR_LEN) {
Nirav Shahcbc6d722016-03-01 16:24:53 +05302009 buf = qdf_nbuf_pull_head(msdu, l2_hdr_space - ETHERNET_HDR_LEN);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002010 } else if (l2_hdr_space < ETHERNET_HDR_LEN) {
Nirav Shahcbc6d722016-03-01 16:24:53 +05302011 buf = qdf_nbuf_push_head(msdu, ETHERNET_HDR_LEN - l2_hdr_space);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002012 }
2013
2014 /* mpdu hdr should be present in info,re-create ethr_hdr based on mpdu hdr */
2015 wh = (struct ieee80211_qosframe_addr4 *)info->hdr;
2016 ethr_hdr = (struct ethernet_hdr_t *)local_buf;
2017 switch (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) {
2018 case IEEE80211_FC1_DIR_NODS:
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302019 qdf_mem_copy(ethr_hdr->dest_addr, wh->i_addr1,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002020 ETHERNET_ADDR_LEN);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302021 qdf_mem_copy(ethr_hdr->src_addr, wh->i_addr2,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002022 ETHERNET_ADDR_LEN);
2023 break;
2024 case IEEE80211_FC1_DIR_TODS:
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302025 qdf_mem_copy(ethr_hdr->dest_addr, wh->i_addr3,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002026 ETHERNET_ADDR_LEN);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302027 qdf_mem_copy(ethr_hdr->src_addr, wh->i_addr2,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002028 ETHERNET_ADDR_LEN);
2029 break;
2030 case IEEE80211_FC1_DIR_FROMDS:
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302031 qdf_mem_copy(ethr_hdr->dest_addr, wh->i_addr1,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002032 ETHERNET_ADDR_LEN);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302033 qdf_mem_copy(ethr_hdr->src_addr, wh->i_addr3,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002034 ETHERNET_ADDR_LEN);
2035 break;
2036 case IEEE80211_FC1_DIR_DSTODS:
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302037 qdf_mem_copy(ethr_hdr->dest_addr, wh->i_addr3,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002038 ETHERNET_ADDR_LEN);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302039 qdf_mem_copy(ethr_hdr->src_addr, wh->i_addr4,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002040 ETHERNET_ADDR_LEN);
2041 break;
2042 }
2043
2044 if (llc_hdr == NULL) {
2045 ethr_hdr->ethertype[0] = (ether_type >> 8) & 0xff;
2046 ethr_hdr->ethertype[1] = (ether_type) & 0xff;
2047 } else {
2048 uint32_t pktlen =
Nirav Shahcbc6d722016-03-01 16:24:53 +05302049 qdf_nbuf_len(msdu) - sizeof(ethr_hdr->ethertype);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002050 ether_type = (uint16_t) pktlen;
Nirav Shahcbc6d722016-03-01 16:24:53 +05302051 ether_type = qdf_nbuf_len(msdu) - sizeof(struct ethernet_hdr_t);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002052 ethr_hdr->ethertype[0] = (ether_type >> 8) & 0xff;
2053 ethr_hdr->ethertype[1] = (ether_type) & 0xff;
2054 }
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302055 qdf_mem_copy(buf, ethr_hdr, ETHERNET_HDR_LEN);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002056}
2057
2058/**
2059 * wma_ieee80211_hdrsize() - get 802.11 header size
2060 * @data: 80211 frame
2061 *
2062 * Return: size of header
2063 */
2064static int32_t wma_ieee80211_hdrsize(const void *data)
2065{
2066 const struct ieee80211_frame *wh = (const struct ieee80211_frame *)data;
2067 int32_t size = sizeof(struct ieee80211_frame);
2068
2069 if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS)
2070 size += IEEE80211_ADDR_LEN;
2071 if (IEEE80211_QOS_HAS_SEQ(wh))
2072 size += sizeof(uint16_t);
2073 return size;
2074}
2075
2076/**
2077 * wmi_desc_pool_init() - Initialize the WMI descriptor pool
2078 * @wma_handle: handle to wma
2079 * @pool_size: Size of wma pool
2080 *
2081 * Return: 0 for success, error code on failure.
2082 */
2083int wmi_desc_pool_init(tp_wma_handle wma_handle, uint32_t pool_size)
2084{
2085 int i;
2086
2087 if (!pool_size) {
2088 WMA_LOGE("%s: failed to allocate desc pool", __func__);
Anurag Chouhanc5548422016-02-24 18:33:27 +05302089 qdf_assert_always(pool_size);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002090 return -EINVAL;
2091 }
2092 WMA_LOGE("%s: initialize desc pool of size %d", __func__, pool_size);
2093 wma_handle->wmi_desc_pool.pool_size = pool_size;
2094 wma_handle->wmi_desc_pool.num_free = pool_size;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302095 wma_handle->wmi_desc_pool.array = qdf_mem_malloc(pool_size *
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002096 sizeof(union wmi_desc_elem_t));
2097 if (!wma_handle->wmi_desc_pool.array) {
2098 WMA_LOGE("%s: failed to allocate desc pool", __func__);
2099 return -ENOMEM;
2100 }
2101 wma_handle->wmi_desc_pool.freelist = &wma_handle->
2102 wmi_desc_pool.array[0];
2103
2104 for (i = 0; i < (pool_size - 1); i++) {
2105 wma_handle->wmi_desc_pool.array[i].wmi_desc.desc_id = i;
2106 wma_handle->wmi_desc_pool.array[i].next =
2107 &wma_handle->wmi_desc_pool.array[i + 1];
2108 }
2109
2110 wma_handle->wmi_desc_pool.array[i].next = NULL;
2111 wma_handle->wmi_desc_pool.array[i].wmi_desc.desc_id = i;
2112
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302113 qdf_spinlock_create(&wma_handle->wmi_desc_pool.wmi_desc_pool_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002114 return 0;
2115}
2116
2117/**
2118 * wmi_desc_pool_deinit() - Deinitialize the WMI descriptor pool
2119 * @wma_handle: handle to wma
2120 *
2121 * Return: None
2122 */
2123void wmi_desc_pool_deinit(tp_wma_handle wma_handle)
2124{
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302125 qdf_spin_lock_bh(&wma_handle->wmi_desc_pool.wmi_desc_pool_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002126 if (wma_handle->wmi_desc_pool.array) {
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302127 qdf_mem_free(wma_handle->wmi_desc_pool.array);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002128 wma_handle->wmi_desc_pool.array = NULL;
2129 } else {
2130 WMA_LOGE("%s: Empty WMI descriptor pool", __func__);
2131 }
2132
2133 wma_handle->wmi_desc_pool.freelist = NULL;
2134 wma_handle->wmi_desc_pool.pool_size = 0;
2135 wma_handle->wmi_desc_pool.num_free = 0;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302136 qdf_spin_unlock_bh(&wma_handle->wmi_desc_pool.wmi_desc_pool_lock);
2137 qdf_spinlock_destroy(&wma_handle->wmi_desc_pool.wmi_desc_pool_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002138}
2139
2140/**
2141 * wmi_desc_get() - Get wmi descriptor from wmi free descriptor pool
2142 * @wma_handle: handle to wma
2143 *
2144 * Return: pointer to wmi descriptor, NULL on failure
2145 */
2146struct wmi_desc_t *wmi_desc_get(tp_wma_handle wma_handle)
2147{
2148 struct wmi_desc_t *wmi_desc = NULL;
2149
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302150 qdf_spin_lock_bh(&wma_handle->wmi_desc_pool.wmi_desc_pool_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002151 if (wma_handle->wmi_desc_pool.freelist) {
2152 wma_handle->wmi_desc_pool.num_free--;
2153 wmi_desc = &wma_handle->wmi_desc_pool.freelist->wmi_desc;
2154 wma_handle->wmi_desc_pool.freelist =
2155 wma_handle->wmi_desc_pool.freelist->next;
2156 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302157 qdf_spin_unlock_bh(&wma_handle->wmi_desc_pool.wmi_desc_pool_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002158
2159 return wmi_desc;
2160}
2161
2162/**
2163 * wmi_desc_put() - Put wmi descriptor to wmi free descriptor pool
2164 * @wma_handle: handle to wma
2165 * @wmi_desc: wmi descriptor
2166 *
2167 * Return: None
2168 */
2169void wmi_desc_put(tp_wma_handle wma_handle, struct wmi_desc_t *wmi_desc)
2170{
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302171 qdf_spin_lock_bh(&wma_handle->wmi_desc_pool.wmi_desc_pool_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002172 ((union wmi_desc_elem_t *)wmi_desc)->next =
2173 wma_handle->wmi_desc_pool.freelist;
2174 wma_handle->wmi_desc_pool.freelist = (union wmi_desc_elem_t *)wmi_desc;
2175 wma_handle->wmi_desc_pool.num_free++;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302176 qdf_spin_unlock_bh(&wma_handle->wmi_desc_pool.wmi_desc_pool_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002177}
2178
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002179/**
2180 * wma_tx_packet() - Sends Tx Frame to TxRx
2181 * @wma_context: wma context
2182 * @tx_frame: frame buffer
2183 * @frmLen: frame length
2184 * @frmType: frame type
2185 * @txDir: tx diection
2186 * @tid: TID
2187 * @tx_frm_download_comp_cb: tx download callback handler
2188 * @tx_frm_ota_comp_cb: OTA complition handler
2189 * @tx_flag: tx flag
2190 * @vdev_id: vdev id
2191 * @tdlsFlag: tdls flag
2192 *
2193 * This function sends the frame corresponding to the
2194 * given vdev id.
2195 * This is blocking call till the downloading of frame is complete.
2196 *
Anurag Chouhanf04e84f2016-03-03 10:12:12 +05302197 * Return: QDF status
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002198 */
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302199QDF_STATUS wma_tx_packet(void *wma_context, void *tx_frame, uint16_t frmLen,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002200 eFrameType frmType, eFrameTxDir txDir, uint8_t tid,
2201 pWMATxRxCompFunc tx_frm_download_comp_cb, void *pData,
2202 pWMAAckFnTxComp tx_frm_ota_comp_cb, uint8_t tx_flag,
2203 uint8_t vdev_id, bool tdlsFlag, uint16_t channel_freq)
2204{
2205 tp_wma_handle wma_handle = (tp_wma_handle) (wma_context);
2206 int32_t status;
Anurag Chouhance0dc992016-02-16 18:18:03 +05302207 QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002208 int32_t is_high_latency;
2209 ol_txrx_vdev_handle txrx_vdev;
2210 enum frame_index tx_frm_index = GENERIC_NODOWNLD_NOACK_COMP_INDEX;
Nirav Shahcbc6d722016-03-01 16:24:53 +05302211 tpSirMacFrameCtl pFc = (tpSirMacFrameCtl) (qdf_nbuf_data(tx_frame));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002212 uint8_t use_6mbps = 0;
2213 uint8_t downld_comp_required = 0;
2214 uint16_t chanfreq;
2215#ifdef WLAN_FEATURE_11W
2216 uint8_t *pFrame = NULL;
2217 void *pPacket = NULL;
2218 uint16_t newFrmLen = 0;
2219#endif /* WLAN_FEATURE_11W */
2220 struct wma_txrx_node *iface;
2221 tpAniSirGlobal pMac;
2222 tpSirMacMgmtHdr mHdr;
2223#ifdef QCA_PKT_PROTO_TRACE
2224 uint8_t proto_type = 0;
2225#endif /* QCA_PKT_PROTO_TRACE */
Govind Singh09c3b492016-03-08 16:05:14 +05302226 struct wmi_mgmt_params mgmt_param = {0};
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002227
2228 if (NULL == wma_handle) {
2229 WMA_LOGE("wma_handle is NULL");
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302230 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002231 }
2232 iface = &wma_handle->interfaces[vdev_id];
2233 /* Get the vdev handle from vdev id */
2234 txrx_vdev = wma_handle->interfaces[vdev_id].handle;
2235
2236 if (!txrx_vdev) {
2237 WMA_LOGE("TxRx Vdev Handle is NULL");
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302238 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002239 }
2240
2241 if (frmType >= TXRX_FRM_MAX) {
2242 WMA_LOGE("Invalid Frame Type Fail to send Frame");
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302243 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002244 }
2245
Anurag Chouhan6d760662016-02-20 16:05:43 +05302246 pMac = cds_get_context(QDF_MODULE_ID_PE);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002247 if (!pMac) {
2248 WMA_LOGE("pMac Handle is NULL");
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302249 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002250 }
2251 /*
2252 * Currently only support to
2253 * send 80211 Mgmt and 80211 Data are added.
2254 */
2255 if (!((frmType == TXRX_FRM_802_11_MGMT) ||
2256 (frmType == TXRX_FRM_802_11_DATA))) {
2257 WMA_LOGE("No Support to send other frames except 802.11 Mgmt/Data");
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302258 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002259 }
Nirav Shahcbc6d722016-03-01 16:24:53 +05302260 mHdr = (tpSirMacMgmtHdr)qdf_nbuf_data(tx_frame);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002261#ifdef WLAN_FEATURE_11W
2262 if ((iface && iface->rmfEnabled) &&
2263 (frmType == TXRX_FRM_802_11_MGMT) &&
2264 (pFc->subType == SIR_MAC_MGMT_DISASSOC ||
2265 pFc->subType == SIR_MAC_MGMT_DEAUTH ||
2266 pFc->subType == SIR_MAC_MGMT_ACTION)) {
2267 struct ieee80211_frame *wh =
Nirav Shahcbc6d722016-03-01 16:24:53 +05302268 (struct ieee80211_frame *)qdf_nbuf_data(tx_frame);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002269 if (!IEEE80211_IS_BROADCAST(wh->i_addr1) &&
2270 !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
2271 if (pFc->wep) {
2272 /* Allocate extra bytes for privacy header and trailer */
2273 newFrmLen = frmLen + IEEE80211_CCMP_HEADERLEN +
2274 IEEE80211_CCMP_MICLEN;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302275 qdf_status =
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002276 cds_packet_alloc((uint16_t) newFrmLen,
2277 (void **)&pFrame,
2278 (void **)&pPacket);
2279
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302280 if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002281 WMA_LOGP("%s: Failed to allocate %d bytes for RMF status "
2282 "code (%x)", __func__, newFrmLen,
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302283 qdf_status);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002284 /* Free the original packet memory */
2285 cds_packet_free((void *)tx_frame);
2286 goto error;
2287 }
2288
2289 /*
2290 * Initialize the frame with 0's and only fill
2291 * MAC header and data, Keep the CCMP header and
2292 * trailer as 0's, firmware shall fill this
2293 */
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302294 qdf_mem_set(pFrame, newFrmLen, 0);
2295 qdf_mem_copy(pFrame, wh, sizeof(*wh));
2296 qdf_mem_copy(pFrame + sizeof(*wh) +
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002297 IEEE80211_CCMP_HEADERLEN,
2298 pData + sizeof(*wh),
2299 frmLen - sizeof(*wh));
2300
2301 cds_packet_free((void *)tx_frame);
2302 tx_frame = pPacket;
2303 frmLen = newFrmLen;
2304 }
2305 } else {
2306 /* Allocate extra bytes for MMIE */
2307 newFrmLen = frmLen + IEEE80211_MMIE_LEN;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302308 qdf_status = cds_packet_alloc((uint16_t) newFrmLen,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002309 (void **)&pFrame,
2310 (void **)&pPacket);
2311
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302312 if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002313 WMA_LOGP("%s: Failed to allocate %d bytes for RMF status "
2314 "code (%x)", __func__, newFrmLen,
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302315 qdf_status);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002316 /* Free the original packet memory */
2317 cds_packet_free((void *)tx_frame);
2318 goto error;
2319 }
2320 /*
2321 * Initialize the frame with 0's and only fill
2322 * MAC header and data. MMIE field will be
2323 * filled by cds_attach_mmie API
2324 */
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302325 qdf_mem_set(pFrame, newFrmLen, 0);
2326 qdf_mem_copy(pFrame, wh, sizeof(*wh));
2327 qdf_mem_copy(pFrame + sizeof(*wh),
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002328 pData + sizeof(*wh), frmLen - sizeof(*wh));
2329 if (!cds_attach_mmie(iface->key.key,
2330 iface->key.key_id[0].ipn,
2331 WMA_IGTK_KEY_INDEX_4,
2332 pFrame,
2333 pFrame + newFrmLen, newFrmLen)) {
2334 WMA_LOGP("%s: Failed to attach MMIE at the end of "
2335 "frame", __func__);
2336 /* Free the original packet memory */
2337 cds_packet_free((void *)tx_frame);
2338 goto error;
2339 }
2340 cds_packet_free((void *)tx_frame);
2341 tx_frame = pPacket;
2342 frmLen = newFrmLen;
2343 }
2344 }
2345#endif /* WLAN_FEATURE_11W */
2346
2347 if ((frmType == TXRX_FRM_802_11_MGMT) &&
2348 (pFc->subType == SIR_MAC_MGMT_PROBE_RSP)) {
2349 uint64_t adjusted_tsf_le;
2350 struct ieee80211_frame *wh =
Nirav Shahcbc6d722016-03-01 16:24:53 +05302351 (struct ieee80211_frame *)qdf_nbuf_data(tx_frame);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002352
2353 /* Make the TSF offset negative to match TSF in beacons */
2354 adjusted_tsf_le = cpu_to_le64(0ULL -
2355 wma_handle->interfaces[vdev_id].
2356 tsfadjust);
2357 A_MEMCPY(&wh[1], &adjusted_tsf_le, sizeof(adjusted_tsf_le));
2358 }
2359 if (frmType == TXRX_FRM_802_11_DATA) {
Nirav Shahcbc6d722016-03-01 16:24:53 +05302360 qdf_nbuf_t ret;
2361 qdf_nbuf_t skb = (qdf_nbuf_t) tx_frame;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002362 ol_txrx_pdev_handle pdev =
Anurag Chouhan6d760662016-02-20 16:05:43 +05302363 cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002364
2365 struct wma_decap_info_t decap_info;
2366 struct ieee80211_frame *wh =
Nirav Shahcbc6d722016-03-01 16:24:53 +05302367 (struct ieee80211_frame *)qdf_nbuf_data(skb);
Anurag Chouhan210db072016-02-22 18:42:15 +05302368 unsigned long curr_timestamp = qdf_mc_timer_get_system_ticks();
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002369
2370 if (pdev == NULL) {
2371 WMA_LOGE("%s: pdev pointer is not available", __func__);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302372 return QDF_STATUS_E_FAULT;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002373 }
2374
2375 /*
2376 * 1) TxRx Module expects data input to be 802.3 format
2377 * So Decapsulation has to be done.
2378 * 2) Only one Outstanding Data pending for Ack is allowed
2379 */
2380 if (tx_frm_ota_comp_cb) {
2381 if (wma_handle->umac_data_ota_ack_cb) {
2382 /*
2383 * If last data frame was sent more than 5 seconds
2384 * ago and still we did not receive ack/nack from
2385 * fw then allow Tx of this data frame
2386 */
2387 if (curr_timestamp >=
2388 wma_handle->last_umac_data_ota_timestamp +
2389 500) {
2390 WMA_LOGE("%s: No Tx Ack for last data frame for more than 5 secs, allow Tx of current data frame",
2391 __func__);
2392 } else {
2393 WMA_LOGE("%s: Already one Data pending for Ack, reject Tx of data frame",
2394 __func__);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302395 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002396 }
2397 }
2398 } else {
2399 /*
2400 * Data Frames are sent through TxRx Non Standard Data Path
2401 * so Ack Complete Cb is must
2402 */
2403 WMA_LOGE("No Ack Complete Cb. Don't Allow");
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302404 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002405 }
2406
2407 /* Take out 802.11 header from skb */
2408 decap_info.hdr_len = wma_ieee80211_hdrsize(wh);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302409 qdf_mem_copy(decap_info.hdr, wh, decap_info.hdr_len);
Nirav Shahcbc6d722016-03-01 16:24:53 +05302410 qdf_nbuf_pull_head(skb, decap_info.hdr_len);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002411
2412 /* Decapsulate to 802.3 format */
2413 wma_decap_to_8023(skb, &decap_info);
2414
2415 /* Zero out skb's context buffer for the driver to use */
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302416 qdf_mem_set(skb->cb, sizeof(skb->cb), 0);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002417
2418 /* Do the DMA Mapping */
Nirav Shahcbc6d722016-03-01 16:24:53 +05302419 qdf_nbuf_map_single(pdev->osdev, skb, QDF_DMA_TO_DEVICE);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002420
2421 /* Terminate the (single-element) list of tx frames */
2422 skb->next = NULL;
2423
2424 /* Store the Ack Complete Cb */
2425 wma_handle->umac_data_ota_ack_cb = tx_frm_ota_comp_cb;
2426
2427 /* Store the timestamp and nbuf for this data Tx */
2428 wma_handle->last_umac_data_ota_timestamp = curr_timestamp;
2429 wma_handle->last_umac_data_nbuf = skb;
2430
2431 /* Send the Data frame to TxRx in Non Standard Path */
2432 ret = ol_tx_non_std(txrx_vdev, ol_tx_spec_no_free, skb);
2433
2434 if (ret) {
2435 WMA_LOGE("TxRx Rejected. Fail to do Tx");
Nirav Shahcbc6d722016-03-01 16:24:53 +05302436 qdf_nbuf_unmap_single(pdev->osdev, skb,
Anurag Chouhan6d760662016-02-20 16:05:43 +05302437 QDF_DMA_TO_DEVICE);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002438 /* Call Download Cb so that umac can free the buffer */
2439 if (tx_frm_download_comp_cb)
2440 tx_frm_download_comp_cb(wma_handle->mac_context,
2441 tx_frame,
2442 WMA_TX_FRAME_BUFFER_FREE);
2443 wma_handle->umac_data_ota_ack_cb = NULL;
2444 wma_handle->last_umac_data_nbuf = NULL;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302445 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002446 }
2447
2448 /* Call Download Callback if passed */
2449 if (tx_frm_download_comp_cb)
2450 tx_frm_download_comp_cb(wma_handle->mac_context,
2451 tx_frame,
2452 WMA_TX_FRAME_BUFFER_NO_FREE);
2453
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302454 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002455 }
2456
2457 is_high_latency =
2458 ol_cfg_is_high_latency(txrx_vdev->pdev->ctrl_pdev);
2459
2460 downld_comp_required = tx_frm_download_comp_cb && is_high_latency;
2461
2462 /* Fill the frame index to send */
2463 if (pFc->type == SIR_MAC_MGMT_FRAME) {
2464 if (tx_frm_ota_comp_cb) {
2465 if (downld_comp_required)
2466 tx_frm_index =
2467 GENERIC_DOWNLD_COMP_ACK_COMP_INDEX;
2468 else
2469 tx_frm_index = GENERIC_NODOWLOAD_ACK_COMP_INDEX;
2470
2471 /* Store the Ack Cb sent by UMAC */
2472 if (pFc->subType < SIR_MAC_MGMT_RESERVED15) {
2473 wma_handle->umac_ota_ack_cb[pFc->subType] =
2474 tx_frm_ota_comp_cb;
2475 }
2476#ifdef QCA_PKT_PROTO_TRACE
2477 if (pFc->subType == SIR_MAC_MGMT_ACTION)
2478 proto_type = cds_pkt_get_proto_type(tx_frame,
2479 pMac->fEnableDebugLog,
2480 NBUF_PKT_TRAC_TYPE_MGMT_ACTION);
2481 if (proto_type & NBUF_PKT_TRAC_TYPE_MGMT_ACTION)
2482 cds_pkt_trace_buf_update("WM:T:MACT");
Nirav Shahcbc6d722016-03-01 16:24:53 +05302483 qdf_nbuf_trace_set_proto_type(tx_frame, proto_type);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002484#endif /* QCA_PKT_PROTO_TRACE */
2485 } else {
2486 if (downld_comp_required)
2487 tx_frm_index =
2488 GENERIC_DOWNLD_COMP_NOACK_COMP_INDEX;
2489 else
2490 tx_frm_index =
2491 GENERIC_NODOWNLD_NOACK_COMP_INDEX;
2492 }
2493 }
2494
2495 /*
	 * If Download Complete is required
2497 * Wait for download complete
2498 */
2499 if (downld_comp_required) {
2500 /* Store Tx Comp Cb */
2501 wma_handle->tx_frm_download_comp_cb = tx_frm_download_comp_cb;
2502
2503 /* Reset the Tx Frame Complete Event */
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302504 qdf_status =
Anurag Chouhance0dc992016-02-16 18:18:03 +05302505 qdf_event_reset(&wma_handle->tx_frm_download_comp_event);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002506
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302507 if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002508 WMA_LOGP("%s: Event Reset failed tx comp event %x",
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302509 __func__, qdf_status);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002510 goto error;
2511 }
2512 }
2513
2514 /* If the frame has to be sent at BD Rate2 inform TxRx */
2515 if (tx_flag & HAL_USE_BD_RATE2_FOR_MANAGEMENT_FRAME)
2516 use_6mbps = 1;
2517
Deepak Dhamdhered97bfb32015-10-11 15:16:18 -07002518 if (wma_handle->interfaces[vdev_id].scan_info.chan_freq != 0) {
2519 chanfreq = wma_handle->interfaces[vdev_id].scan_info.chan_freq;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002520 WMA_LOGI("%s: Preauth frame on channel %d", __func__, chanfreq);
2521 } else if (pFc->subType == SIR_MAC_MGMT_PROBE_RSP) {
2522 chanfreq = wma_handle->interfaces[vdev_id].mhz;
2523 WMA_LOGI("%s: Probe response frame on channel %d", __func__,
2524 chanfreq);
2525 WMA_LOGI("%s: Probe response frame on vdev id %d", __func__,
2526 vdev_id);
2527 } else if (pFc->subType == SIR_MAC_MGMT_ACTION) {
2528 chanfreq = channel_freq;
2529 } else {
2530 chanfreq = 0;
2531 }
2532 if (pMac->fEnableDebugLog & 0x1) {
2533 if ((pFc->type == SIR_MAC_MGMT_FRAME) &&
2534 (pFc->subType != SIR_MAC_MGMT_PROBE_REQ) &&
2535 (pFc->subType != SIR_MAC_MGMT_PROBE_RSP)) {
2536 WMA_LOGE("TX MGMT - Type %hu, SubType %hu seq_num[%d]",
2537 pFc->type, pFc->subType,
2538 ((mHdr->seqControl.seqNumHi << 4) |
2539 mHdr->seqControl.seqNumLo));
2540 }
2541 }
2542
2543 if (WMI_SERVICE_IS_ENABLED(wma_handle->wmi_service_bitmap,
2544 WMI_SERVICE_MGMT_TX_WMI)) {
Govind Singh09c3b492016-03-08 16:05:14 +05302545 mgmt_param.tx_frame = tx_frame;
2546 mgmt_param.frm_len = frmLen;
2547 mgmt_param.vdev_id = vdev_id;
2548 mgmt_param.tx_complete_cb = tx_frm_download_comp_cb;
2549 mgmt_param.tx_ota_post_proc_cb = tx_frm_ota_comp_cb;
2550 mgmt_param.chanfreq = chanfreq;
2551 mgmt_param.wmi_desc = wmi_desc_get(wma_handle);
2552 mgmt_param.pdata = pData;
2553 mgmt_param.qdf_ctx = cds_get_context(QDF_MODULE_ID_QDF_DEVICE);
2554
2555 status = wmi_mgmt_unified_cmd_send(wma_handle->wmi_handle,
2556 &mgmt_param);
2557 if (status)
2558 wmi_desc_put(wma_handle, mgmt_param.wmi_desc);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002559 } else {
2560 /* Hand over the Tx Mgmt frame to TxRx */
Dhanashri Atre12a08392016-02-17 13:10:34 -08002561 status = ol_txrx_mgmt_send_ext(txrx_vdev, tx_frame,
2562 tx_frm_index, use_6mbps, chanfreq);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002563 }
2564
2565 /*
2566 * Failed to send Tx Mgmt Frame
2567 */
2568 if (status) {
2569 /* Call Download Cb so that umac can free the buffer */
2570 if (tx_frm_download_comp_cb)
2571 tx_frm_download_comp_cb(wma_handle->mac_context,
2572 tx_frame,
2573 WMA_TX_FRAME_BUFFER_FREE);
2574 WMA_LOGP("%s: Failed to send Mgmt Frame", __func__);
2575 goto error;
2576 }
2577
2578 if (!tx_frm_download_comp_cb)
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302579 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002580
2581 /*
2582 * Wait for Download Complete
2583 * if required
2584 */
2585 if (downld_comp_required) {
2586 /*
2587 * Wait for Download Complete
2588 * @ Integrated : Dxe Complete
2589 * @ Discrete : Target Download Complete
2590 */
Anurag Chouhance0dc992016-02-16 18:18:03 +05302591 qdf_status =
2592 qdf_wait_single_event(&wma_handle->
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002593 tx_frm_download_comp_event,
2594 WMA_TX_FRAME_COMPLETE_TIMEOUT);
2595
Anurag Chouhance0dc992016-02-16 18:18:03 +05302596 if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002597 WMA_LOGP("Wait Event failed txfrm_comp_event");
2598 /*
2599 * @Integrated: Something Wrong with Dxe
2600 * TODO: Some Debug Code
2601 * Here We need to trigger SSR since
2602 * since system went into a bad state where
2603 * we didn't get Download Complete for almost
2604 * WMA_TX_FRAME_COMPLETE_TIMEOUT (1 sec)
2605 */
2606 }
2607 } else {
2608 /*
2609 * For Low Latency Devices
2610 * Call the download complete
2611 * callback once the frame is successfully
2612 * given to txrx module
2613 */
2614 tx_frm_download_comp_cb(wma_handle->mac_context, tx_frame,
2615 WMA_TX_FRAME_BUFFER_NO_FREE);
2616 }
2617
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302618 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002619
2620error:
2621 wma_handle->tx_frm_download_comp_cb = NULL;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302622 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002623}
2624
2625/**
2626 * wma_ds_peek_rx_packet_info() - peek rx packet info
2627 * @pkt: packet
2628 * @pkt_meta: packet meta
2629 * @bSwap: byte swap
2630 *
2631 * Function fills the rx packet meta info from the the cds packet
2632 *
Anurag Chouhanf04e84f2016-03-03 10:12:12 +05302633 * Return: QDF status
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002634 */
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302635QDF_STATUS wma_ds_peek_rx_packet_info(cds_pkt_t *pkt, void **pkt_meta,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002636 bool bSwap)
2637{
2638 /* Sanity Check */
2639 if (pkt == NULL) {
2640 WMA_LOGE("wma:Invalid parameter sent on wma_peek_rx_pkt_info");
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302641 return QDF_STATUS_E_FAULT;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002642 }
2643
2644 *pkt_meta = &(pkt->pkt_meta);
2645
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302646 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002647}
2648
2649/**
2650 * ol_rx_err() - ol rx err handler
2651 * @pdev: ol pdev
2652 * @vdev_id: vdev id
2653 * @peer_mac_addr: peer mac address
2654 * @tid: TID
2655 * @tsf32: TSF
2656 * @err_type: error type
2657 * @rx_frame: rx frame
2658 * @pn: PN Number
2659 * @key_id: key id
2660 *
2661 * This function handles rx error and send MIC error failure to LIM
2662 *
2663 * Return: none
2664 */
2665void ol_rx_err(ol_pdev_handle pdev, uint8_t vdev_id,
2666 uint8_t *peer_mac_addr, int tid, uint32_t tsf32,
Nirav Shahcbc6d722016-03-01 16:24:53 +05302667 enum ol_rx_err_type err_type, qdf_nbuf_t rx_frame,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002668 uint64_t *pn, uint8_t key_id)
2669{
Anurag Chouhan6d760662016-02-20 16:05:43 +05302670 tp_wma_handle wma = cds_get_context(QDF_MODULE_ID_WMA);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002671 tpSirSmeMicFailureInd mic_err_ind;
2672 struct ether_header *eth_hdr;
2673 cds_msg_t cds_msg;
2674
2675 if (NULL == wma) {
2676 WMA_LOGE("%s: Failed to get wma", __func__);
2677 return;
2678 }
2679
2680 if (err_type != OL_RX_ERR_TKIP_MIC)
2681 return;
2682
Nirav Shahcbc6d722016-03-01 16:24:53 +05302683 if (qdf_nbuf_len(rx_frame) < sizeof(*eth_hdr))
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002684 return;
Nirav Shahcbc6d722016-03-01 16:24:53 +05302685 eth_hdr = (struct ether_header *)qdf_nbuf_data(rx_frame);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302686 mic_err_ind = qdf_mem_malloc(sizeof(*mic_err_ind));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002687 if (!mic_err_ind) {
2688 WMA_LOGE("%s: Failed to allocate memory for MIC indication message",
2689 __func__);
2690 return;
2691 }
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302692 qdf_mem_set((void *)mic_err_ind, sizeof(*mic_err_ind), 0);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002693
2694 mic_err_ind->messageType = eWNI_SME_MIC_FAILURE_IND;
2695 mic_err_ind->length = sizeof(*mic_err_ind);
2696 mic_err_ind->sessionId = vdev_id;
Anurag Chouhanc5548422016-02-24 18:33:27 +05302697 qdf_copy_macaddr(&mic_err_ind->bssId,
Anurag Chouhan6d760662016-02-20 16:05:43 +05302698 (struct qdf_mac_addr *) &wma->interfaces[vdev_id].bssid);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302699 qdf_mem_copy(mic_err_ind->info.taMacAddr,
Anurag Chouhan6d760662016-02-20 16:05:43 +05302700 (struct qdf_mac_addr *) peer_mac_addr,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002701 sizeof(tSirMacAddr));
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302702 qdf_mem_copy(mic_err_ind->info.srcMacAddr,
Anurag Chouhan6d760662016-02-20 16:05:43 +05302703 (struct qdf_mac_addr *) eth_hdr->ether_shost,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002704 sizeof(tSirMacAddr));
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302705 qdf_mem_copy(mic_err_ind->info.dstMacAddr,
Anurag Chouhan6d760662016-02-20 16:05:43 +05302706 (struct qdf_mac_addr *) eth_hdr->ether_dhost,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002707 sizeof(tSirMacAddr));
2708 mic_err_ind->info.keyId = key_id;
2709 mic_err_ind->info.multicast =
2710 IEEE80211_IS_MULTICAST(eth_hdr->ether_dhost);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302711 qdf_mem_copy(mic_err_ind->info.TSC, pn, SIR_CIPHER_SEQ_CTR_SIZE);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002712
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302713 qdf_mem_set(&cds_msg, sizeof(cds_msg_t), 0);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002714 cds_msg.type = eWNI_SME_MIC_FAILURE_IND;
2715 cds_msg.bodyptr = (void *) mic_err_ind;
2716
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302717 if (QDF_STATUS_SUCCESS !=
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002718 cds_mq_post_message(CDS_MQ_ID_SME, (cds_msg_t *) &cds_msg)) {
2719 WMA_LOGE("%s: could not post mic failure indication to SME",
2720 __func__);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302721 qdf_mem_free((void *)mic_err_ind);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002722 }
2723}
2724
2725/**
2726 * wma_tx_abort() - abort tx
2727 * @vdev_id: vdev id
2728 *
2729 * In case of deauth host abort transmitting packet.
2730 *
2731 * Return: none
2732 */
2733void wma_tx_abort(uint8_t vdev_id)
2734{
2735#define PEER_ALL_TID_BITMASK 0xffffffff
2736 tp_wma_handle wma;
2737 uint32_t peer_tid_bitmap = PEER_ALL_TID_BITMASK;
2738 struct wma_txrx_node *iface;
Govind Singhd76a5b02016-03-08 15:12:14 +05302739 struct peer_flush_params param = {0};
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002740
Anurag Chouhan6d760662016-02-20 16:05:43 +05302741 wma = cds_get_context(QDF_MODULE_ID_WMA);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002742 if (NULL == wma) {
2743 WMA_LOGE("%s: wma is NULL", __func__);
2744 return;
2745 }
2746
2747 iface = &wma->interfaces[vdev_id];
2748 if (!iface->handle) {
2749 WMA_LOGE("%s: Failed to get iface handle: %p",
2750 __func__, iface->handle);
2751 return;
2752 }
2753 WMA_LOGA("%s: vdevid %d bssid %pM", __func__, vdev_id, iface->bssid);
2754 iface->pause_bitmap |= (1 << PAUSE_TYPE_HOST);
2755 ol_txrx_vdev_pause(iface->handle, OL_TXQ_PAUSE_REASON_TX_ABORT);
2756
2757 /* Flush all TIDs except MGMT TID for this peer in Target */
2758 peer_tid_bitmap &= ~(0x1 << WMI_MGMT_TID);
Govind Singhd76a5b02016-03-08 15:12:14 +05302759 param.peer_tid_bitmap = peer_tid_bitmap;
2760 param.vdev_id = vdev_id;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002761 wmi_unified_peer_flush_tids_send(wma->wmi_handle, iface->bssid,
Govind Singhd76a5b02016-03-08 15:12:14 +05302762 &param);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002763}
2764
#if defined(FEATURE_LRO)
/**
 * wma_lro_config_cmd() - process the LRO config command
 * @wma_handle: Pointer to WMA handle
 * @wma_lro_cmd: Pointer to LRO configuration parameters
 *
 * This function sends down the LRO configuration parameters to
 * the firmware to enable LRO, sets the TCP flags and sets the
 * seed values for the toeplitz hash generation
 *
 * Return: QDF_STATUS_SUCCESS for success otherwise failure
 */
QDF_STATUS wma_lro_config_cmd(tp_wma_handle wma_handle,
	 struct wma_lro_config_cmd_t *wma_lro_cmd)
{
	struct wmi_lro_config_cmd_t wmi_lro_cmd = {0};

	if (NULL == wma_handle || NULL == wma_lro_cmd) {
		/*
		 * Fixed log: old message embedded a mistyped function name
		 * ("wma_lro_config_cmd'"); use the file-standard
		 * "%s: ...", __func__ form instead.
		 */
		WMA_LOGE("%s: invalid input!", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	/* Translate the WMA request into the WMI layer representation */
	wmi_lro_cmd.lro_enable = wma_lro_cmd->lro_enable;
	wmi_lro_cmd.tcp_flag = wma_lro_cmd->tcp_flag;
	wmi_lro_cmd.tcp_flag_mask = wma_lro_cmd->tcp_flag_mask;
	qdf_mem_copy(wmi_lro_cmd.toeplitz_hash_ipv4,
		     wma_lro_cmd->toeplitz_hash_ipv4,
		     LRO_IPV4_SEED_ARR_SZ * sizeof(uint32_t));
	qdf_mem_copy(wmi_lro_cmd.toeplitz_hash_ipv6,
		     wma_lro_cmd->toeplitz_hash_ipv6,
		     LRO_IPV6_SEED_ARR_SZ * sizeof(uint32_t));

	return wmi_unified_lro_config_cmd(wma_handle->wmi_handle,
		 &wmi_lro_cmd);
}
#endif
Dhanashri Atre1f0cbe42015-11-19 10:56:53 -08002801
2802/**
2803 * wma_indicate_err() - indicate an error to the protocol stack
2804 * @err_type: error type
2805 * @err_info: information associated with the error
2806 *
2807 * This function indicates an error encountered in the data path
2808 * to the protocol stack
2809 *
2810 * Return: none
2811 */
2812void
2813wma_indicate_err(
2814 enum ol_rx_err_type err_type,
2815 struct ol_error_info *err_info)
2816{
2817 switch (err_type) {
2818 case OL_RX_ERR_TKIP_MIC:
2819 {
Anurag Chouhan6d760662016-02-20 16:05:43 +05302820 tp_wma_handle wma = cds_get_context(QDF_MODULE_ID_WMA);
Dhanashri Atre1f0cbe42015-11-19 10:56:53 -08002821 tpSirSmeMicFailureInd mic_err_ind;
2822 cds_msg_t cds_msg;
2823 uint8_t vdev_id;
2824
2825 if (NULL == wma) {
2826 WMA_LOGE("%s: Failed to get wma context",
2827 __func__);
2828 return;
2829 }
2830
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302831 mic_err_ind = qdf_mem_malloc(sizeof(*mic_err_ind));
Dhanashri Atre1f0cbe42015-11-19 10:56:53 -08002832 if (!mic_err_ind) {
2833 WMA_LOGE("%s: MIC indication mem alloc failed",
2834 __func__);
2835 return;
2836 }
2837
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302838 qdf_mem_set((void *) mic_err_ind, 0,
Dhanashri Atre1f0cbe42015-11-19 10:56:53 -08002839 sizeof(*mic_err_ind));
2840 mic_err_ind->messageType = eWNI_SME_MIC_FAILURE_IND;
2841 mic_err_ind->length = sizeof(*mic_err_ind);
2842 vdev_id = err_info->u.mic_err.vdev_id;
Anurag Chouhanc5548422016-02-24 18:33:27 +05302843 qdf_copy_macaddr(&mic_err_ind->bssId,
Anurag Chouhan6d760662016-02-20 16:05:43 +05302844 (struct qdf_mac_addr *) &wma->interfaces[vdev_id].bssid);
Dhanashri Atre1f0cbe42015-11-19 10:56:53 -08002845 WMA_LOGE("MIC error: BSSID:%02x:%02x:%02x:%02x:%02x:%02x\n",
2846 mic_err_ind->bssId.bytes[0], mic_err_ind->bssId.bytes[1],
2847 mic_err_ind->bssId.bytes[2], mic_err_ind->bssId.bytes[3],
2848 mic_err_ind->bssId.bytes[4], mic_err_ind->bssId.bytes[5]);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302849 qdf_mem_copy(mic_err_ind->info.taMacAddr,
Anurag Chouhan6d760662016-02-20 16:05:43 +05302850 (struct qdf_mac_addr *) err_info->u.mic_err.ta,
Dhanashri Atre1f0cbe42015-11-19 10:56:53 -08002851 sizeof(tSirMacAddr));
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302852 qdf_mem_copy(mic_err_ind->info.srcMacAddr,
Anurag Chouhan6d760662016-02-20 16:05:43 +05302853 (struct qdf_mac_addr *) err_info->u.mic_err.sa,
Dhanashri Atre1f0cbe42015-11-19 10:56:53 -08002854 sizeof(tSirMacAddr));
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302855 qdf_mem_copy(mic_err_ind->info.dstMacAddr,
Anurag Chouhan6d760662016-02-20 16:05:43 +05302856 (struct qdf_mac_addr *) err_info->u.mic_err.da,
Dhanashri Atre1f0cbe42015-11-19 10:56:53 -08002857 sizeof(tSirMacAddr));
2858 mic_err_ind->info.keyId = err_info->u.mic_err.key_id;
2859 mic_err_ind->info.multicast =
2860 IEEE80211_IS_MULTICAST(err_info->u.mic_err.da);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302861 qdf_mem_copy(mic_err_ind->info.TSC,
Dhanashri Atre1f0cbe42015-11-19 10:56:53 -08002862 (void *)&err_info->
2863 u.mic_err.pn, SIR_CIPHER_SEQ_CTR_SIZE);
2864
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302865 qdf_mem_set(&cds_msg, sizeof(cds_msg_t), 0);
Dhanashri Atre1f0cbe42015-11-19 10:56:53 -08002866 cds_msg.type = eWNI_SME_MIC_FAILURE_IND;
2867 cds_msg.bodyptr = (void *) mic_err_ind;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302868 if (QDF_STATUS_SUCCESS !=
Dhanashri Atre1f0cbe42015-11-19 10:56:53 -08002869 cds_mq_post_message(CDS_MQ_ID_SME,
2870 (cds_msg_t *) &cds_msg)) {
2871 WMA_LOGE("%s: mic failure ind post to SME failed",
2872 __func__);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302873 qdf_mem_free((void *)mic_err_ind);
Dhanashri Atre1f0cbe42015-11-19 10:56:53 -08002874 }
2875 break;
2876 }
2877 default:
2878 {
2879 WMA_LOGE("%s: unhandled ol error type %d", __func__, err_type);
2880 break;
2881 }
2882 }
2883}