blob: dbfce926619b44e17d645de8295c1c7f433c816f [file] [log] [blame]
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001/*
Houston Hoffmana2cdf222015-10-20 16:03:06 -07002 * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003 *
4 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5 *
6 *
7 * Permission to use, copy, modify, and/or distribute this software for
8 * any purpose with or without fee is hereby granted, provided that the
9 * above copyright notice and this permission notice appear in all
10 * copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19 * PERFORMANCE OF THIS SOFTWARE.
20 */
21
22/*
23 * This file was originally distributed by Qualcomm Atheros, Inc.
24 * under proprietary terms before Copyright ownership was assigned
25 * to the Linux Foundation.
26 */
27
28/**
29 * DOC: wma_data.c
30 * This file contains tx/rx and data path related functions.
31 */
32
33/* Header files */
34
35#include "wma.h"
36#include "wma_api.h"
37#include "cds_api.h"
38#include "wmi_unified_api.h"
39#include "wlan_qct_sys.h"
40#include "wni_api.h"
41#include "ani_global.h"
42#include "wmi_unified.h"
43#include "wni_cfg.h"
44#include "cfg_api.h"
45#include "ol_txrx_ctrl_api.h"
46#include "wlan_tgt_def_config.h"
47
Nirav Shahcbc6d722016-03-01 16:24:53 +053048#include "qdf_nbuf.h"
Anurag Chouhan6d760662016-02-20 16:05:43 +053049#include "qdf_types.h"
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080050#include "ol_txrx_api.h"
Anurag Chouhan600c3a02016-03-01 10:33:54 +053051#include "qdf_mem.h"
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080052#include "ol_txrx_types.h"
53#include "ol_txrx_peer_find.h"
54
55#include "wma_types.h"
56#include "lim_api.h"
57#include "lim_session_utils.h"
58
59#include "cds_utils.h"
60
61#if !defined(REMOVE_PKT_LOG)
62#include "pktlog_ac.h"
63#endif /* REMOVE_PKT_LOG */
64
65#include "dbglog_host.h"
66#include "csr_api.h"
67#include "ol_fw.h"
68
69#include "dfs.h"
70#include "wma_internal.h"
71
/**
 * wma_search_rate_t - one entry of a rate lookup table
 * @rate: rate in Mbps * 10 (e.g. 540 == 54 Mbps)
 * @flag: encoding of the matched rate; meaning is table-specific (MCS
 *        index for HT/VHT tables, OFDM/CCK hardware index for the
 *        legacy table — see the comment above each table)
 */
typedef struct {
	int32_t rate;
	uint8_t flag;
} wma_search_rate_t;
76
#define WMA_MAX_OFDM_CCK_RATE_TBL_SIZE 12
/* Legacy rates in Mbps * 10, sorted descending as required by
 * wma_bin_search_rate(). In each flag, bit 7 set means CCK and clear
 * means OFDM; the low bits carry the hardware index used to encode
 * the rate.
 */
static wma_search_rate_t ofdm_cck_rate_tbl[WMA_MAX_OFDM_CCK_RATE_TBL_SIZE] = {
	{540, 4},		/* 4: OFDM 54 Mbps */
	{480, 0},		/* 0: OFDM 48 Mbps */
	{360, 5},		/* 5: OFDM 36 Mbps */
	{240, 1},		/* 1: OFDM 24 Mbps */
	{180, 6},		/* 6: OFDM 18 Mbps */
	{120, 2},		/* 2: OFDM 12 Mbps */
	{110, (1 << 7)},	/* 0: CCK 11 Mbps Long */
	{90, 7},		/* 7: OFDM 9 Mbps */
	{60, 3},		/* 3: OFDM 6 Mbps */
	{55, ((1 << 7) | 1)},	/* 1: CCK 5.5 Mbps Long */
	{20, ((1 << 7) | 2)},	/* 2: CCK 2 Mbps Long */
	{10, ((1 << 7) | 3)}	/* 3: CCK 1 Mbps Long */
};
95
#define WMA_MAX_VHT20_RATE_TBL_SIZE 9
/* VHT20 single-stream rates in Mbps * 10, sorted descending for
 * wma_bin_search_rate(); flag carries the MCS index used to encode
 * the rate. 400 ns table = short GI.
 */
static wma_search_rate_t vht20_400ns_rate_tbl[WMA_MAX_VHT20_RATE_TBL_SIZE] = {
	{867, 8},		/* MCS8 1SS short GI */
	{722, 7},		/* MCS7 1SS short GI */
	{650, 6},		/* MCS6 1SS short GI */
	{578, 5},		/* MCS5 1SS short GI */
	{433, 4},		/* MCS4 1SS short GI */
	{289, 3},		/* MCS3 1SS short GI */
	{217, 2},		/* MCS2 1SS short GI */
	{144, 1},		/* MCS1 1SS short GI */
	{72, 0}			/* MCS0 1SS short GI */
};

/* VHT20 single-stream rates, 800 ns (long) GI; flag is the MCS index */
static wma_search_rate_t vht20_800ns_rate_tbl[WMA_MAX_VHT20_RATE_TBL_SIZE] = {
	{780, 8},		/* MCS8 1SS long GI */
	{650, 7},		/* MCS7 1SS long GI */
	{585, 6},		/* MCS6 1SS long GI */
	{520, 5},		/* MCS5 1SS long GI */
	{390, 4},		/* MCS4 1SS long GI */
	{260, 3},		/* MCS3 1SS long GI */
	{195, 2},		/* MCS2 1SS long GI */
	{130, 1},		/* MCS1 1SS long GI */
	{65, 0}			/* MCS0 1SS long GI */
};
122
#define WMA_MAX_VHT40_RATE_TBL_SIZE 10
/* VHT40 single-stream rates in Mbps * 10, sorted descending for
 * wma_bin_search_rate(); flag carries the MCS index. 400 ns = short GI.
 */
static wma_search_rate_t vht40_400ns_rate_tbl[WMA_MAX_VHT40_RATE_TBL_SIZE] = {
	{2000, 9},		/* MCS9 1SS short GI */
	{1800, 8},		/* MCS8 1SS short GI */
	{1500, 7},		/* MCS7 1SS short GI */
	{1350, 6},		/* MCS6 1SS short GI */
	{1200, 5},		/* MCS5 1SS short GI */
	{900, 4},		/* MCS4 1SS short GI */
	{600, 3},		/* MCS3 1SS short GI */
	{450, 2},		/* MCS2 1SS short GI */
	{300, 1},		/* MCS1 1SS short GI */
	{150, 0},		/* MCS0 1SS short GI */
};

/* VHT40 single-stream rates, 800 ns (long) GI; flag is the MCS index */
static wma_search_rate_t vht40_800ns_rate_tbl[WMA_MAX_VHT40_RATE_TBL_SIZE] = {
	{1800, 9},		/* MCS9 1SS long GI */
	{1620, 8},		/* MCS8 1SS long GI */
	{1350, 7},		/* MCS7 1SS long GI */
	{1215, 6},		/* MCS6 1SS long GI */
	{1080, 5},		/* MCS5 1SS long GI */
	{810, 4},		/* MCS4 1SS long GI */
	{540, 3},		/* MCS3 1SS long GI */
	{405, 2},		/* MCS2 1SS long GI */
	{270, 1},		/* MCS1 1SS long GI */
	{135, 0}		/* MCS0 1SS long GI */
};
150
#define WMA_MAX_VHT80_RATE_TBL_SIZE 10
/* VHT80 single-stream rates in Mbps * 10, sorted descending for
 * wma_bin_search_rate(); flag carries the MCS index. 400 ns = short GI.
 */
static wma_search_rate_t vht80_400ns_rate_tbl[WMA_MAX_VHT80_RATE_TBL_SIZE] = {
	{4333, 9},		/* MCS9 1SS short GI */
	{3900, 8},		/* MCS8 1SS short GI */
	{3250, 7},		/* MCS7 1SS short GI */
	{2925, 6},		/* MCS6 1SS short GI */
	{2600, 5},		/* MCS5 1SS short GI */
	{1950, 4},		/* MCS4 1SS short GI */
	{1300, 3},		/* MCS3 1SS short GI */
	{975, 2},		/* MCS2 1SS short GI */
	{650, 1},		/* MCS1 1SS short GI */
	{325, 0}		/* MCS0 1SS short GI */
};

/* VHT80 single-stream rates, 800 ns (long) GI; flag is the MCS index */
static wma_search_rate_t vht80_800ns_rate_tbl[WMA_MAX_VHT80_RATE_TBL_SIZE] = {
	{3900, 9},		/* MCS9 1SS long GI */
	{3510, 8},		/* MCS8 1SS long GI */
	{2925, 7},		/* MCS7 1SS long GI */
	{2633, 6},		/* MCS6 1SS long GI */
	{2340, 5},		/* MCS5 1SS long GI */
	{1755, 4},		/* MCS4 1SS long GI */
	{1170, 3},		/* MCS3 1SS long GI */
	{878, 2},		/* MCS2 1SS long GI */
	{585, 1},		/* MCS1 1SS long GI */
	{293, 0}		/* MCS0 1SS long GI */
};
177
#define WMA_MAX_HT20_RATE_TBL_SIZE 8
/* HT20 single-stream rates in Mbps * 10, sorted descending for
 * wma_bin_search_rate(); flag carries the MCS index. 400 ns = short GI.
 */
static wma_search_rate_t ht20_400ns_rate_tbl[WMA_MAX_HT20_RATE_TBL_SIZE] = {
	{722, 7},		/* MCS7 1SS short GI */
	{650, 6},		/* MCS6 1SS short GI */
	{578, 5},		/* MCS5 1SS short GI */
	{433, 4},		/* MCS4 1SS short GI */
	{289, 3},		/* MCS3 1SS short GI */
	{217, 2},		/* MCS2 1SS short GI */
	{144, 1},		/* MCS1 1SS short GI */
	{72, 0}			/* MCS0 1SS short GI */
};

/* HT20 single-stream rates, 800 ns (long) GI; flag is the MCS index */
static wma_search_rate_t ht20_800ns_rate_tbl[WMA_MAX_HT20_RATE_TBL_SIZE] = {
	{650, 7},		/* MCS7 1SS long GI */
	{585, 6},		/* MCS6 1SS long GI */
	{520, 5},		/* MCS5 1SS long GI */
	{390, 4},		/* MCS4 1SS long GI */
	{260, 3},		/* MCS3 1SS long GI */
	{195, 2},		/* MCS2 1SS long GI */
	{130, 1},		/* MCS1 1SS long GI */
	{65, 0}			/* MCS0 1SS long GI */
};
200
#define WMA_MAX_HT40_RATE_TBL_SIZE 8
/* HT40 single-stream rates in Mbps * 10, sorted descending for
 * wma_bin_search_rate(); flag carries the MCS index. 400 ns = short GI.
 */
static wma_search_rate_t ht40_400ns_rate_tbl[WMA_MAX_HT40_RATE_TBL_SIZE] = {
	{1500, 7},		/* MCS7 1SS short GI */
	{1350, 6},		/* MCS6 1SS short GI */
	{1200, 5},		/* MCS5 1SS short GI */
	{900, 4},		/* MCS4 1SS short GI */
	{600, 3},		/* MCS3 1SS short GI */
	{450, 2},		/* MCS2 1SS short GI */
	{300, 1},		/* MCS1 1SS short GI */
	{150, 0}		/* MCS0 1SS short GI */
};

/* HT40 single-stream rates, 800 ns (long) GI; flag is the MCS index */
static wma_search_rate_t ht40_800ns_rate_tbl[WMA_MAX_HT40_RATE_TBL_SIZE] = {
	{1350, 7},		/* MCS7 1SS long GI */
	{1215, 6},		/* MCS6 1SS long GI */
	{1080, 5},		/* MCS5 1SS long GI */
	{810, 4},		/* MCS4 1SS long GI */
	{540, 3},		/* MCS3 1SS long GI */
	{405, 2},		/* MCS2 1SS long GI */
	{270, 1},		/* MCS1 1SS long GI */
	{135, 0}		/* MCS0 1SS long GI */
};
223
224/**
225 * wma_bin_search_rate() - binary search function to find rate
226 * @tbl: rate table
227 * @tbl_size: table size
228 * @mbpsx10_rate: return mbps rate
229 * @ret_flag: return flag
230 *
231 * Return: none
232 */
233static void wma_bin_search_rate(wma_search_rate_t *tbl, int32_t tbl_size,
234 int32_t *mbpsx10_rate, uint8_t *ret_flag)
235{
236 int32_t upper, lower, mid;
237
238 /* the table is descenting. index holds the largest value and the
239 * bottom index holds the smallest value */
240
241 upper = 0; /* index 0 */
242 lower = tbl_size - 1; /* last index */
243
244 if (*mbpsx10_rate >= tbl[upper].rate) {
245 /* use the largest rate */
246 *mbpsx10_rate = tbl[upper].rate;
247 *ret_flag = tbl[upper].flag;
248 return;
249 } else if (*mbpsx10_rate <= tbl[lower].rate) {
250 /* use the smallest rate */
251 *mbpsx10_rate = tbl[lower].rate;
252 *ret_flag = tbl[lower].flag;
253 return;
254 }
255 /* now we do binery search to get the floor value */
256 while (lower - upper > 1) {
257 mid = (upper + lower) >> 1;
258 if (*mbpsx10_rate == tbl[mid].rate) {
259 /* found the exact match */
260 *mbpsx10_rate = tbl[mid].rate;
261 *ret_flag = tbl[mid].flag;
262 return;
263 } else {
264 /* not found. if mid's rate is larger than input move
265 * upper to mid. If mid's rate is larger than input
266 * move lower to mid.
267 */
268 if (*mbpsx10_rate > tbl[mid].rate)
269 lower = mid;
270 else
271 upper = mid;
272 }
273 }
274 /* after the bin search the index is the ceiling of rate */
275 *mbpsx10_rate = tbl[upper].rate;
276 *ret_flag = tbl[upper].flag;
277 return;
278}
279
280/**
281 * wma_fill_ofdm_cck_mcast_rate() - fill ofdm cck mcast rate
282 * @mbpsx10_rate: mbps rates
283 * @nss: nss
284 * @rate: rate
285 *
286 * Return: CDF status
287 */
Anurag Chouhanfb54ab02016-02-18 18:00:46 +0530288static QDF_STATUS wma_fill_ofdm_cck_mcast_rate(int32_t mbpsx10_rate,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800289 uint8_t nss, uint8_t *rate)
290{
291 uint8_t idx = 0;
292 wma_bin_search_rate(ofdm_cck_rate_tbl, WMA_MAX_OFDM_CCK_RATE_TBL_SIZE,
293 &mbpsx10_rate, &idx);
294
295 /* if bit 7 is set it uses CCK */
296 if (idx & 0x80)
297 *rate |= (1 << 6) | (idx & 0xF); /* set bit 6 to 1 for CCK */
298 else
299 *rate |= (idx & 0xF);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +0530300 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800301}
302
/**
 * wma_set_ht_vht_mcast_rate() - pick the short- or long-GI match
 * @shortgi: non-zero when the short-GI lookup was performed
 * @mbpsx10_rate: requested rate in Mbps * 10 (not used here)
 * @sgi_idx: MCS index matched against the short-GI table
 * @sgi_rate: rate matched against the short-GI table
 * @lgi_idx: MCS index matched against the long-GI table
 * @lgi_rate: rate matched against the long-GI table
 * @premable: preamble code placed in bits 6-7 (2 = HT, 3 = VHT)
 * @rate: out: preamble and MCS bits are OR'ed in here
 * @streaming_rate: out: the matched rate for the selected GI
 *
 * Return: none
 */
static void wma_set_ht_vht_mcast_rate(uint32_t shortgi, int32_t mbpsx10_rate,
				      uint8_t sgi_idx, int32_t sgi_rate,
				      uint8_t lgi_idx, int32_t lgi_rate,
				      uint8_t premable, uint8_t *rate,
				      int32_t *streaming_rate)
{
	/* Select whichever pair was actually filled by the caller */
	uint8_t mcs = shortgi ? sgi_idx : lgi_idx;

	*rate |= (premable << 6) | (mcs & 0xF);
	*streaming_rate = shortgi ? sgi_rate : lgi_rate;
}
331
332/**
333 * wma_fill_ht20_mcast_rate() - fill ht20 mcast rate
334 * @shortgi: short gaurd interval
335 * @mbpsx10_rate: mbps rates
336 * @nss: nss
337 * @rate: rate
338 * @streaming_rate: streaming rate
339 *
340 * Return: CDF status
341 */
Anurag Chouhanfb54ab02016-02-18 18:00:46 +0530342static QDF_STATUS wma_fill_ht20_mcast_rate(uint32_t shortgi,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800343 int32_t mbpsx10_rate, uint8_t nss,
344 uint8_t *rate,
345 int32_t *streaming_rate)
346{
347 uint8_t sgi_idx = 0, lgi_idx = 0;
348 int32_t sgi_rate, lgi_rate;
349 if (nss == 1)
350 mbpsx10_rate = mbpsx10_rate >> 1;
351
352 sgi_rate = mbpsx10_rate;
353 lgi_rate = mbpsx10_rate;
354 if (shortgi)
355 wma_bin_search_rate(ht20_400ns_rate_tbl,
356 WMA_MAX_HT20_RATE_TBL_SIZE, &sgi_rate,
357 &sgi_idx);
358 else
359 wma_bin_search_rate(ht20_800ns_rate_tbl,
360 WMA_MAX_HT20_RATE_TBL_SIZE, &lgi_rate,
361 &lgi_idx);
362
363 wma_set_ht_vht_mcast_rate(shortgi, mbpsx10_rate, sgi_idx, sgi_rate,
364 lgi_idx, lgi_rate, 2, rate, streaming_rate);
365 if (nss == 1)
366 *streaming_rate = *streaming_rate << 1;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +0530367 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800368}
369
370/**
371 * wma_fill_ht40_mcast_rate() - fill ht40 mcast rate
372 * @shortgi: short gaurd interval
373 * @mbpsx10_rate: mbps rates
374 * @nss: nss
375 * @rate: rate
376 * @streaming_rate: streaming rate
377 *
378 * Return: CDF status
379 */
Anurag Chouhanfb54ab02016-02-18 18:00:46 +0530380static QDF_STATUS wma_fill_ht40_mcast_rate(uint32_t shortgi,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800381 int32_t mbpsx10_rate, uint8_t nss,
382 uint8_t *rate,
383 int32_t *streaming_rate)
384{
385 uint8_t sgi_idx = 0, lgi_idx = 0;
386 int32_t sgi_rate, lgi_rate;
387
388 /* for 2x2 divide the rate by 2 */
389 if (nss == 1)
390 mbpsx10_rate = mbpsx10_rate >> 1;
391
392 sgi_rate = mbpsx10_rate;
393 lgi_rate = mbpsx10_rate;
394 if (shortgi)
395 wma_bin_search_rate(ht40_400ns_rate_tbl,
396 WMA_MAX_HT40_RATE_TBL_SIZE, &sgi_rate,
397 &sgi_idx);
398 else
399 wma_bin_search_rate(ht40_800ns_rate_tbl,
400 WMA_MAX_HT40_RATE_TBL_SIZE, &lgi_rate,
401 &lgi_idx);
402
403 wma_set_ht_vht_mcast_rate(shortgi, mbpsx10_rate, sgi_idx, sgi_rate,
404 lgi_idx, lgi_rate, 2, rate, streaming_rate);
405
Anurag Chouhanfb54ab02016-02-18 18:00:46 +0530406 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800407}
408
409/**
410 * wma_fill_vht20_mcast_rate() - fill vht20 mcast rate
411 * @shortgi: short gaurd interval
412 * @mbpsx10_rate: mbps rates
413 * @nss: nss
414 * @rate: rate
415 * @streaming_rate: streaming rate
416 *
417 * Return: CDF status
418 */
Anurag Chouhanfb54ab02016-02-18 18:00:46 +0530419static QDF_STATUS wma_fill_vht20_mcast_rate(uint32_t shortgi,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800420 int32_t mbpsx10_rate, uint8_t nss,
421 uint8_t *rate,
422 int32_t *streaming_rate)
423{
424 uint8_t sgi_idx = 0, lgi_idx = 0;
425 int32_t sgi_rate, lgi_rate;
426
427 /* for 2x2 divide the rate by 2 */
428 if (nss == 1)
429 mbpsx10_rate = mbpsx10_rate >> 1;
430
431 sgi_rate = mbpsx10_rate;
432 lgi_rate = mbpsx10_rate;
433 if (shortgi)
434 wma_bin_search_rate(vht20_400ns_rate_tbl,
435 WMA_MAX_VHT20_RATE_TBL_SIZE, &sgi_rate,
436 &sgi_idx);
437 else
438 wma_bin_search_rate(vht20_800ns_rate_tbl,
439 WMA_MAX_VHT20_RATE_TBL_SIZE, &lgi_rate,
440 &lgi_idx);
441
442 wma_set_ht_vht_mcast_rate(shortgi, mbpsx10_rate, sgi_idx, sgi_rate,
443 lgi_idx, lgi_rate, 3, rate, streaming_rate);
444 if (nss == 1)
445 *streaming_rate = *streaming_rate << 1;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +0530446 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800447}
448
449/**
450 * wma_fill_vht40_mcast_rate() - fill vht40 mcast rate
451 * @shortgi: short gaurd interval
452 * @mbpsx10_rate: mbps rates
453 * @nss: nss
454 * @rate: rate
455 * @streaming_rate: streaming rate
456 *
457 * Return: CDF status
458 */
Anurag Chouhanfb54ab02016-02-18 18:00:46 +0530459static QDF_STATUS wma_fill_vht40_mcast_rate(uint32_t shortgi,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800460 int32_t mbpsx10_rate, uint8_t nss,
461 uint8_t *rate,
462 int32_t *streaming_rate)
463{
464 uint8_t sgi_idx = 0, lgi_idx = 0;
465 int32_t sgi_rate, lgi_rate;
466
467 /* for 2x2 divide the rate by 2 */
468 if (nss == 1)
469 mbpsx10_rate = mbpsx10_rate >> 1;
470
471 sgi_rate = mbpsx10_rate;
472 lgi_rate = mbpsx10_rate;
473 if (shortgi)
474 wma_bin_search_rate(vht40_400ns_rate_tbl,
475 WMA_MAX_VHT40_RATE_TBL_SIZE, &sgi_rate,
476 &sgi_idx);
477 else
478 wma_bin_search_rate(vht40_800ns_rate_tbl,
479 WMA_MAX_VHT40_RATE_TBL_SIZE, &lgi_rate,
480 &lgi_idx);
481
482 wma_set_ht_vht_mcast_rate(shortgi, mbpsx10_rate,
483 sgi_idx, sgi_rate, lgi_idx, lgi_rate,
484 3, rate, streaming_rate);
485 if (nss == 1)
486 *streaming_rate = *streaming_rate << 1;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +0530487 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800488}
489
490/**
491 * wma_fill_vht80_mcast_rate() - fill vht80 mcast rate
492 * @shortgi: short gaurd interval
493 * @mbpsx10_rate: mbps rates
494 * @nss: nss
495 * @rate: rate
496 * @streaming_rate: streaming rate
497 *
498 * Return: CDF status
499 */
Anurag Chouhanfb54ab02016-02-18 18:00:46 +0530500static QDF_STATUS wma_fill_vht80_mcast_rate(uint32_t shortgi,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800501 int32_t mbpsx10_rate, uint8_t nss,
502 uint8_t *rate,
503 int32_t *streaming_rate)
504{
505 uint8_t sgi_idx = 0, lgi_idx = 0;
506 int32_t sgi_rate, lgi_rate;
507
508 /* for 2x2 divide the rate by 2 */
509 if (nss == 1)
510 mbpsx10_rate = mbpsx10_rate >> 1;
511
512 sgi_rate = mbpsx10_rate;
513 lgi_rate = mbpsx10_rate;
514 if (shortgi)
515 wma_bin_search_rate(vht80_400ns_rate_tbl,
516 WMA_MAX_VHT80_RATE_TBL_SIZE, &sgi_rate,
517 &sgi_idx);
518 else
519 wma_bin_search_rate(vht80_800ns_rate_tbl,
520 WMA_MAX_VHT80_RATE_TBL_SIZE, &lgi_rate,
521 &lgi_idx);
522
523 wma_set_ht_vht_mcast_rate(shortgi, mbpsx10_rate, sgi_idx, sgi_rate,
524 lgi_idx, lgi_rate, 3, rate, streaming_rate);
525 if (nss == 1)
526 *streaming_rate = *streaming_rate << 1;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +0530527 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800528}
529
530/**
531 * wma_fill_ht_mcast_rate() - fill ht mcast rate
532 * @shortgi: short gaurd interval
533 * @chwidth: channel width
534 * @chanmode: channel mode
535 * @mhz: frequency
536 * @mbpsx10_rate: mbps rates
537 * @nss: nss
538 * @rate: rate
539 * @streaming_rate: streaming rate
540 *
541 * Return: CDF status
542 */
Anurag Chouhanfb54ab02016-02-18 18:00:46 +0530543static QDF_STATUS wma_fill_ht_mcast_rate(uint32_t shortgi,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800544 uint32_t chwidth, int32_t mbpsx10_rate,
545 uint8_t nss, WLAN_PHY_MODE chanmode,
546 uint8_t *rate,
547 int32_t *streaming_rate)
548{
549 int32_t ret = 0;
550
551 *streaming_rate = 0;
552 if (chwidth == 0)
553 ret = wma_fill_ht20_mcast_rate(shortgi, mbpsx10_rate,
554 nss, rate, streaming_rate);
555 else if (chwidth == 1)
556 ret = wma_fill_ht40_mcast_rate(shortgi, mbpsx10_rate,
557 nss, rate, streaming_rate);
558 else
559 WMA_LOGE("%s: Error, Invalid chwidth enum %d", __func__,
560 chwidth);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +0530561 return (*streaming_rate != 0) ? QDF_STATUS_SUCCESS : QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800562}
563
564/**
565 * wma_fill_vht_mcast_rate() - fill vht mcast rate
566 * @shortgi: short gaurd interval
567 * @chwidth: channel width
568 * @chanmode: channel mode
569 * @mhz: frequency
570 * @mbpsx10_rate: mbps rates
571 * @nss: nss
572 * @rate: rate
573 * @streaming_rate: streaming rate
574 *
575 * Return: CDF status
576 */
Anurag Chouhanfb54ab02016-02-18 18:00:46 +0530577static QDF_STATUS wma_fill_vht_mcast_rate(uint32_t shortgi,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800578 uint32_t chwidth,
579 int32_t mbpsx10_rate, uint8_t nss,
580 WLAN_PHY_MODE chanmode,
581 uint8_t *rate,
582 int32_t *streaming_rate)
583{
584 int32_t ret = 0;
585
586 *streaming_rate = 0;
587 if (chwidth == 0)
588 ret = wma_fill_vht20_mcast_rate(shortgi, mbpsx10_rate, nss,
589 rate, streaming_rate);
590 else if (chwidth == 1)
591 ret = wma_fill_vht40_mcast_rate(shortgi, mbpsx10_rate, nss,
592 rate, streaming_rate);
593 else if (chwidth == 2)
594 ret = wma_fill_vht80_mcast_rate(shortgi, mbpsx10_rate, nss,
595 rate, streaming_rate);
596 else
597 WMA_LOGE("%s: chwidth enum %d not supported",
598 __func__, chwidth);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +0530599 return (*streaming_rate != 0) ? QDF_STATUS_SUCCESS : QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800600}
601
/* Aggregate rates at or below this (Mbps * 10) are served with 1x1 */
#define WMA_MCAST_1X1_CUT_OFF_RATE 2000
/**
 * wma_encode_mc_rate() - encode a multicast rate request into a rate code
 * @shortgi: short guard interval selector
 * @chwidth: channel width (0 = 20, 1 = 40, 2 = 80 MHz)
 * @chanmode: channel mode (logged only)
 * @mhz: channel frequency in MHz (selects HT-only vs HT/VHT search)
 * @mbpsx10_rate: requested rate in Mbps * 10; bits 28-30 may carry a
 *                user-specified NSS (see below)
 * @nss: max supported NSS: 0 - 1x1, 1 - 2x2, 2 - 3x3
 * @rate: out: encoded rate (NSS in bits 4-5, preamble in bits 6-7,
 *        MCS/legacy index in the low nibble; 0xFF = disabled)
 *
 * Return: QDF status (returned through an int32_t local)
 */
static QDF_STATUS wma_encode_mc_rate(uint32_t shortgi, uint32_t chwidth,
				     WLAN_PHY_MODE chanmode, A_UINT32 mhz,
				     int32_t mbpsx10_rate, uint8_t nss,
				     uint8_t *rate)
{
	int32_t ret = 0;

	/* nss input value: 0 - 1x1; 1 - 2x2; 2 - 3x3
	 * the phymode selection is based on following assumption:
	 * (1) if the app specifically requested 1x1 or 2x2 we honor it
	 * (2) if mbpsx10_rate <= 540: always use BG
	 * (3) 540 < mbpsx10_rate <= 2000: use 1x1 HT/VHT
	 * (4) 2000 < mbpsx10_rate: use 2x2 HT/VHT
	 */
	WMA_LOGE("%s: Input: nss = %d, chanmode = %d, "
		 "mbpsx10 = 0x%x, chwidth = %d, shortgi = %d",
		 __func__, nss, chanmode, mbpsx10_rate, chwidth, shortgi);
	if ((mbpsx10_rate & 0x40000000) && nss > 0) {
		/* bit 30 indicates user inputed nss,
		 * bit 28 and 29 used to encode nss
		 */
		uint8_t user_nss = (mbpsx10_rate & 0x30000000) >> 28;

		/* never exceed the hardware-supported NSS */
		nss = (user_nss < nss) ? user_nss : nss;
		/* zero out bits 28 - 30 to recover the actual rate */
		mbpsx10_rate &= ~0x70000000;
	} else if (mbpsx10_rate <= WMA_MCAST_1X1_CUT_OFF_RATE) {
		/* if the input rate is less or equal to the
		 * 1x1 cutoff rate we use 1x1 only
		 */
		nss = 0;
	}
	/* encode NSS bits (bit 4, bit 5) */
	*rate = (nss & 0x3) << 4;
	/* if mcast input rate exceeds the ofdm/cck max rate 54mpbs
	 * we try to choose best ht/vht mcs rate
	 */
	if (540 < mbpsx10_rate) {
		/* cannot use ofdm/cck, choose closest ht/vht mcs rate */
		uint8_t rate_ht = *rate;
		uint8_t rate_vht = *rate;
		int32_t stream_rate_ht = 0;
		int32_t stream_rate_vht = 0;
		int32_t stream_rate = 0;

		ret = wma_fill_ht_mcast_rate(shortgi, chwidth, mbpsx10_rate,
					     nss, chanmode, &rate_ht,
					     &stream_rate_ht);
		if (ret != QDF_STATUS_SUCCESS) {
			stream_rate_ht = 0;
		}
		if (mhz < WMA_2_4_GHZ_MAX_FREQ) {
			/* 2.4 GHz band: no VHT mcast, use the HT match */
			*rate = rate_ht;
			stream_rate = stream_rate_ht;
			goto ht_vht_done;
		}
		/* capable doing 11AC mcast so that search vht tables */
		ret = wma_fill_vht_mcast_rate(shortgi, chwidth, mbpsx10_rate,
					      nss, chanmode, &rate_vht,
					      &stream_rate_vht);
		if (ret != QDF_STATUS_SUCCESS) {
			/* fall back to the HT match when one exists */
			if (stream_rate_ht != 0)
				ret = QDF_STATUS_SUCCESS;
			*rate = rate_ht;
			stream_rate = stream_rate_ht;
			goto ht_vht_done;
		}
		if (stream_rate_ht == 0) {
			/* only vht rate available */
			*rate = rate_vht;
			stream_rate = stream_rate_vht;
		} else {
			/* set ht as default first */
			*rate = rate_ht;
			stream_rate = stream_rate_ht;
			if (stream_rate < mbpsx10_rate) {
				/* HT undershoots: prefer VHT if it meets
				 * the request or at least exceeds HT
				 */
				if (mbpsx10_rate <= stream_rate_vht ||
				    stream_rate < stream_rate_vht) {
					*rate = rate_vht;
					stream_rate = stream_rate_vht;
				}
			} else {
				/* HT meets the request: prefer VHT only if
				 * it also meets it with a tighter match
				 */
				if (stream_rate_vht >= mbpsx10_rate &&
				    stream_rate_vht < stream_rate) {
					*rate = rate_vht;
					stream_rate = stream_rate_vht;
				}
			}
		}
ht_vht_done:
		WMA_LOGE("%s: NSS = %d, ucast_chanmode = %d, "
			 "freq = %d, input_rate = %d, chwidth = %d "
			 "rate = 0x%x, streaming_rate = %d",
			 __func__, nss, chanmode, mhz,
			 mbpsx10_rate, chwidth, *rate, stream_rate);
	} else {
		if (mbpsx10_rate > 0)
			ret = wma_fill_ofdm_cck_mcast_rate(mbpsx10_rate,
							   nss, rate);
		else
			*rate = 0xFF;	/* non-positive rate: disable */

		WMA_LOGE("%s: NSS = %d, ucast_chanmode = %d, "
			 "input_rate = %d, rate = 0x%x",
			 __func__, nss, chanmode, mbpsx10_rate, *rate);
	}
	return ret;
}
724
725/**
726 * wma_set_bss_rate_flags() - set rate flags based on BSS capability
727 * @iface: txrx_node ctx
728 * @add_bss: add_bss params
729 *
730 * Return: none
731 */
732void wma_set_bss_rate_flags(struct wma_txrx_node *iface,
733 tpAddBssParams add_bss)
734{
735 iface->rate_flags = 0;
736
737 if (add_bss->vhtCapable) {
738 if (add_bss->ch_width == CH_WIDTH_80P80MHZ)
739 iface->rate_flags |= eHAL_TX_RATE_VHT80;
740 if (add_bss->ch_width == CH_WIDTH_160MHZ)
741 iface->rate_flags |= eHAL_TX_RATE_VHT80;
742 if (add_bss->ch_width == CH_WIDTH_80MHZ)
743 iface->rate_flags |= eHAL_TX_RATE_VHT80;
744 else if (add_bss->ch_width)
745 iface->rate_flags |= eHAL_TX_RATE_VHT40;
746 else
747 iface->rate_flags |= eHAL_TX_RATE_VHT20;
748 }
749 /* avoid to conflict with htCapable flag */
750 else if (add_bss->htCapable) {
751 if (add_bss->ch_width)
752 iface->rate_flags |= eHAL_TX_RATE_HT40;
753 else
754 iface->rate_flags |= eHAL_TX_RATE_HT20;
755 }
756
757 if (add_bss->staContext.fShortGI20Mhz ||
758 add_bss->staContext.fShortGI40Mhz)
759 iface->rate_flags |= eHAL_TX_RATE_SGI;
760
761 if (!add_bss->htCapable && !add_bss->vhtCapable)
762 iface->rate_flags = eHAL_TX_RATE_LEGACY;
763}
764
/**
 * wmi_unified_send_txbf() - set txbf parameter to fw
 * @wma: wma handle
 * @params: STA parameters carrying the TxBF capabilities
 *
 * Packs the SU/MU beamformee/beamformer capabilities into a
 * wmi_vdev_txbf_en bitfield and sends it to firmware as the
 * WMI_VDEV_PARAM_TXBF vdev parameter.
 *
 * Return: 0 for success or error code
 */
int32_t wmi_unified_send_txbf(tp_wma_handle wma, tpAddStaParams params)
{
	wmi_vdev_txbf_en txbf_en;

	/* This is set when Other partner is Bformer
	 * and we are capable bformee(enabled both in ini and fw)
	 */
	txbf_en.sutxbfee = params->vhtTxBFCapable;
	txbf_en.mutxbfee = params->vhtTxMUBformeeCapable;
	txbf_en.sutxbfer = params->enable_su_tx_bformer;
	txbf_en.mutxbfer = 0;

	/* When MU TxBfee is set, SU TxBfee must be set by default */
	if (txbf_en.mutxbfee)
		txbf_en.sutxbfee = txbf_en.mutxbfee;

	WMA_LOGD("txbf_en.sutxbfee %d txbf_en.mutxbfee %d, sutxbfer %d",
		 txbf_en.sutxbfee, txbf_en.mutxbfee, txbf_en.sutxbfer);

	/* the bitfield struct is sent as its first byte's raw value */
	return wmi_unified_vdev_set_param_send(wma->wmi_handle,
					       params->smesessionId,
					       WMI_VDEV_PARAM_TXBF,
					       *((A_UINT8 *) &txbf_en));
}
796
/**
 * wma_data_tx_ack_work_handler() - deferred handler for a data tx ack
 * @ack_work: pointer to the struct wma_tx_ack_work_ctx queued by
 *            wma_data_tx_ack_comp_hdlr()
 *
 * Invokes the UMAC-registered ack callback (if any) with 1 on success
 * and 0 on failure, then clears the per-handle ack bookkeeping and
 * frees the work context.
 *
 * Return: none
 */
static void wma_data_tx_ack_work_handler(void *ack_work)
{
	struct wma_tx_ack_work_ctx *work;
	tp_wma_handle wma_handle;
	pWMAAckFnTxComp ack_cb;

	/* NOTE(review): returning here leaks @ack_work and leaves the
	 * ack callback registered — presumably acceptable because the
	 * driver is being torn down; confirm.
	 */
	if (cds_is_load_or_unload_in_progress()) {
		WMA_LOGE("%s: Driver load/unload in progress", __func__);
		return;
	}

	work = (struct wma_tx_ack_work_ctx *)ack_work;

	wma_handle = work->wma_handle;
	ack_cb = wma_handle->umac_data_ota_ack_cb;

	if (work->status)
		WMA_LOGE("Data Tx Ack Cb Status %d", work->status);
	else
		WMA_LOGD("Data Tx Ack Cb Status %d", work->status);

	/* Call the Ack Cb registered by UMAC; status is inverted into a
	 * boolean success flag (0 status -> 1, non-zero -> 0)
	 */
	if (ack_cb)
		ack_cb((tpAniSirGlobal) (wma_handle->mac_context),
		       work->status ? 0 : 1);
	else
		WMA_LOGE("Data Tx Ack Cb is NULL");

	/* one-shot callback: clear all ack bookkeeping */
	wma_handle->umac_data_ota_ack_cb = NULL;
	wma_handle->last_umac_data_nbuf = NULL;
	qdf_mem_free(work);
	wma_handle->ack_work_ctx = NULL;
}
836
837/**
838 * wma_data_tx_ack_comp_hdlr() - handles tx data ack completion
839 * @context: context with which the handler is registered
840 * @netbuf: tx data nbuf
841 * @err: status of tx completion
842 *
843 * This is the cb registered with TxRx for
844 * Ack Complete
845 *
846 * Return: none
847 */
848void
Nirav Shahcbc6d722016-03-01 16:24:53 +0530849wma_data_tx_ack_comp_hdlr(void *wma_context, qdf_nbuf_t netbuf, int32_t status)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800850{
851 ol_txrx_pdev_handle pdev;
852 tp_wma_handle wma_handle = (tp_wma_handle) wma_context;
853
854 if (NULL == wma_handle) {
855 WMA_LOGE("%s: Invalid WMA Handle", __func__);
856 return;
857 }
858
Anurag Chouhan6d760662016-02-20 16:05:43 +0530859 pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800860
861 if (NULL == pdev) {
862 WMA_LOGE("%s: Failed to get pdev", __func__);
863 return;
864 }
865
866 /*
867 * if netBuf does not match with pending nbuf then just free the
868 * netbuf and do not call ack cb
869 */
870 if (wma_handle->last_umac_data_nbuf != netbuf) {
871 if (wma_handle->umac_data_ota_ack_cb) {
872 WMA_LOGE("%s: nbuf does not match but umac_data_ota_ack_cb is not null",
873 __func__);
874 } else {
875 WMA_LOGE("%s: nbuf does not match and umac_data_ota_ack_cb is also null",
876 __func__);
877 }
878 goto free_nbuf;
879 }
880
881 if (wma_handle && wma_handle->umac_data_ota_ack_cb) {
882 struct wma_tx_ack_work_ctx *ack_work;
883
Anurag Chouhan600c3a02016-03-01 10:33:54 +0530884 ack_work = qdf_mem_malloc(sizeof(struct wma_tx_ack_work_ctx));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800885 wma_handle->ack_work_ctx = ack_work;
886 if (ack_work) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800887 ack_work->wma_handle = wma_handle;
888 ack_work->sub_type = 0;
889 ack_work->status = status;
890
Anurag Chouhan42958bb2016-02-19 15:43:11 +0530891 qdf_create_work(0, &ack_work->ack_cmp_work,
Krishna Kumaar Natarajan9f421702015-11-10 14:56:16 -0800892 wma_data_tx_ack_work_handler,
893 ack_work);
Anurag Chouhan42958bb2016-02-19 15:43:11 +0530894 qdf_sched_work(0, &ack_work->ack_cmp_work);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800895 }
896 }
897
898free_nbuf:
899 /* unmap and freeing the tx buf as txrx is not taking care */
Nirav Shahcbc6d722016-03-01 16:24:53 +0530900 qdf_nbuf_unmap_single(pdev->osdev, netbuf, QDF_DMA_TO_DEVICE);
901 qdf_nbuf_free(netbuf);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800902}
903
904/**
905 * wma_update_txrx_chainmask() - update txrx chainmask
906 * @num_rf_chains: number rf chains
907 * @cmd_value: command value
908 *
909 * Return: none
910 */
911void wma_update_txrx_chainmask(int num_rf_chains, int *cmd_value)
912{
913 if (*cmd_value > WMA_MAX_RF_CHAINS(num_rf_chains)) {
914 WMA_LOGE("%s: Chainmask value exceeds the maximum"
915 " supported range setting it to"
916 " maximum value. Requested value %d"
917 " Updated value %d", __func__, *cmd_value,
918 WMA_MAX_RF_CHAINS(num_rf_chains));
919 *cmd_value = WMA_MAX_RF_CHAINS(num_rf_chains);
920 } else if (*cmd_value < WMA_MIN_RF_CHAINS) {
921 WMA_LOGE("%s: Chainmask value is less than the minimum"
922 " supported range setting it to"
923 " minimum value. Requested value %d"
924 " Updated value %d", __func__, *cmd_value,
925 WMA_MIN_RF_CHAINS);
926 *cmd_value = WMA_MIN_RF_CHAINS;
927 }
928}
929
/**
 * wma_peer_state_change_event_handler() - peer state change event handler
 * @handle: wma handle
 * @event_buff: event buffer
 * @len: length of buffer
 *
 * This event handler unpauses vdev if peer state change to AUTHORIZED STATE
 * (STA mode only, and only when legacy TX flow control is compiled in).
 *
 * Return: 0 for success or error code
 */
int wma_peer_state_change_event_handler(void *handle,
					uint8_t *event_buff,
					uint32_t len)
{
	WMI_PEER_STATE_EVENTID_param_tlvs *param_buf;
	wmi_peer_state_event_fixed_param *event;
	ol_txrx_vdev_handle vdev;
	tp_wma_handle wma_handle = (tp_wma_handle) handle;

	if (!event_buff) {
		WMA_LOGE("%s: Received NULL event ptr from FW", __func__);
		return -EINVAL;
	}
	param_buf = (WMI_PEER_STATE_EVENTID_param_tlvs *) event_buff;
	/* NOTE(review): param_buf is a cast of the pointer checked just
	 * above, so this branch is unreachable (and -ENOMEM is an odd
	 * errno for a NULL buffer) — kept as-is.
	 */
	if (!param_buf) {
		WMA_LOGE("%s: Received NULL buf ptr from FW", __func__);
		return -ENOMEM;
	}

	event = param_buf->fixed_param;
	vdev = wma_find_vdev_by_id(wma_handle, event->vdev_id);
	if (NULL == vdev) {
		WMA_LOGP("%s: Couldn't find vdev for vdev_id: %d",
			 __func__, event->vdev_id);
		return -EINVAL;
	}

	if (vdev->opmode == wlan_op_mode_sta
	    && event->state == WMI_PEER_STATE_AUTHORIZED) {
		/*
		 * set event so that hdd
		 * can procced and unpause tx queue
		 */
#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
		if (!wma_handle->peer_authorized_cb) {
			WMA_LOGE("%s: peer authorized cb not registered",
				 __func__);
			return -EINVAL;
		}
		wma_handle->peer_authorized_cb(vdev->vdev_id);
#endif
	}

	return 0;
}
985
986/**
987 * wma_set_enable_disable_mcc_adaptive_scheduler() -enable/disable mcc scheduler
988 * @mcc_adaptive_scheduler: enable/disable
989 *
990 * This function enable/disable mcc adaptive scheduler in fw.
991 *
Anurag Chouhanfb54ab02016-02-18 18:00:46 +0530992 * Return: QDF_STATUS_SUCCESS for sucess or error code
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800993 */
Anurag Chouhanfb54ab02016-02-18 18:00:46 +0530994QDF_STATUS wma_set_enable_disable_mcc_adaptive_scheduler(uint32_t
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800995 mcc_adaptive_scheduler)
996{
997 int ret = -1;
998 wmi_buf_t buf = 0;
999 wmi_resmgr_adaptive_ocs_enable_disable_cmd_fixed_param *cmd = NULL;
1000 tp_wma_handle wma = NULL;
1001 uint16_t len =
1002 sizeof(wmi_resmgr_adaptive_ocs_enable_disable_cmd_fixed_param);
1003
Anurag Chouhan6d760662016-02-20 16:05:43 +05301004 wma = cds_get_context(QDF_MODULE_ID_WMA);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001005 if (NULL == wma) {
1006 WMA_LOGE("%s : Failed to get wma", __func__);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301007 return QDF_STATUS_E_FAULT;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001008 }
1009
1010 buf = wmi_buf_alloc(wma->wmi_handle, len);
1011 if (!buf) {
1012 WMA_LOGP("%s : wmi_buf_alloc failed", __func__);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301013 return QDF_STATUS_E_NOMEM;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001014 }
1015 cmd = (wmi_resmgr_adaptive_ocs_enable_disable_cmd_fixed_param *)
1016 wmi_buf_data(buf);
1017
1018 WMITLV_SET_HDR(&cmd->tlv_header,
1019 WMITLV_TAG_STRUC_wmi_resmgr_adaptive_ocs_enable_disable_cmd_fixed_param,
1020 WMITLV_GET_STRUCT_TLVLEN
1021 (wmi_resmgr_adaptive_ocs_enable_disable_cmd_fixed_param));
1022 cmd->enable = mcc_adaptive_scheduler;
1023
1024 ret = wmi_unified_cmd_send(wma->wmi_handle, buf, len,
1025 WMI_RESMGR_ADAPTIVE_OCS_ENABLE_DISABLE_CMDID);
1026 if (ret) {
1027 WMA_LOGP("%s: Failed to send enable/disable MCC"
1028 " adaptive scheduler command", __func__);
Nirav Shahcbc6d722016-03-01 16:24:53 +05301029 qdf_nbuf_free(buf);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001030 }
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301031 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001032}
1033
/**
 * wma_set_mcc_channel_time_latency() -set MCC channel time latency
 * @wma: wma handle
 * @mcc_channel: mcc channel
 * @mcc_channel_time_latency: MCC channel time latency.
 *
 * Currently used to set time latency for an MCC vdev/adapter using operating
 * channel of it and channel number. The info is provided run time using
 * iwpriv command: iwpriv <wlan0 | p2p0> setMccLatency <latency in ms>.
 *
 * Return: CDF status
 */
QDF_STATUS wma_set_mcc_channel_time_latency
	(tp_wma_handle wma,
	uint32_t mcc_channel, uint32_t mcc_channel_time_latency)
{
	int ret = -1;
	wmi_buf_t buf = 0;
	wmi_resmgr_set_chan_latency_cmd_fixed_param *cmdTL = NULL;
	uint16_t len = 0;
	uint8_t *buf_ptr = NULL;
	uint32_t cfg_val = 0;
	wmi_resmgr_chan_latency chan_latency;
	struct sAniSirGlobal *pMac = NULL;
	/* Note: we only support MCC time latency for a single channel */
	uint32_t num_channels = 1;
	uint32_t channel1 = mcc_channel;
	uint32_t chan1_freq = cds_chan_to_freq(channel1);
	uint32_t latency_chan1 = mcc_channel_time_latency;

	if (!wma) {
		WMA_LOGE("%s:NULL wma ptr. Exiting", __func__);
		QDF_ASSERT(0);
		return QDF_STATUS_E_FAILURE;
	}
	pMac = cds_get_context(QDF_MODULE_ID_PE);
	if (!pMac) {
		WMA_LOGE("%s:NULL pMac ptr. Exiting", __func__);
		QDF_ASSERT(0);
		return QDF_STATUS_E_FAILURE;
	}

	/* First step is to confirm if MCC is active */
	if (!lim_is_in_mcc(pMac)) {
		WMA_LOGE("%s: MCC is not active. Exiting", __func__);
		QDF_ASSERT(0);
		return QDF_STATUS_E_FAILURE;
	}
	/* Confirm MCC adaptive scheduler feature is disabled */
	if (wlan_cfg_get_int(pMac, WNI_CFG_ENABLE_MCC_ADAPTIVE_SCHED,
			     &cfg_val) == eSIR_SUCCESS) {
		if (cfg_val == WNI_CFG_ENABLE_MCC_ADAPTIVE_SCHED_STAMAX) {
			/* Manual latency and FW adaptive scheduling are
			 * mutually exclusive; returning SUCCESS (not an
			 * error) here is deliberate - nothing is sent.
			 */
			WMA_LOGD("%s: Can't set channel latency while MCC "
				 "ADAPTIVE SCHED is enabled. Exit", __func__);
			return QDF_STATUS_SUCCESS;
		}
	} else {
		WMA_LOGE("%s: Failed to get value for MCC_ADAPTIVE_SCHED, "
			 "Exit w/o setting latency", __func__);
		QDF_ASSERT(0);
		return QDF_STATUS_E_FAILURE;
	}
	/* If 0ms latency is provided, then FW will set to a default.
	 * Otherwise, latency must be at least 30ms.
	 */
	if ((latency_chan1 > 0) &&
	    (latency_chan1 < WMI_MCC_MIN_NON_ZERO_CHANNEL_LATENCY)) {
		WMA_LOGE("%s: Invalid time latency for Channel #1 = %dms "
			 "Minimum is 30ms (or 0 to use default value by "
			 "firmware)", __func__, latency_chan1);
		return QDF_STATUS_E_INVAL;
	}

	/* Set WMI CMD for channel time latency here */
	/* TLV layout is: fixed_param struct, array TLV header, then the
	 * per-channel latency record(s); buf_ptr must advance in exactly
	 * this order or the firmware will reject/misparse the command.
	 */
	len = sizeof(wmi_resmgr_set_chan_latency_cmd_fixed_param) +
	      WMI_TLV_HDR_SIZE + /*Place holder for chan_time_latency array */
	      num_channels * sizeof(wmi_resmgr_chan_latency);
	buf = wmi_buf_alloc(wma->wmi_handle, len);
	if (!buf) {
		WMA_LOGE("%s : wmi_buf_alloc failed", __func__);
		return QDF_STATUS_E_NOMEM;
	}
	buf_ptr = (uint8_t *) wmi_buf_data(buf);
	cmdTL = (wmi_resmgr_set_chan_latency_cmd_fixed_param *)
		wmi_buf_data(buf);
	WMITLV_SET_HDR(&cmdTL->tlv_header,
		       WMITLV_TAG_STRUC_wmi_resmgr_set_chan_latency_cmd_fixed_param,
		       WMITLV_GET_STRUCT_TLVLEN
			       (wmi_resmgr_set_chan_latency_cmd_fixed_param));
	cmdTL->num_chans = num_channels;
	/* Update channel time latency information for home channel(s) */
	buf_ptr += sizeof(*cmdTL);
	WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE,
		       num_channels * sizeof(wmi_resmgr_chan_latency));
	buf_ptr += WMI_TLV_HDR_SIZE;
	chan_latency.chan_mhz = chan1_freq;
	chan_latency.latency = latency_chan1;
	qdf_mem_copy(buf_ptr, &chan_latency, sizeof(chan_latency));
	ret = wmi_unified_cmd_send(wma->wmi_handle, buf, len,
				   WMI_RESMGR_SET_CHAN_LATENCY_CMDID);
	if (ret) {
		WMA_LOGE("%s: Failed to send MCC Channel Time Latency command",
			 __func__);
		/* buffer is not consumed by WMI on failed send; free it */
		qdf_nbuf_free(buf);
		QDF_ASSERT(0);
		return QDF_STATUS_E_FAILURE;
	}
	return QDF_STATUS_SUCCESS;
}
1143
/**
 * wma_set_mcc_channel_time_quota() -set MCC channel time quota
 * @wma: wma handle
 * @adapter_1_chan_number: adapter 1 channel number
 * @adapter_1_quota: adapter 1 quota
 * @adapter_2_chan_number: adapter 2 channel number
 *
 * Currently used to set time quota for 2 MCC vdevs/adapters using (operating
 * channel, quota) for each mode . The info is provided run time using
 * iwpriv command: iwpriv <wlan0 | p2p0> setMccQuota <quota in ms>.
 * Note: the quota provided in command is for the same mode in cmd. HDD
 * checks if MCC mode is active, gets the second mode and its operating chan.
 * Quota for the 2nd role is calculated as 100 - quota of first mode.
 *
 * Return: CDF status
 */
QDF_STATUS wma_set_mcc_channel_time_quota
	(tp_wma_handle wma,
	uint32_t adapter_1_chan_number,
	uint32_t adapter_1_quota, uint32_t adapter_2_chan_number)
{
	int ret = -1;
	wmi_buf_t buf = 0;
	uint16_t len = 0;
	uint8_t *buf_ptr = NULL;
	uint32_t cfg_val = 0;
	struct sAniSirGlobal *pMac = NULL;
	wmi_resmgr_set_chan_time_quota_cmd_fixed_param *cmdTQ = NULL;
	wmi_resmgr_chan_time_quota chan_quota;
	uint32_t channel1 = adapter_1_chan_number;
	uint32_t channel2 = adapter_2_chan_number;
	uint32_t quota_chan1 = adapter_1_quota;
	/* Knowing quota of 1st chan., derive quota for 2nd chan. */
	uint32_t quota_chan2 = 100 - quota_chan1;
	/* Note: setting time quota for MCC requires info for 2 channels */
	uint32_t num_channels = 2;
	uint32_t chan1_freq = cds_chan_to_freq(adapter_1_chan_number);
	uint32_t chan2_freq = cds_chan_to_freq(adapter_2_chan_number);

	WMA_LOGD("%s: Channel1:%d, freq1:%dMHz, Quota1:%dms, "
		 "Channel2:%d, freq2:%dMHz, Quota2:%dms", __func__,
		 channel1, chan1_freq, quota_chan1, channel2, chan2_freq,
		 quota_chan2);

	if (!wma) {
		WMA_LOGE("%s:NULL wma ptr. Exiting", __func__);
		QDF_ASSERT(0);
		return QDF_STATUS_E_FAILURE;
	}
	pMac = cds_get_context(QDF_MODULE_ID_PE);
	if (!pMac) {
		WMA_LOGE("%s:NULL pMac ptr. Exiting", __func__);
		QDF_ASSERT(0);
		return QDF_STATUS_E_FAILURE;
	}

	/* First step is to confirm if MCC is active */
	if (!lim_is_in_mcc(pMac)) {
		WMA_LOGD("%s: MCC is not active. Exiting", __func__);
		QDF_ASSERT(0);
		return QDF_STATUS_E_FAILURE;
	}

	/* Confirm MCC adaptive scheduler feature is disabled */
	if (wlan_cfg_get_int(pMac, WNI_CFG_ENABLE_MCC_ADAPTIVE_SCHED,
			     &cfg_val) == eSIR_SUCCESS) {
		if (cfg_val == WNI_CFG_ENABLE_MCC_ADAPTIVE_SCHED_STAMAX) {
			/* Manual quota and FW adaptive scheduling are
			 * mutually exclusive; returning SUCCESS (not an
			 * error) here is deliberate - nothing is sent.
			 */
			WMA_LOGD("%s: Can't set channel quota while "
				 "MCC_ADAPTIVE_SCHED is enabled. Exit",
				 __func__);
			return QDF_STATUS_SUCCESS;
		}
	} else {
		WMA_LOGE("%s: Failed to retrieve "
			 "WNI_CFG_ENABLE_MCC_ADAPTIVE_SCHED. Exit", __func__);
		QDF_ASSERT(0);
		return QDF_STATUS_E_FAILURE;
	}

	/*
	 * Perform sanity check on time quota values provided.
	 */
	/* The 20..80 bound also guarantees quota_chan2 = 100 - quota_chan1
	 * computed above cannot underflow when it is sent below.
	 */
	if (quota_chan1 < WMI_MCC_MIN_CHANNEL_QUOTA ||
	    quota_chan1 > WMI_MCC_MAX_CHANNEL_QUOTA) {
		WMA_LOGE("%s: Invalid time quota for Channel #1=%dms. Minimum "
			 "is 20ms & maximum is 80ms", __func__, quota_chan1);
		return QDF_STATUS_E_INVAL;
	}
	/* Set WMI CMD for channel time quota here */
	/* TLV layout is: fixed_param struct, array TLV header, then two
	 * per-channel quota records; buf_ptr must advance in exactly this
	 * order or the firmware will reject/misparse the command.
	 */
	len = sizeof(wmi_resmgr_set_chan_time_quota_cmd_fixed_param) +
	      WMI_TLV_HDR_SIZE + /* Place holder for chan_time_quota array */
	      num_channels * sizeof(wmi_resmgr_chan_time_quota);
	buf = wmi_buf_alloc(wma->wmi_handle, len);
	if (!buf) {
		WMA_LOGE("%s : wmi_buf_alloc failed", __func__);
		QDF_ASSERT(0);
		return QDF_STATUS_E_NOMEM;
	}
	buf_ptr = (uint8_t *) wmi_buf_data(buf);
	cmdTQ = (wmi_resmgr_set_chan_time_quota_cmd_fixed_param *)
		wmi_buf_data(buf);
	WMITLV_SET_HDR(&cmdTQ->tlv_header,
		       WMITLV_TAG_STRUC_wmi_resmgr_set_chan_time_quota_cmd_fixed_param,
		       WMITLV_GET_STRUCT_TLVLEN
			       (wmi_resmgr_set_chan_time_quota_cmd_fixed_param));
	cmdTQ->num_chans = num_channels;

	/* Update channel time quota information for home channel(s) */
	buf_ptr += sizeof(*cmdTQ);
	WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE,
		       num_channels * sizeof(wmi_resmgr_chan_time_quota));
	buf_ptr += WMI_TLV_HDR_SIZE;
	chan_quota.chan_mhz = chan1_freq;
	chan_quota.channel_time_quota = quota_chan1;
	qdf_mem_copy(buf_ptr, &chan_quota, sizeof(chan_quota));
	/* Construct channel and quota record for the 2nd MCC mode. */
	buf_ptr += sizeof(chan_quota);
	chan_quota.chan_mhz = chan2_freq;
	chan_quota.channel_time_quota = quota_chan2;
	qdf_mem_copy(buf_ptr, &chan_quota, sizeof(chan_quota));

	ret = wmi_unified_cmd_send(wma->wmi_handle, buf, len,
				   WMI_RESMGR_SET_CHAN_TIME_QUOTA_CMDID);
	if (ret) {
		WMA_LOGE("Failed to send MCC Channel Time Quota command");
		/* buffer is not consumed by WMI on failed send; free it */
		qdf_nbuf_free(buf);
		QDF_ASSERT(0);
		return QDF_STATUS_E_FAILURE;
	}
	return QDF_STATUS_SUCCESS;
}
1275
/**
 * wma_set_linkstate() - set wma linkstate
 * @wma: wma handle
 * @params: link state params
 *
 * Handles only eSIR_LINK_PREASSOC_STATE (create the BSS peer) and
 * eSIR_LINK_DOWN_STATE (stop the vdev and delete the BSS peer); every
 * path except the roam-synch one replies with WMA_SET_LINK_STATE_RSP.
 *
 * Return: none
 */
void wma_set_linkstate(tp_wma_handle wma, tpLinkStateParams params)
{
	ol_txrx_pdev_handle pdev;
	ol_txrx_vdev_handle vdev;
	ol_txrx_peer_handle peer;
	uint8_t vdev_id, peer_id;
	bool roam_synch_in_progress = false;
	QDF_STATUS status;

	/* status stays true on every path below, including errors;
	 * NOTE(review): looks intentional (PE treats the rsp as an ack),
	 * but confirm against the WMA_SET_LINK_STATE_RSP handler.
	 */
	params->status = true;
	WMA_LOGD("%s: state %d selfmac %pM", __func__,
		 params->state, params->selfMacAddr);
	if ((params->state != eSIR_LINK_PREASSOC_STATE) &&
	    (params->state != eSIR_LINK_DOWN_STATE)) {
		WMA_LOGD("%s: unsupported link state %d",
			 __func__, params->state);
		goto out;
	}

	pdev = cds_get_context(QDF_MODULE_ID_TXRX);

	if (NULL == pdev) {
		WMA_LOGE("%s: Unable to get TXRX context", __func__);
		goto out;
	}

	vdev = wma_find_vdev_by_addr(wma, params->selfMacAddr, &vdev_id);
	if (!vdev) {
		WMA_LOGP("%s: vdev not found for addr: %pM",
			 __func__, params->selfMacAddr);
		goto out;
	}

	if (wma_is_vdev_in_ap_mode(wma, vdev_id)) {
		WMA_LOGD("%s: Ignoring set link req in ap mode", __func__);
		goto out;
	}

	if (params->state == eSIR_LINK_PREASSOC_STATE) {
		if (wma_is_roam_synch_in_progress(wma, vdev_id))
			roam_synch_in_progress = true;
		status = wma_create_peer(wma, pdev, vdev, params->bssid,
				WMI_PEER_TYPE_DEFAULT, vdev_id,
				roam_synch_in_progress);
		if (status != QDF_STATUS_SUCCESS)
			WMA_LOGE("%s: Unable to create peer", __func__);
		/* During roam synch no response is posted to PE; the
		 * roam-synch path completes the state machine itself.
		 */
		if (roam_synch_in_progress)
			return;
	} else {
		/* LINK_DOWN: pause TX first, then stop the vdev, and only
		 * afterwards remove the BSS peer - this ordering matters.
		 */
		WMA_LOGD("%s, vdev_id: %d, pausing tx_ll_queue for VDEV_STOP",
			 __func__, vdev_id);
		ol_txrx_vdev_pause(wma->interfaces[vdev_id].handle,
				   OL_TXQ_PAUSE_REASON_VDEV_STOP);
		wma->interfaces[vdev_id].pause_bitmap |= (1 << PAUSE_TYPE_HOST);
		if (wmi_unified_vdev_stop_send(wma->wmi_handle, vdev_id)) {
			WMA_LOGP("%s: %d Failed to send vdev stop",
				 __func__, __LINE__);
		}
		peer = ol_txrx_find_peer_by_addr(pdev, params->bssid, &peer_id);
		if (peer) {
			WMA_LOGP("%s: Deleting peer %pM vdev id %d",
				 __func__, params->bssid, vdev_id);
			wma_remove_peer(wma, params->bssid, vdev_id, peer,
					roam_synch_in_progress);
		}
	}
out:
	wma_send_msg(wma, WMA_SET_LINK_STATE_RSP, (void *)params, 0);
}
1352
1353/**
1354 * wma_unpause_vdev - unpause all vdev
1355 * @wma: wma handle
1356 *
1357 * unpause all vdev aftter resume/coming out of wow mode
1358 *
1359 * Return: none
1360 */
1361void wma_unpause_vdev(tp_wma_handle wma)
1362{
1363 int8_t vdev_id;
1364 struct wma_txrx_node *iface;
1365
1366 for (vdev_id = 0; vdev_id < wma->max_bssid; vdev_id++) {
1367 if (!wma->interfaces[vdev_id].handle)
1368 continue;
1369
1370#if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL) || defined(QCA_LL_TX_FLOW_CONTROL_V2)
1371 /* When host resume, by default, unpause all active vdev */
1372 if (wma->interfaces[vdev_id].pause_bitmap) {
1373 ol_txrx_vdev_unpause(wma->interfaces[vdev_id].handle,
1374 0xffffffff);
1375 wma->interfaces[vdev_id].pause_bitmap = 0;
1376 }
1377#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
1378
1379 iface = &wma->interfaces[vdev_id];
1380 iface->conn_state = false;
1381 }
1382}
1383
1384/**
1385 * wma_process_rate_update_indate() - rate update indication
1386 * @wma: wma handle
1387 * @pRateUpdateParams: Rate update params
1388 *
1389 * This function update rate & short GI interval to fw based on params
1390 * send by SME.
1391 *
1392 * Return: CDF status
1393 */
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301394QDF_STATUS wma_process_rate_update_indicate(tp_wma_handle wma,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001395 tSirRateUpdateInd *
1396 pRateUpdateParams)
1397{
1398 int32_t ret = 0;
1399 uint8_t vdev_id = 0;
1400 void *pdev;
1401 int32_t mbpsx10_rate = -1;
1402 uint32_t paramId;
1403 uint8_t rate = 0;
1404 uint32_t short_gi;
1405 struct wma_txrx_node *intr = wma->interfaces;
1406
1407 /* Get the vdev id */
Srinivas Girigowdaafede182015-11-18 22:36:12 -08001408 pdev = wma_find_vdev_by_addr(wma, pRateUpdateParams->bssid.bytes,
1409 &vdev_id);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001410 if (!pdev) {
1411 WMA_LOGE("vdev handle is invalid for %pM",
Srinivas Girigowdaafede182015-11-18 22:36:12 -08001412 pRateUpdateParams->bssid.bytes);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301413 qdf_mem_free(pRateUpdateParams);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301414 return QDF_STATUS_E_INVAL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001415 }
1416 short_gi = intr[vdev_id].config.shortgi;
1417 if (short_gi == 0)
1418 short_gi = (intr[vdev_id].rate_flags & eHAL_TX_RATE_SGI) ?
1419 true : false;
1420 /* first check if reliable TX mcast rate is used. If not check the bcast.
1421 * Then is mcast. Mcast rate is saved in mcastDataRate24GHz
1422 */
1423 if (pRateUpdateParams->reliableMcastDataRateTxFlag > 0) {
1424 mbpsx10_rate = pRateUpdateParams->reliableMcastDataRate;
1425 paramId = WMI_VDEV_PARAM_MCAST_DATA_RATE;
1426 if (pRateUpdateParams->
1427 reliableMcastDataRateTxFlag & eHAL_TX_RATE_SGI)
1428 short_gi = 1; /* upper layer specified short GI */
1429 } else if (pRateUpdateParams->bcastDataRate > -1) {
1430 mbpsx10_rate = pRateUpdateParams->bcastDataRate;
1431 paramId = WMI_VDEV_PARAM_BCAST_DATA_RATE;
1432 } else {
1433 mbpsx10_rate = pRateUpdateParams->mcastDataRate24GHz;
1434 paramId = WMI_VDEV_PARAM_MCAST_DATA_RATE;
1435 if (pRateUpdateParams->
1436 mcastDataRate24GHzTxFlag & eHAL_TX_RATE_SGI)
1437 short_gi = 1; /* upper layer specified short GI */
1438 }
1439 WMA_LOGE("%s: dev_id = %d, dev_type = %d, dev_mode = %d, "
1440 "mac = %pM, config.shortgi = %d, rate_flags = 0x%x",
1441 __func__, vdev_id, intr[vdev_id].type,
Srinivas Girigowdaafede182015-11-18 22:36:12 -08001442 pRateUpdateParams->dev_mode, pRateUpdateParams->bssid.bytes,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001443 intr[vdev_id].config.shortgi, intr[vdev_id].rate_flags);
1444 ret = wma_encode_mc_rate(short_gi, intr[vdev_id].config.chwidth,
1445 intr[vdev_id].chanmode, intr[vdev_id].mhz,
1446 mbpsx10_rate, pRateUpdateParams->nss, &rate);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301447 if (ret != QDF_STATUS_SUCCESS) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001448 WMA_LOGE("%s: Error, Invalid input rate value", __func__);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301449 qdf_mem_free(pRateUpdateParams);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001450 return ret;
1451 }
1452 ret = wmi_unified_vdev_set_param_send(wma->wmi_handle, vdev_id,
1453 WMI_VDEV_PARAM_SGI, short_gi);
1454 if (ret) {
1455 WMA_LOGE("%s: Failed to Set WMI_VDEV_PARAM_SGI (%d), ret = %d",
1456 __func__, short_gi, ret);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301457 qdf_mem_free(pRateUpdateParams);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301458 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001459 }
1460 ret = wmi_unified_vdev_set_param_send(wma->wmi_handle,
1461 vdev_id, paramId, rate);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301462 qdf_mem_free(pRateUpdateParams);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001463 if (ret) {
1464 WMA_LOGE("%s: Failed to Set rate, ret = %d", __func__, ret);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301465 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001466 }
1467
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301468 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001469}
1470
1471/**
1472 * wma_mgmt_tx_ack_work_handler() - mgmt tx ack work queue
1473 * @ack_work: work structure
1474 *
1475 * Return: none
1476 */
Krishna Kumaar Natarajan9f421702015-11-10 14:56:16 -08001477static void wma_mgmt_tx_ack_work_handler(void *ack_work)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001478{
1479 struct wma_tx_ack_work_ctx *work;
1480 tp_wma_handle wma_handle;
1481 pWMAAckFnTxComp ack_cb;
1482
Rajeev Kumarfec3dbe2016-01-19 15:23:52 -08001483 if (cds_is_load_or_unload_in_progress()) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001484 WMA_LOGE("%s: Driver load/unload in progress", __func__);
1485 return;
1486 }
1487
Krishna Kumaar Natarajan9f421702015-11-10 14:56:16 -08001488 work = (struct wma_tx_ack_work_ctx *)ack_work;
1489
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001490 wma_handle = work->wma_handle;
1491 ack_cb = wma_handle->umac_ota_ack_cb[work->sub_type];
1492
1493 WMA_LOGD("Tx Ack Cb SubType %d Status %d",
1494 work->sub_type, work->status);
1495
1496 /* Call the Ack Cb registered by UMAC */
1497 ack_cb((tpAniSirGlobal) (wma_handle->mac_context),
1498 work->status ? 0 : 1);
1499
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301500 qdf_mem_free(work);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001501 wma_handle->ack_work_ctx = NULL;
1502}
1503
1504/**
1505 * wma_mgmt_tx_comp_conf_ind() - Post mgmt tx complete indication to PE.
1506 * @wma_handle: Pointer to WMA handle
1507 * @sub_type: Tx mgmt frame sub type
1508 * @status: Mgmt frame tx status
1509 *
1510 * This function sends mgmt complition confirmation to PE for deauth
1511 * and deassoc frames.
1512 *
1513 * Return: none
1514 */
1515static void
1516wma_mgmt_tx_comp_conf_ind(tp_wma_handle wma_handle, uint8_t sub_type,
1517 int32_t status)
1518{
1519 int32_t tx_comp_status;
1520
1521 tx_comp_status = status ? 0 : 1;
1522 if (sub_type == SIR_MAC_MGMT_DISASSOC) {
1523 wma_send_msg(wma_handle, WMA_DISASSOC_TX_COMP, NULL,
1524 tx_comp_status);
1525 } else if (sub_type == SIR_MAC_MGMT_DEAUTH) {
1526 wma_send_msg(wma_handle, WMA_DEAUTH_TX_COMP, NULL,
1527 tx_comp_status);
1528 }
1529}
1530
1531/**
1532 * wma_mgmt_tx_ack_comp_hdlr() - handles tx ack mgmt completion
1533 * @context: context with which the handler is registered
1534 * @netbuf: tx mgmt nbuf
1535 * @status: status of tx completion
1536 *
1537 * This is callback registered with TxRx for
1538 * Ack Complete.
1539 *
1540 * Return: none
1541 */
1542static void
Nirav Shahcbc6d722016-03-01 16:24:53 +05301543wma_mgmt_tx_ack_comp_hdlr(void *wma_context, qdf_nbuf_t netbuf, int32_t status)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001544{
Nirav Shahcbc6d722016-03-01 16:24:53 +05301545 tpSirMacFrameCtl pFc = (tpSirMacFrameCtl) (qdf_nbuf_data(netbuf));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001546 tp_wma_handle wma_handle = (tp_wma_handle) wma_context;
1547
1548 if (wma_handle && wma_handle->umac_ota_ack_cb[pFc->subType]) {
1549 if ((pFc->subType == SIR_MAC_MGMT_DISASSOC) ||
1550 (pFc->subType == SIR_MAC_MGMT_DEAUTH)) {
1551 wma_mgmt_tx_comp_conf_ind(wma_handle,
1552 (uint8_t) pFc->subType,
1553 status);
1554 } else {
1555 struct wma_tx_ack_work_ctx *ack_work;
1556
1557 ack_work =
Anurag Chouhan600c3a02016-03-01 10:33:54 +05301558 qdf_mem_malloc(sizeof(struct wma_tx_ack_work_ctx));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001559
1560 if (ack_work) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001561 ack_work->wma_handle = wma_handle;
1562 ack_work->sub_type = pFc->subType;
1563 ack_work->status = status;
1564
Anurag Chouhan42958bb2016-02-19 15:43:11 +05301565 qdf_create_work(0, &ack_work->ack_cmp_work,
Krishna Kumaar Natarajan9f421702015-11-10 14:56:16 -08001566 wma_mgmt_tx_ack_work_handler,
1567 ack_work);
1568
Anurag Chouhan42958bb2016-02-19 15:43:11 +05301569 qdf_sched_work(0, &ack_work->ack_cmp_work);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001570 }
1571 }
1572 }
1573}
1574
1575/**
1576 * wma_mgmt_tx_dload_comp_hldr() - handles tx mgmt completion
1577 * @context: context with which the handler is registered
1578 * @netbuf: tx mgmt nbuf
1579 * @status: status of tx completion
1580 *
1581 * This function calls registered download callback while sending mgmt packet.
1582 *
1583 * Return: none
1584 */
1585static void
Nirav Shahcbc6d722016-03-01 16:24:53 +05301586wma_mgmt_tx_dload_comp_hldr(void *wma_context, qdf_nbuf_t netbuf,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001587 int32_t status)
1588{
Anurag Chouhance0dc992016-02-16 18:18:03 +05301589 QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001590
1591 tp_wma_handle wma_handle = (tp_wma_handle) wma_context;
1592 void *mac_context = wma_handle->mac_context;
1593
1594 WMA_LOGD("Tx Complete Status %d", status);
1595
1596 if (!wma_handle->tx_frm_download_comp_cb) {
1597 WMA_LOGE("Tx Complete Cb not registered by umac");
1598 return;
1599 }
1600
1601 /* Call Tx Mgmt Complete Callback registered by umac */
1602 wma_handle->tx_frm_download_comp_cb(mac_context, netbuf, 0);
1603
1604 /* Reset Callback */
1605 wma_handle->tx_frm_download_comp_cb = NULL;
1606
1607 /* Set the Tx Mgmt Complete Event */
Anurag Chouhance0dc992016-02-16 18:18:03 +05301608 qdf_status = qdf_event_set(&wma_handle->tx_frm_download_comp_event);
1609 if (!QDF_IS_STATUS_SUCCESS(qdf_status))
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001610 WMA_LOGP("%s: Event Set failed - tx_frm_comp_event", __func__);
1611}
1612
1613/**
1614 * wma_tx_attach() - attach tx related callbacks
1615 * @pwmaCtx: wma context
1616 *
1617 * attaches tx fn with underlying layer.
1618 *
1619 * Return: CDF status
1620 */
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301621QDF_STATUS wma_tx_attach(tp_wma_handle wma_handle)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001622{
1623 /* Get the Vos Context */
1624 p_cds_contextType cds_handle =
1625 (p_cds_contextType) (wma_handle->cds_context);
1626
1627 /* Get the txRx Pdev handle */
1628 ol_txrx_pdev_handle txrx_pdev =
1629 (ol_txrx_pdev_handle) (cds_handle->pdev_txrx_ctx);
1630
1631 /* Register for Tx Management Frames */
1632 ol_txrx_mgmt_tx_cb_set(txrx_pdev, GENERIC_NODOWLOAD_ACK_COMP_INDEX,
1633 NULL, wma_mgmt_tx_ack_comp_hdlr, wma_handle);
1634
1635 ol_txrx_mgmt_tx_cb_set(txrx_pdev, GENERIC_DOWNLD_COMP_NOACK_COMP_INDEX,
1636 wma_mgmt_tx_dload_comp_hldr, NULL, wma_handle);
1637
1638 ol_txrx_mgmt_tx_cb_set(txrx_pdev, GENERIC_DOWNLD_COMP_ACK_COMP_INDEX,
1639 wma_mgmt_tx_dload_comp_hldr,
1640 wma_mgmt_tx_ack_comp_hdlr, wma_handle);
1641
1642 /* Store the Mac Context */
1643 wma_handle->mac_context = cds_handle->pMACContext;
1644
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301645 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001646}
1647
1648/**
1649 * wma_tx_detach() - detach tx related callbacks
1650 * @tp_wma_handle: wma context
1651 *
1652 * Deregister with TxRx for Tx Mgmt Download and Ack completion.
1653 *
1654 * Return: CDF status
1655 */
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301656QDF_STATUS wma_tx_detach(tp_wma_handle wma_handle)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001657{
1658 uint32_t frame_index = 0;
1659
1660 /* Get the Vos Context */
1661 p_cds_contextType cds_handle =
1662 (p_cds_contextType) (wma_handle->cds_context);
1663
1664 /* Get the txRx Pdev handle */
1665 ol_txrx_pdev_handle txrx_pdev =
1666 (ol_txrx_pdev_handle) (cds_handle->pdev_txrx_ctx);
1667
Himanshu Agarwale1086fa2015-10-19 18:05:15 +05301668 if (txrx_pdev) {
1669 /* Deregister with TxRx for Tx Mgmt completion call back */
1670 for (frame_index = 0; frame_index < FRAME_INDEX_MAX;
1671 frame_index++) {
1672 ol_txrx_mgmt_tx_cb_set(txrx_pdev, frame_index, NULL,
1673 NULL, txrx_pdev);
1674 }
1675 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001676 /* Destroy Tx Frame Complete event */
Anurag Chouhance0dc992016-02-16 18:18:03 +05301677 qdf_event_destroy(&wma_handle->tx_frm_download_comp_event);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001678
1679 /* Tx queue empty check event (dummy event) */
Anurag Chouhance0dc992016-02-16 18:18:03 +05301680 qdf_event_destroy(&wma_handle->tx_queue_empty_event);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001681
1682 /* Reset Tx Frm Callbacks */
1683 wma_handle->tx_frm_download_comp_cb = NULL;
1684
1685 /* Reset Tx Data Frame Ack Cb */
1686 wma_handle->umac_data_ota_ack_cb = NULL;
1687
1688 /* Reset last Tx Data Frame nbuf ptr */
1689 wma_handle->last_umac_data_nbuf = NULL;
1690
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301691 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001692}
1693
1694#if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL) || defined(QCA_LL_TX_FLOW_CONTROL_V2)
1695/**
1696 * wma_mcc_vdev_tx_pause_evt_handler() - pause event handler
1697 * @handle: wma handle
1698 * @event: event buffer
1699 * @len: data length
1700 *
1701 * This function handle pause event from fw and pause/unpause
1702 * vdev.
1703 *
1704 * Return: 0 for success or error code.
1705 */
1706int wma_mcc_vdev_tx_pause_evt_handler(void *handle, uint8_t *event,
1707 uint32_t len)
1708{
1709 tp_wma_handle wma = (tp_wma_handle) handle;
1710 WMI_TX_PAUSE_EVENTID_param_tlvs *param_buf;
1711 wmi_tx_pause_event_fixed_param *wmi_event;
1712 uint8_t vdev_id;
1713 A_UINT32 vdev_map;
1714
1715 param_buf = (WMI_TX_PAUSE_EVENTID_param_tlvs *) event;
1716 if (!param_buf) {
1717 WMA_LOGE("Invalid roam event buffer");
1718 return -EINVAL;
1719 }
1720
1721 if (wma_get_wow_bus_suspend(wma)) {
1722 WMA_LOGD(" Suspend is in progress: Pause/Unpause Tx is NoOp");
1723 return 0;
1724 }
1725
1726 wmi_event = param_buf->fixed_param;
1727 vdev_map = wmi_event->vdev_map;
1728 /* FW mapped vdev from ID
1729 * vdev_map = (1 << vdev_id)
1730 * So, host should unmap to ID */
1731 for (vdev_id = 0; vdev_map != 0; vdev_id++) {
1732 if (!(vdev_map & 0x1)) {
1733 /* No Vdev */
1734 } else {
1735 if (!wma->interfaces[vdev_id].handle) {
1736 WMA_LOGE("%s: invalid vdev ID %d", __func__,
1737 vdev_id);
1738 /* Test Next VDEV */
1739 vdev_map >>= 1;
1740 continue;
1741 }
1742
1743 /* PAUSE action, add bitmap */
1744 if (ACTION_PAUSE == wmi_event->action) {
1745 /*
1746 * Now only support per-dev pause so it is not
1747 * necessary to pause a paused queue again.
1748 */
1749 if (!wma->interfaces[vdev_id].pause_bitmap)
1750 ol_txrx_vdev_pause(
1751 wma->interfaces[vdev_id].
1752 handle,
1753 OL_TXQ_PAUSE_REASON_FW);
1754 wma->interfaces[vdev_id].pause_bitmap |=
1755 (1 << wmi_event->pause_type);
1756 }
1757 /* UNPAUSE action, clean bitmap */
1758 else if (ACTION_UNPAUSE == wmi_event->action) {
1759 /* Handle unpause only if already paused */
1760 if (wma->interfaces[vdev_id].pause_bitmap) {
1761 wma->interfaces[vdev_id].pause_bitmap &=
1762 ~(1 << wmi_event->pause_type);
1763
1764 if (!wma->interfaces[vdev_id].
1765 pause_bitmap) {
1766 /* PAUSE BIT MAP is cleared
1767 * UNPAUSE VDEV */
1768 ol_txrx_vdev_unpause(
1769 wma->interfaces[vdev_id]
1770 .handle,
1771 OL_TXQ_PAUSE_REASON_FW);
1772 }
1773 }
1774 } else {
1775 WMA_LOGE("Not Valid Action Type %d",
1776 wmi_event->action);
1777 }
1778
1779 WMA_LOGD
1780 ("vdev_id %d, pause_map 0x%x, pause type %d, action %d",
1781 vdev_id, wma->interfaces[vdev_id].pause_bitmap,
1782 wmi_event->pause_type, wmi_event->action);
1783 }
1784 /* Test Next VDEV */
1785 vdev_map >>= 1;
1786 }
1787
1788 return 0;
1789}
1790
1791#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
1792
1793/**
1794 * wma_process_init_thermal_info() - initialize thermal info
1795 * @wma: Pointer to WMA handle
1796 * @pThermalParams: Pointer to thermal mitigation parameters
1797 *
1798 * This function initializes the thermal management table in WMA,
1799 * sends down the initial temperature thresholds to the firmware
1800 * and configures the throttle period in the tx rx module
1801 *
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301802 * Returns: QDF_STATUS_SUCCESS for success otherwise failure
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001803 */
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301804QDF_STATUS wma_process_init_thermal_info(tp_wma_handle wma,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001805 t_thermal_mgmt *pThermalParams)
1806{
1807 t_thermal_cmd_params thermal_params;
1808 ol_txrx_pdev_handle curr_pdev;
1809
1810 if (NULL == wma || NULL == pThermalParams) {
1811 WMA_LOGE("TM Invalid input");
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301812 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001813 }
1814
Anurag Chouhan6d760662016-02-20 16:05:43 +05301815 curr_pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001816 if (NULL == curr_pdev) {
1817 WMA_LOGE("%s: Failed to get pdev", __func__);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301818 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001819 }
1820
1821 WMA_LOGD("TM enable %d period %d", pThermalParams->thermalMgmtEnabled,
1822 pThermalParams->throttlePeriod);
1823
1824 wma->thermal_mgmt_info.thermalMgmtEnabled =
1825 pThermalParams->thermalMgmtEnabled;
1826 wma->thermal_mgmt_info.thermalLevels[0].minTempThreshold =
1827 pThermalParams->thermalLevels[0].minTempThreshold;
1828 wma->thermal_mgmt_info.thermalLevels[0].maxTempThreshold =
1829 pThermalParams->thermalLevels[0].maxTempThreshold;
1830 wma->thermal_mgmt_info.thermalLevels[1].minTempThreshold =
1831 pThermalParams->thermalLevels[1].minTempThreshold;
1832 wma->thermal_mgmt_info.thermalLevels[1].maxTempThreshold =
1833 pThermalParams->thermalLevels[1].maxTempThreshold;
1834 wma->thermal_mgmt_info.thermalLevels[2].minTempThreshold =
1835 pThermalParams->thermalLevels[2].minTempThreshold;
1836 wma->thermal_mgmt_info.thermalLevels[2].maxTempThreshold =
1837 pThermalParams->thermalLevels[2].maxTempThreshold;
1838 wma->thermal_mgmt_info.thermalLevels[3].minTempThreshold =
1839 pThermalParams->thermalLevels[3].minTempThreshold;
1840 wma->thermal_mgmt_info.thermalLevels[3].maxTempThreshold =
1841 pThermalParams->thermalLevels[3].maxTempThreshold;
1842 wma->thermal_mgmt_info.thermalCurrLevel = WLAN_WMA_THERMAL_LEVEL_0;
1843
1844 WMA_LOGD("TM level min max:\n"
1845 "0 %d %d\n"
1846 "1 %d %d\n"
1847 "2 %d %d\n"
1848 "3 %d %d",
1849 wma->thermal_mgmt_info.thermalLevels[0].minTempThreshold,
1850 wma->thermal_mgmt_info.thermalLevels[0].maxTempThreshold,
1851 wma->thermal_mgmt_info.thermalLevels[1].minTempThreshold,
1852 wma->thermal_mgmt_info.thermalLevels[1].maxTempThreshold,
1853 wma->thermal_mgmt_info.thermalLevels[2].minTempThreshold,
1854 wma->thermal_mgmt_info.thermalLevels[2].maxTempThreshold,
1855 wma->thermal_mgmt_info.thermalLevels[3].minTempThreshold,
1856 wma->thermal_mgmt_info.thermalLevels[3].maxTempThreshold);
1857
1858 if (wma->thermal_mgmt_info.thermalMgmtEnabled) {
1859 ol_tx_throttle_init_period(curr_pdev,
1860 pThermalParams->throttlePeriod);
1861
1862 /* Get the temperature thresholds to set in firmware */
1863 thermal_params.minTemp =
1864 wma->thermal_mgmt_info.thermalLevels[WLAN_WMA_THERMAL_LEVEL_0].minTempThreshold;
1865 thermal_params.maxTemp =
1866 wma->thermal_mgmt_info.thermalLevels[WLAN_WMA_THERMAL_LEVEL_0].maxTempThreshold;
1867 thermal_params.thermalEnable =
1868 wma->thermal_mgmt_info.thermalMgmtEnabled;
1869
1870 WMA_LOGE("TM sending the following to firmware: min %d max %d enable %d",
1871 thermal_params.minTemp, thermal_params.maxTemp,
1872 thermal_params.thermalEnable);
1873
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301874 if (QDF_STATUS_SUCCESS !=
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001875 wma_set_thermal_mgmt(wma, thermal_params)) {
1876 WMA_LOGE("Could not send thermal mgmt command to the firmware!");
1877 }
1878 }
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301879 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001880}
1881
1882/**
1883 * wma_set_thermal_level_ind() - send SME set thermal level indication message
1884 * @level: thermal level
1885 *
1886 * Send SME SET_THERMAL_LEVEL_IND message
1887 *
1888 * Returns: none
1889 */
1890static void wma_set_thermal_level_ind(u_int8_t level)
1891{
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301892 QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001893 cds_msg_t sme_msg = {0};
1894
1895 WMA_LOGI(FL("Thermal level: %d"), level);
1896
1897 sme_msg.type = eWNI_SME_SET_THERMAL_LEVEL_IND;
1898 sme_msg.bodyptr = NULL;
1899 sme_msg.bodyval = level;
1900
Anurag Chouhan6d760662016-02-20 16:05:43 +05301901 qdf_status = cds_mq_post_message(QDF_MODULE_ID_SME, &sme_msg);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301902 if (!QDF_IS_STATUS_SUCCESS(qdf_status))
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001903 WMA_LOGE(FL(
1904 "Fail to post set thermal level ind msg"));
1905}
1906
1907/**
1908 * wma_process_set_thermal_level() - sets thermal level
1909 * @wma: Pointer to WMA handle
1910 * @thermal_level : Thermal level
1911 *
1912 * This function sets the new thermal throttle level in the
1913 * txrx module and sends down the corresponding temperature
1914 * thresholds to the firmware
1915 *
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301916 * Returns: QDF_STATUS_SUCCESS for success otherwise failure
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001917 */
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301918QDF_STATUS wma_process_set_thermal_level(tp_wma_handle wma,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001919 uint8_t thermal_level)
1920{
1921 ol_txrx_pdev_handle curr_pdev;
1922
1923 if (NULL == wma) {
1924 WMA_LOGE("TM Invalid input");
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301925 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001926 }
1927
Anurag Chouhan6d760662016-02-20 16:05:43 +05301928 curr_pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001929 if (NULL == curr_pdev) {
1930 WMA_LOGE("%s: Failed to get pdev", __func__);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301931 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001932 }
1933
1934 WMA_LOGE("TM set level %d", thermal_level);
1935
1936 /* Check if thermal mitigation is enabled */
1937 if (!wma->thermal_mgmt_info.thermalMgmtEnabled) {
1938 WMA_LOGE("Thermal mgmt is not enabled, ignoring set level command");
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301939 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001940 }
1941
1942 if (thermal_level >= WLAN_WMA_MAX_THERMAL_LEVELS) {
1943 WMA_LOGE("Invalid thermal level set %d", thermal_level);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301944 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001945 }
1946
1947 if (thermal_level == wma->thermal_mgmt_info.thermalCurrLevel) {
1948 WMA_LOGD("Current level %d is same as the set level, ignoring",
1949 wma->thermal_mgmt_info.thermalCurrLevel);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301950 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001951 }
1952
1953 wma->thermal_mgmt_info.thermalCurrLevel = thermal_level;
1954
1955 ol_tx_throttle_set_level(curr_pdev, thermal_level);
1956
1957 /* Send SME SET_THERMAL_LEVEL_IND message */
1958 wma_set_thermal_level_ind(thermal_level);
1959
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301960 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001961}
1962
1963
1964/**
1965 * wma_set_thermal_mgmt() - set thermal mgmt command to fw
1966 * @wma_handle: Pointer to WMA handle
1967 * @thermal_info: Thermal command information
1968 *
1969 * This function sends the thermal management command
1970 * to the firmware
1971 *
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301972 * Return: QDF_STATUS_SUCCESS for success otherwise failure
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001973 */
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301974QDF_STATUS wma_set_thermal_mgmt(tp_wma_handle wma_handle,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001975 t_thermal_cmd_params thermal_info)
1976{
1977 wmi_thermal_mgmt_cmd_fixed_param *cmd = NULL;
1978 wmi_buf_t buf = NULL;
1979 int status = 0;
1980 uint32_t len = 0;
1981
1982 len = sizeof(*cmd);
1983
1984 buf = wmi_buf_alloc(wma_handle->wmi_handle, len);
1985 if (!buf) {
1986 WMA_LOGE("Failed to allocate buffer to send set key cmd");
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301987 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001988 }
1989
1990 cmd = (wmi_thermal_mgmt_cmd_fixed_param *) wmi_buf_data(buf);
1991
1992 WMITLV_SET_HDR(&cmd->tlv_header,
1993 WMITLV_TAG_STRUC_wmi_thermal_mgmt_cmd_fixed_param,
1994 WMITLV_GET_STRUCT_TLVLEN
1995 (wmi_thermal_mgmt_cmd_fixed_param));
1996
1997 cmd->lower_thresh_degreeC = thermal_info.minTemp;
1998 cmd->upper_thresh_degreeC = thermal_info.maxTemp;
1999 cmd->enable = thermal_info.thermalEnable;
2000
2001 WMA_LOGE("TM Sending thermal mgmt cmd: low temp %d, upper temp %d, enabled %d",
2002 cmd->lower_thresh_degreeC, cmd->upper_thresh_degreeC, cmd->enable);
2003
2004 status = wmi_unified_cmd_send(wma_handle->wmi_handle, buf, len,
2005 WMI_THERMAL_MGMT_CMDID);
2006 if (status) {
Nirav Shahcbc6d722016-03-01 16:24:53 +05302007 qdf_nbuf_free(buf);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002008 WMA_LOGE("%s:Failed to send thermal mgmt command", __func__);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302009 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002010 }
2011
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302012 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002013}
2014
2015/**
2016 * wma_thermal_mgmt_get_level() - returns throttle level
2017 * @handle: Pointer to WMA handle
2018 * @temp: temperature
2019 *
2020 * This function returns the thermal(throttle) level
2021 * given the temperature
2022 *
2023 * Return: thermal (throttle) level
2024 */
2025uint8_t wma_thermal_mgmt_get_level(void *handle, uint32_t temp)
2026{
2027 tp_wma_handle wma = (tp_wma_handle) handle;
2028 int i;
2029 uint8_t level;
2030
2031 level = i = wma->thermal_mgmt_info.thermalCurrLevel;
2032 while (temp < wma->thermal_mgmt_info.thermalLevels[i].minTempThreshold
2033 && i > 0) {
2034 i--;
2035 level = i;
2036 }
2037
2038 i = wma->thermal_mgmt_info.thermalCurrLevel;
2039 while (temp > wma->thermal_mgmt_info.thermalLevels[i].maxTempThreshold
2040 && i < (WLAN_WMA_MAX_THERMAL_LEVELS - 1)) {
2041 i++;
2042 level = i;
2043 }
2044
2045 WMA_LOGW("Change thermal level from %d -> %d\n",
2046 wma->thermal_mgmt_info.thermalCurrLevel, level);
2047
2048 return level;
2049}
2050
2051/**
2052 * wma_thermal_mgmt_evt_handler() - thermal mgmt event handler
2053 * @wma_handle: Pointer to WMA handle
2054 * @event: Thermal event information
2055 *
2056 * This function handles the thermal mgmt event from the firmware len
2057 *
2058 * Return: 0 for success otherwise failure
2059 */
2060int wma_thermal_mgmt_evt_handler(void *handle, uint8_t *event,
2061 uint32_t len)
2062{
2063 tp_wma_handle wma;
2064 wmi_thermal_mgmt_event_fixed_param *tm_event;
2065 uint8_t thermal_level;
2066 t_thermal_cmd_params thermal_params;
2067 WMI_THERMAL_MGMT_EVENTID_param_tlvs *param_buf;
2068 ol_txrx_pdev_handle curr_pdev;
2069
2070 if (NULL == event || NULL == handle) {
2071 WMA_LOGE("Invalid thermal mitigation event buffer");
2072 return -EINVAL;
2073 }
2074
2075 wma = (tp_wma_handle) handle;
2076
2077 if (NULL == wma) {
2078 WMA_LOGE("%s: Failed to get wma handle", __func__);
2079 return -EINVAL;
2080 }
2081
2082 param_buf = (WMI_THERMAL_MGMT_EVENTID_param_tlvs *) event;
2083
Anurag Chouhan6d760662016-02-20 16:05:43 +05302084 curr_pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002085 if (NULL == curr_pdev) {
2086 WMA_LOGE("%s: Failed to get pdev", __func__);
2087 return -EINVAL;
2088 }
2089
2090 /* Check if thermal mitigation is enabled */
2091 if (!wma->thermal_mgmt_info.thermalMgmtEnabled) {
2092 WMA_LOGE("Thermal mgmt is not enabled, ignoring event");
2093 return -EINVAL;
2094 }
2095
2096 tm_event = param_buf->fixed_param;
2097 WMA_LOGD("Thermal mgmt event received with temperature %d",
2098 tm_event->temperature_degreeC);
2099
2100 /* Get the thermal mitigation level for the reported temperature */
2101 thermal_level =
2102 wma_thermal_mgmt_get_level(handle, tm_event->temperature_degreeC);
2103 WMA_LOGD("Thermal mgmt level %d", thermal_level);
2104
2105 if (thermal_level == wma->thermal_mgmt_info.thermalCurrLevel) {
2106 WMA_LOGD("Current level %d is same as the set level, ignoring",
2107 wma->thermal_mgmt_info.thermalCurrLevel);
2108 return 0;
2109 }
2110
2111 wma->thermal_mgmt_info.thermalCurrLevel = thermal_level;
2112
2113 /* Inform txrx */
2114 ol_tx_throttle_set_level(curr_pdev, thermal_level);
2115
2116 /* Send SME SET_THERMAL_LEVEL_IND message */
2117 wma_set_thermal_level_ind(thermal_level);
2118
2119 /* Get the temperature thresholds to set in firmware */
2120 thermal_params.minTemp =
2121 wma->thermal_mgmt_info.thermalLevels[thermal_level].
2122 minTempThreshold;
2123 thermal_params.maxTemp =
2124 wma->thermal_mgmt_info.thermalLevels[thermal_level].
2125 maxTempThreshold;
2126 thermal_params.thermalEnable =
2127 wma->thermal_mgmt_info.thermalMgmtEnabled;
2128
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302129 if (QDF_STATUS_SUCCESS != wma_set_thermal_mgmt(wma, thermal_params)) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002130 WMA_LOGE("Could not send thermal mgmt command to the firmware!");
2131 return -EINVAL;
2132 }
2133
2134 return 0;
2135}
2136
2137/**
2138 * wma_decap_to_8023() - Decapsulate to 802.3 format
2139 * @msdu: skb buffer
2140 * @info: decapsulate info
2141 *
2142 * Return: none
2143 */
Nirav Shahcbc6d722016-03-01 16:24:53 +05302144static void wma_decap_to_8023(qdf_nbuf_t msdu, struct wma_decap_info_t *info)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002145{
2146 struct llc_snap_hdr_t *llc_hdr;
2147 uint16_t ether_type;
2148 uint16_t l2_hdr_space;
2149 struct ieee80211_qosframe_addr4 *wh;
2150 uint8_t local_buf[ETHERNET_HDR_LEN];
2151 uint8_t *buf;
2152 struct ethernet_hdr_t *ethr_hdr;
2153
Nirav Shahcbc6d722016-03-01 16:24:53 +05302154 buf = (uint8_t *) qdf_nbuf_data(msdu);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002155 llc_hdr = (struct llc_snap_hdr_t *)buf;
2156 ether_type = (llc_hdr->ethertype[0] << 8) | llc_hdr->ethertype[1];
2157 /* do llc remove if needed */
2158 l2_hdr_space = 0;
2159 if (IS_SNAP(llc_hdr)) {
2160 if (IS_BTEP(llc_hdr)) {
2161 /* remove llc */
2162 l2_hdr_space += sizeof(struct llc_snap_hdr_t);
2163 llc_hdr = NULL;
2164 } else if (IS_RFC1042(llc_hdr)) {
2165 if (!(ether_type == ETHERTYPE_AARP ||
2166 ether_type == ETHERTYPE_IPX)) {
2167 /* remove llc */
2168 l2_hdr_space += sizeof(struct llc_snap_hdr_t);
2169 llc_hdr = NULL;
2170 }
2171 }
2172 }
2173 if (l2_hdr_space > ETHERNET_HDR_LEN) {
Nirav Shahcbc6d722016-03-01 16:24:53 +05302174 buf = qdf_nbuf_pull_head(msdu, l2_hdr_space - ETHERNET_HDR_LEN);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002175 } else if (l2_hdr_space < ETHERNET_HDR_LEN) {
Nirav Shahcbc6d722016-03-01 16:24:53 +05302176 buf = qdf_nbuf_push_head(msdu, ETHERNET_HDR_LEN - l2_hdr_space);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002177 }
2178
2179 /* mpdu hdr should be present in info,re-create ethr_hdr based on mpdu hdr */
2180 wh = (struct ieee80211_qosframe_addr4 *)info->hdr;
2181 ethr_hdr = (struct ethernet_hdr_t *)local_buf;
2182 switch (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) {
2183 case IEEE80211_FC1_DIR_NODS:
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302184 qdf_mem_copy(ethr_hdr->dest_addr, wh->i_addr1,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002185 ETHERNET_ADDR_LEN);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302186 qdf_mem_copy(ethr_hdr->src_addr, wh->i_addr2,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002187 ETHERNET_ADDR_LEN);
2188 break;
2189 case IEEE80211_FC1_DIR_TODS:
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302190 qdf_mem_copy(ethr_hdr->dest_addr, wh->i_addr3,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002191 ETHERNET_ADDR_LEN);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302192 qdf_mem_copy(ethr_hdr->src_addr, wh->i_addr2,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002193 ETHERNET_ADDR_LEN);
2194 break;
2195 case IEEE80211_FC1_DIR_FROMDS:
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302196 qdf_mem_copy(ethr_hdr->dest_addr, wh->i_addr1,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002197 ETHERNET_ADDR_LEN);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302198 qdf_mem_copy(ethr_hdr->src_addr, wh->i_addr3,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002199 ETHERNET_ADDR_LEN);
2200 break;
2201 case IEEE80211_FC1_DIR_DSTODS:
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302202 qdf_mem_copy(ethr_hdr->dest_addr, wh->i_addr3,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002203 ETHERNET_ADDR_LEN);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302204 qdf_mem_copy(ethr_hdr->src_addr, wh->i_addr4,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002205 ETHERNET_ADDR_LEN);
2206 break;
2207 }
2208
2209 if (llc_hdr == NULL) {
2210 ethr_hdr->ethertype[0] = (ether_type >> 8) & 0xff;
2211 ethr_hdr->ethertype[1] = (ether_type) & 0xff;
2212 } else {
2213 uint32_t pktlen =
Nirav Shahcbc6d722016-03-01 16:24:53 +05302214 qdf_nbuf_len(msdu) - sizeof(ethr_hdr->ethertype);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002215 ether_type = (uint16_t) pktlen;
Nirav Shahcbc6d722016-03-01 16:24:53 +05302216 ether_type = qdf_nbuf_len(msdu) - sizeof(struct ethernet_hdr_t);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002217 ethr_hdr->ethertype[0] = (ether_type >> 8) & 0xff;
2218 ethr_hdr->ethertype[1] = (ether_type) & 0xff;
2219 }
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302220 qdf_mem_copy(buf, ethr_hdr, ETHERNET_HDR_LEN);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002221}
2222
2223/**
2224 * wma_ieee80211_hdrsize() - get 802.11 header size
2225 * @data: 80211 frame
2226 *
2227 * Return: size of header
2228 */
2229static int32_t wma_ieee80211_hdrsize(const void *data)
2230{
2231 const struct ieee80211_frame *wh = (const struct ieee80211_frame *)data;
2232 int32_t size = sizeof(struct ieee80211_frame);
2233
2234 if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS)
2235 size += IEEE80211_ADDR_LEN;
2236 if (IEEE80211_QOS_HAS_SEQ(wh))
2237 size += sizeof(uint16_t);
2238 return size;
2239}
2240
2241/**
2242 * wmi_desc_pool_init() - Initialize the WMI descriptor pool
2243 * @wma_handle: handle to wma
2244 * @pool_size: Size of wma pool
2245 *
2246 * Return: 0 for success, error code on failure.
2247 */
2248int wmi_desc_pool_init(tp_wma_handle wma_handle, uint32_t pool_size)
2249{
2250 int i;
2251
2252 if (!pool_size) {
2253 WMA_LOGE("%s: failed to allocate desc pool", __func__);
Anurag Chouhanc5548422016-02-24 18:33:27 +05302254 qdf_assert_always(pool_size);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002255 return -EINVAL;
2256 }
2257 WMA_LOGE("%s: initialize desc pool of size %d", __func__, pool_size);
2258 wma_handle->wmi_desc_pool.pool_size = pool_size;
2259 wma_handle->wmi_desc_pool.num_free = pool_size;
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302260 wma_handle->wmi_desc_pool.array = qdf_mem_malloc(pool_size *
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002261 sizeof(union wmi_desc_elem_t));
2262 if (!wma_handle->wmi_desc_pool.array) {
2263 WMA_LOGE("%s: failed to allocate desc pool", __func__);
2264 return -ENOMEM;
2265 }
2266 wma_handle->wmi_desc_pool.freelist = &wma_handle->
2267 wmi_desc_pool.array[0];
2268
2269 for (i = 0; i < (pool_size - 1); i++) {
2270 wma_handle->wmi_desc_pool.array[i].wmi_desc.desc_id = i;
2271 wma_handle->wmi_desc_pool.array[i].next =
2272 &wma_handle->wmi_desc_pool.array[i + 1];
2273 }
2274
2275 wma_handle->wmi_desc_pool.array[i].next = NULL;
2276 wma_handle->wmi_desc_pool.array[i].wmi_desc.desc_id = i;
2277
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302278 qdf_spinlock_create(&wma_handle->wmi_desc_pool.wmi_desc_pool_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002279 return 0;
2280}
2281
2282/**
2283 * wmi_desc_pool_deinit() - Deinitialize the WMI descriptor pool
2284 * @wma_handle: handle to wma
2285 *
2286 * Return: None
2287 */
2288void wmi_desc_pool_deinit(tp_wma_handle wma_handle)
2289{
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302290 qdf_spin_lock_bh(&wma_handle->wmi_desc_pool.wmi_desc_pool_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002291 if (wma_handle->wmi_desc_pool.array) {
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302292 qdf_mem_free(wma_handle->wmi_desc_pool.array);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002293 wma_handle->wmi_desc_pool.array = NULL;
2294 } else {
2295 WMA_LOGE("%s: Empty WMI descriptor pool", __func__);
2296 }
2297
2298 wma_handle->wmi_desc_pool.freelist = NULL;
2299 wma_handle->wmi_desc_pool.pool_size = 0;
2300 wma_handle->wmi_desc_pool.num_free = 0;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302301 qdf_spin_unlock_bh(&wma_handle->wmi_desc_pool.wmi_desc_pool_lock);
2302 qdf_spinlock_destroy(&wma_handle->wmi_desc_pool.wmi_desc_pool_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002303}
2304
2305/**
2306 * wmi_desc_get() - Get wmi descriptor from wmi free descriptor pool
2307 * @wma_handle: handle to wma
2308 *
2309 * Return: pointer to wmi descriptor, NULL on failure
2310 */
2311struct wmi_desc_t *wmi_desc_get(tp_wma_handle wma_handle)
2312{
2313 struct wmi_desc_t *wmi_desc = NULL;
2314
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302315 qdf_spin_lock_bh(&wma_handle->wmi_desc_pool.wmi_desc_pool_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002316 if (wma_handle->wmi_desc_pool.freelist) {
2317 wma_handle->wmi_desc_pool.num_free--;
2318 wmi_desc = &wma_handle->wmi_desc_pool.freelist->wmi_desc;
2319 wma_handle->wmi_desc_pool.freelist =
2320 wma_handle->wmi_desc_pool.freelist->next;
2321 }
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302322 qdf_spin_unlock_bh(&wma_handle->wmi_desc_pool.wmi_desc_pool_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002323
2324 return wmi_desc;
2325}
2326
2327/**
2328 * wmi_desc_put() - Put wmi descriptor to wmi free descriptor pool
2329 * @wma_handle: handle to wma
2330 * @wmi_desc: wmi descriptor
2331 *
2332 * Return: None
2333 */
2334void wmi_desc_put(tp_wma_handle wma_handle, struct wmi_desc_t *wmi_desc)
2335{
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302336 qdf_spin_lock_bh(&wma_handle->wmi_desc_pool.wmi_desc_pool_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002337 ((union wmi_desc_elem_t *)wmi_desc)->next =
2338 wma_handle->wmi_desc_pool.freelist;
2339 wma_handle->wmi_desc_pool.freelist = (union wmi_desc_elem_t *)wmi_desc;
2340 wma_handle->wmi_desc_pool.num_free++;
Anurag Chouhana37b5b72016-02-21 14:53:42 +05302341 qdf_spin_unlock_bh(&wma_handle->wmi_desc_pool.wmi_desc_pool_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002342}
2343
2344#define mgmt_tx_dl_frm_len 64
/**
 * mgmt_wmi_unified_cmd_send() - download a mgmt frame to FW over WMI
 * @wma_handle: wma handle
 * @tx_frame: nbuf holding the full management frame
 * @frmLen: total frame length
 * @vdev_id: vdev on which to transmit
 * @tx_complete_cb: tx download completion callback (stashed in the desc)
 * @tx_ota_post_proc_cb: OTA/ack post-processing callback
 * @chanfreq: channel frequency for transmission
 * @pData: frame bytes to inline into the WMI command (headers)
 *
 * Builds a WMI_MGMT_TX_SEND command carrying up to mgmt_tx_dl_frm_len
 * bytes of the frame inline plus the DMA address of the full frame,
 * and sends it to firmware.  A wmi descriptor is taken from the pool
 * to track completion; it is released on send failure.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_NOMEM /
 *         QDF_STATUS_E_FAILURE on error.
 */
static inline QDF_STATUS
mgmt_wmi_unified_cmd_send(tp_wma_handle wma_handle, void *tx_frame,
			  uint16_t frmLen, uint8_t vdev_id,
			  pWMATxRxCompFunc tx_complete_cb,
			  pWMAAckFnTxComp tx_ota_post_proc_cb,
			  uint16_t chanfreq, void *pData)
{
	wmi_buf_t buf;
	wmi_mgmt_tx_send_cmd_fixed_param *cmd;
	int32_t cmd_len;
	uint64_t dma_addr;
	struct wmi_desc_t *wmi_desc = NULL;
	void *qdf_ctx = cds_get_context(QDF_MODULE_ID_QDF_DEVICE);
	uint8_t *bufp;
	/* Only the first mgmt_tx_dl_frm_len bytes travel inline */
	int32_t bufp_len = (frmLen < mgmt_tx_dl_frm_len) ? frmLen :
		mgmt_tx_dl_frm_len;

	/* fixed param + byte-array TLV header + padded inline bytes */
	cmd_len = sizeof(wmi_mgmt_tx_send_cmd_fixed_param) +
		  WMI_TLV_HDR_SIZE + roundup(bufp_len, sizeof(uint32_t));

	buf = wmi_buf_alloc(wma_handle->wmi_handle, cmd_len);
	if (!buf) {
		WMA_LOGE("%s:wmi_buf_alloc failed", __func__);
		return QDF_STATUS_E_NOMEM;
	}

	cmd = (wmi_mgmt_tx_send_cmd_fixed_param *)wmi_buf_data(buf);
	bufp = (uint8_t *) cmd;
	WMITLV_SET_HDR(&cmd->tlv_header,
		       WMITLV_TAG_STRUC_wmi_mgmt_tx_send_cmd_fixed_param,
		       WMITLV_GET_STRUCT_TLVLEN
			       (wmi_mgmt_tx_send_cmd_fixed_param));

	cmd->vdev_id = vdev_id;

	/* Descriptor tracks this frame until tx completion */
	wmi_desc = wmi_desc_get(wma_handle);
	if (!wmi_desc) {
		WMA_LOGE("%s: Failed to get wmi_desc", __func__);
		goto err2;
	}
	wmi_desc->nbuf = tx_frame;
	wmi_desc->tx_cmpl_cb = tx_complete_cb;
	wmi_desc->ota_post_proc_cb = tx_ota_post_proc_cb;

	cmd->desc_id = wmi_desc->desc_id;
	cmd->chanfreq = chanfreq;
	/* Inline frame bytes follow the fixed param as a byte-array TLV */
	bufp += sizeof(wmi_mgmt_tx_send_cmd_fixed_param);
	WMITLV_SET_HDR(bufp, WMITLV_TAG_ARRAY_BYTE, roundup(bufp_len,
							    sizeof(uint32_t)));
	bufp += WMI_TLV_HDR_SIZE;
	qdf_mem_copy(bufp, pData, bufp_len);
	/* NOTE(review): map status is not checked, and the frame is not
	 * unmapped on the send-failure path below — confirm whether the
	 * caller/completion path owns the unmap.
	 */
	qdf_nbuf_map_single(qdf_ctx, tx_frame, QDF_DMA_TO_DEVICE);
	dma_addr = qdf_nbuf_get_frag_paddr(tx_frame, 0);
	cmd->paddr_lo = (uint32_t)(dma_addr & 0xffffffff);
#if defined(HELIUMPLUS_PADDR64)
	/* High PA bits are limited to 5 bits on this target */
	cmd->paddr_hi = (uint32_t)((dma_addr >> 32) & 0x1F);
#endif
	cmd->frame_len = frmLen;
	cmd->buf_len = bufp_len;

	if (wmi_unified_cmd_send(wma_handle->wmi_handle, buf, cmd_len,
				 WMI_MGMT_TX_SEND_CMDID)) {
		WMA_LOGE("%s: Failed to send mgmt Tx", __func__);
		goto err1;
	}
	return QDF_STATUS_SUCCESS;

	/* Unwind in reverse acquisition order: desc first, then buffer */
err1:
	wmi_desc_put(wma_handle, wmi_desc);
err2:
	wmi_buf_free(buf);
	return QDF_STATUS_E_FAILURE;
}
2417
2418/**
2419 * wma_tx_packet() - Sends Tx Frame to TxRx
2420 * @wma_context: wma context
2421 * @tx_frame: frame buffer
2422 * @frmLen: frame length
2423 * @frmType: frame type
2424 * @txDir: tx diection
2425 * @tid: TID
2426 * @tx_frm_download_comp_cb: tx download callback handler
2427 * @tx_frm_ota_comp_cb: OTA complition handler
2428 * @tx_flag: tx flag
2429 * @vdev_id: vdev id
2430 * @tdlsFlag: tdls flag
2431 *
2432 * This function sends the frame corresponding to the
2433 * given vdev id.
2434 * This is blocking call till the downloading of frame is complete.
2435 *
2436 * Return: CDF status
2437 */
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302438QDF_STATUS wma_tx_packet(void *wma_context, void *tx_frame, uint16_t frmLen,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002439 eFrameType frmType, eFrameTxDir txDir, uint8_t tid,
2440 pWMATxRxCompFunc tx_frm_download_comp_cb, void *pData,
2441 pWMAAckFnTxComp tx_frm_ota_comp_cb, uint8_t tx_flag,
2442 uint8_t vdev_id, bool tdlsFlag, uint16_t channel_freq)
2443{
2444 tp_wma_handle wma_handle = (tp_wma_handle) (wma_context);
2445 int32_t status;
Anurag Chouhance0dc992016-02-16 18:18:03 +05302446 QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002447 int32_t is_high_latency;
2448 ol_txrx_vdev_handle txrx_vdev;
2449 enum frame_index tx_frm_index = GENERIC_NODOWNLD_NOACK_COMP_INDEX;
Nirav Shahcbc6d722016-03-01 16:24:53 +05302450 tpSirMacFrameCtl pFc = (tpSirMacFrameCtl) (qdf_nbuf_data(tx_frame));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002451 uint8_t use_6mbps = 0;
2452 uint8_t downld_comp_required = 0;
2453 uint16_t chanfreq;
2454#ifdef WLAN_FEATURE_11W
2455 uint8_t *pFrame = NULL;
2456 void *pPacket = NULL;
2457 uint16_t newFrmLen = 0;
2458#endif /* WLAN_FEATURE_11W */
2459 struct wma_txrx_node *iface;
2460 tpAniSirGlobal pMac;
2461 tpSirMacMgmtHdr mHdr;
2462#ifdef QCA_PKT_PROTO_TRACE
2463 uint8_t proto_type = 0;
2464#endif /* QCA_PKT_PROTO_TRACE */
2465
2466 if (NULL == wma_handle) {
2467 WMA_LOGE("wma_handle is NULL");
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302468 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002469 }
2470 iface = &wma_handle->interfaces[vdev_id];
2471 /* Get the vdev handle from vdev id */
2472 txrx_vdev = wma_handle->interfaces[vdev_id].handle;
2473
2474 if (!txrx_vdev) {
2475 WMA_LOGE("TxRx Vdev Handle is NULL");
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302476 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002477 }
2478
2479 if (frmType >= TXRX_FRM_MAX) {
2480 WMA_LOGE("Invalid Frame Type Fail to send Frame");
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302481 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002482 }
2483
Anurag Chouhan6d760662016-02-20 16:05:43 +05302484 pMac = cds_get_context(QDF_MODULE_ID_PE);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002485 if (!pMac) {
2486 WMA_LOGE("pMac Handle is NULL");
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302487 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002488 }
2489 /*
2490 * Currently only support to
2491 * send 80211 Mgmt and 80211 Data are added.
2492 */
2493 if (!((frmType == TXRX_FRM_802_11_MGMT) ||
2494 (frmType == TXRX_FRM_802_11_DATA))) {
2495 WMA_LOGE("No Support to send other frames except 802.11 Mgmt/Data");
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302496 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002497 }
Nirav Shahcbc6d722016-03-01 16:24:53 +05302498 mHdr = (tpSirMacMgmtHdr)qdf_nbuf_data(tx_frame);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002499#ifdef WLAN_FEATURE_11W
2500 if ((iface && iface->rmfEnabled) &&
2501 (frmType == TXRX_FRM_802_11_MGMT) &&
2502 (pFc->subType == SIR_MAC_MGMT_DISASSOC ||
2503 pFc->subType == SIR_MAC_MGMT_DEAUTH ||
2504 pFc->subType == SIR_MAC_MGMT_ACTION)) {
2505 struct ieee80211_frame *wh =
Nirav Shahcbc6d722016-03-01 16:24:53 +05302506 (struct ieee80211_frame *)qdf_nbuf_data(tx_frame);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002507 if (!IEEE80211_IS_BROADCAST(wh->i_addr1) &&
2508 !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
2509 if (pFc->wep) {
2510 /* Allocate extra bytes for privacy header and trailer */
2511 newFrmLen = frmLen + IEEE80211_CCMP_HEADERLEN +
2512 IEEE80211_CCMP_MICLEN;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302513 qdf_status =
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002514 cds_packet_alloc((uint16_t) newFrmLen,
2515 (void **)&pFrame,
2516 (void **)&pPacket);
2517
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302518 if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002519 WMA_LOGP("%s: Failed to allocate %d bytes for RMF status "
2520 "code (%x)", __func__, newFrmLen,
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302521 qdf_status);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002522 /* Free the original packet memory */
2523 cds_packet_free((void *)tx_frame);
2524 goto error;
2525 }
2526
2527 /*
2528 * Initialize the frame with 0's and only fill
2529 * MAC header and data, Keep the CCMP header and
2530 * trailer as 0's, firmware shall fill this
2531 */
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302532 qdf_mem_set(pFrame, newFrmLen, 0);
2533 qdf_mem_copy(pFrame, wh, sizeof(*wh));
2534 qdf_mem_copy(pFrame + sizeof(*wh) +
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002535 IEEE80211_CCMP_HEADERLEN,
2536 pData + sizeof(*wh),
2537 frmLen - sizeof(*wh));
2538
2539 cds_packet_free((void *)tx_frame);
2540 tx_frame = pPacket;
2541 frmLen = newFrmLen;
2542 }
2543 } else {
2544 /* Allocate extra bytes for MMIE */
2545 newFrmLen = frmLen + IEEE80211_MMIE_LEN;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302546 qdf_status = cds_packet_alloc((uint16_t) newFrmLen,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002547 (void **)&pFrame,
2548 (void **)&pPacket);
2549
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302550 if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002551 WMA_LOGP("%s: Failed to allocate %d bytes for RMF status "
2552 "code (%x)", __func__, newFrmLen,
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302553 qdf_status);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002554 /* Free the original packet memory */
2555 cds_packet_free((void *)tx_frame);
2556 goto error;
2557 }
2558 /*
2559 * Initialize the frame with 0's and only fill
2560 * MAC header and data. MMIE field will be
2561 * filled by cds_attach_mmie API
2562 */
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302563 qdf_mem_set(pFrame, newFrmLen, 0);
2564 qdf_mem_copy(pFrame, wh, sizeof(*wh));
2565 qdf_mem_copy(pFrame + sizeof(*wh),
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002566 pData + sizeof(*wh), frmLen - sizeof(*wh));
2567 if (!cds_attach_mmie(iface->key.key,
2568 iface->key.key_id[0].ipn,
2569 WMA_IGTK_KEY_INDEX_4,
2570 pFrame,
2571 pFrame + newFrmLen, newFrmLen)) {
2572 WMA_LOGP("%s: Failed to attach MMIE at the end of "
2573 "frame", __func__);
2574 /* Free the original packet memory */
2575 cds_packet_free((void *)tx_frame);
2576 goto error;
2577 }
2578 cds_packet_free((void *)tx_frame);
2579 tx_frame = pPacket;
2580 frmLen = newFrmLen;
2581 }
2582 }
2583#endif /* WLAN_FEATURE_11W */
2584
2585 if ((frmType == TXRX_FRM_802_11_MGMT) &&
2586 (pFc->subType == SIR_MAC_MGMT_PROBE_RSP)) {
2587 uint64_t adjusted_tsf_le;
2588 struct ieee80211_frame *wh =
Nirav Shahcbc6d722016-03-01 16:24:53 +05302589 (struct ieee80211_frame *)qdf_nbuf_data(tx_frame);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002590
2591 /* Make the TSF offset negative to match TSF in beacons */
2592 adjusted_tsf_le = cpu_to_le64(0ULL -
2593 wma_handle->interfaces[vdev_id].
2594 tsfadjust);
2595 A_MEMCPY(&wh[1], &adjusted_tsf_le, sizeof(adjusted_tsf_le));
2596 }
2597 if (frmType == TXRX_FRM_802_11_DATA) {
Nirav Shahcbc6d722016-03-01 16:24:53 +05302598 qdf_nbuf_t ret;
2599 qdf_nbuf_t skb = (qdf_nbuf_t) tx_frame;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002600 ol_txrx_pdev_handle pdev =
Anurag Chouhan6d760662016-02-20 16:05:43 +05302601 cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002602
2603 struct wma_decap_info_t decap_info;
2604 struct ieee80211_frame *wh =
Nirav Shahcbc6d722016-03-01 16:24:53 +05302605 (struct ieee80211_frame *)qdf_nbuf_data(skb);
Anurag Chouhan210db072016-02-22 18:42:15 +05302606 unsigned long curr_timestamp = qdf_mc_timer_get_system_ticks();
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002607
2608 if (pdev == NULL) {
2609 WMA_LOGE("%s: pdev pointer is not available", __func__);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302610 return QDF_STATUS_E_FAULT;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002611 }
2612
2613 /*
2614 * 1) TxRx Module expects data input to be 802.3 format
2615 * So Decapsulation has to be done.
2616 * 2) Only one Outstanding Data pending for Ack is allowed
2617 */
2618 if (tx_frm_ota_comp_cb) {
2619 if (wma_handle->umac_data_ota_ack_cb) {
2620 /*
2621 * If last data frame was sent more than 5 seconds
2622 * ago and still we did not receive ack/nack from
2623 * fw then allow Tx of this data frame
2624 */
2625 if (curr_timestamp >=
2626 wma_handle->last_umac_data_ota_timestamp +
2627 500) {
2628 WMA_LOGE("%s: No Tx Ack for last data frame for more than 5 secs, allow Tx of current data frame",
2629 __func__);
2630 } else {
2631 WMA_LOGE("%s: Already one Data pending for Ack, reject Tx of data frame",
2632 __func__);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302633 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002634 }
2635 }
2636 } else {
2637 /*
2638 * Data Frames are sent through TxRx Non Standard Data Path
2639 * so Ack Complete Cb is must
2640 */
2641 WMA_LOGE("No Ack Complete Cb. Don't Allow");
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302642 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002643 }
2644
2645 /* Take out 802.11 header from skb */
2646 decap_info.hdr_len = wma_ieee80211_hdrsize(wh);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302647 qdf_mem_copy(decap_info.hdr, wh, decap_info.hdr_len);
Nirav Shahcbc6d722016-03-01 16:24:53 +05302648 qdf_nbuf_pull_head(skb, decap_info.hdr_len);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002649
2650 /* Decapsulate to 802.3 format */
2651 wma_decap_to_8023(skb, &decap_info);
2652
2653 /* Zero out skb's context buffer for the driver to use */
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302654 qdf_mem_set(skb->cb, sizeof(skb->cb), 0);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002655
2656 /* Do the DMA Mapping */
Nirav Shahcbc6d722016-03-01 16:24:53 +05302657 qdf_nbuf_map_single(pdev->osdev, skb, QDF_DMA_TO_DEVICE);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002658
2659 /* Terminate the (single-element) list of tx frames */
2660 skb->next = NULL;
2661
2662 /* Store the Ack Complete Cb */
2663 wma_handle->umac_data_ota_ack_cb = tx_frm_ota_comp_cb;
2664
2665 /* Store the timestamp and nbuf for this data Tx */
2666 wma_handle->last_umac_data_ota_timestamp = curr_timestamp;
2667 wma_handle->last_umac_data_nbuf = skb;
2668
2669 /* Send the Data frame to TxRx in Non Standard Path */
2670 ret = ol_tx_non_std(txrx_vdev, ol_tx_spec_no_free, skb);
2671
2672 if (ret) {
2673 WMA_LOGE("TxRx Rejected. Fail to do Tx");
Nirav Shahcbc6d722016-03-01 16:24:53 +05302674 qdf_nbuf_unmap_single(pdev->osdev, skb,
Anurag Chouhan6d760662016-02-20 16:05:43 +05302675 QDF_DMA_TO_DEVICE);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002676 /* Call Download Cb so that umac can free the buffer */
2677 if (tx_frm_download_comp_cb)
2678 tx_frm_download_comp_cb(wma_handle->mac_context,
2679 tx_frame,
2680 WMA_TX_FRAME_BUFFER_FREE);
2681 wma_handle->umac_data_ota_ack_cb = NULL;
2682 wma_handle->last_umac_data_nbuf = NULL;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302683 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002684 }
2685
2686 /* Call Download Callback if passed */
2687 if (tx_frm_download_comp_cb)
2688 tx_frm_download_comp_cb(wma_handle->mac_context,
2689 tx_frame,
2690 WMA_TX_FRAME_BUFFER_NO_FREE);
2691
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302692 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002693 }
2694
2695 is_high_latency =
2696 ol_cfg_is_high_latency(txrx_vdev->pdev->ctrl_pdev);
2697
2698 downld_comp_required = tx_frm_download_comp_cb && is_high_latency;
2699
2700 /* Fill the frame index to send */
2701 if (pFc->type == SIR_MAC_MGMT_FRAME) {
2702 if (tx_frm_ota_comp_cb) {
2703 if (downld_comp_required)
2704 tx_frm_index =
2705 GENERIC_DOWNLD_COMP_ACK_COMP_INDEX;
2706 else
2707 tx_frm_index = GENERIC_NODOWLOAD_ACK_COMP_INDEX;
2708
2709 /* Store the Ack Cb sent by UMAC */
2710 if (pFc->subType < SIR_MAC_MGMT_RESERVED15) {
2711 wma_handle->umac_ota_ack_cb[pFc->subType] =
2712 tx_frm_ota_comp_cb;
2713 }
2714#ifdef QCA_PKT_PROTO_TRACE
2715 if (pFc->subType == SIR_MAC_MGMT_ACTION)
2716 proto_type = cds_pkt_get_proto_type(tx_frame,
2717 pMac->fEnableDebugLog,
2718 NBUF_PKT_TRAC_TYPE_MGMT_ACTION);
2719 if (proto_type & NBUF_PKT_TRAC_TYPE_MGMT_ACTION)
2720 cds_pkt_trace_buf_update("WM:T:MACT");
Nirav Shahcbc6d722016-03-01 16:24:53 +05302721 qdf_nbuf_trace_set_proto_type(tx_frame, proto_type);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002722#endif /* QCA_PKT_PROTO_TRACE */
2723 } else {
2724 if (downld_comp_required)
2725 tx_frm_index =
2726 GENERIC_DOWNLD_COMP_NOACK_COMP_INDEX;
2727 else
2728 tx_frm_index =
2729 GENERIC_NODOWNLD_NOACK_COMP_INDEX;
2730 }
2731 }
2732
2733 /*
2734 * If Dowload Complete is required
2735 * Wait for download complete
2736 */
2737 if (downld_comp_required) {
2738 /* Store Tx Comp Cb */
2739 wma_handle->tx_frm_download_comp_cb = tx_frm_download_comp_cb;
2740
2741 /* Reset the Tx Frame Complete Event */
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302742 qdf_status =
Anurag Chouhance0dc992016-02-16 18:18:03 +05302743 qdf_event_reset(&wma_handle->tx_frm_download_comp_event);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002744
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302745 if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002746 WMA_LOGP("%s: Event Reset failed tx comp event %x",
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302747 __func__, qdf_status);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002748 goto error;
2749 }
2750 }
2751
2752 /* If the frame has to be sent at BD Rate2 inform TxRx */
2753 if (tx_flag & HAL_USE_BD_RATE2_FOR_MANAGEMENT_FRAME)
2754 use_6mbps = 1;
2755
Deepak Dhamdhered97bfb32015-10-11 15:16:18 -07002756 if (wma_handle->interfaces[vdev_id].scan_info.chan_freq != 0) {
2757 chanfreq = wma_handle->interfaces[vdev_id].scan_info.chan_freq;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002758 WMA_LOGI("%s: Preauth frame on channel %d", __func__, chanfreq);
2759 } else if (pFc->subType == SIR_MAC_MGMT_PROBE_RSP) {
2760 chanfreq = wma_handle->interfaces[vdev_id].mhz;
2761 WMA_LOGI("%s: Probe response frame on channel %d", __func__,
2762 chanfreq);
2763 WMA_LOGI("%s: Probe response frame on vdev id %d", __func__,
2764 vdev_id);
2765 } else if (pFc->subType == SIR_MAC_MGMT_ACTION) {
2766 chanfreq = channel_freq;
2767 } else {
2768 chanfreq = 0;
2769 }
2770 if (pMac->fEnableDebugLog & 0x1) {
2771 if ((pFc->type == SIR_MAC_MGMT_FRAME) &&
2772 (pFc->subType != SIR_MAC_MGMT_PROBE_REQ) &&
2773 (pFc->subType != SIR_MAC_MGMT_PROBE_RSP)) {
2774 WMA_LOGE("TX MGMT - Type %hu, SubType %hu seq_num[%d]",
2775 pFc->type, pFc->subType,
2776 ((mHdr->seqControl.seqNumHi << 4) |
2777 mHdr->seqControl.seqNumLo));
2778 }
2779 }
2780
2781 if (WMI_SERVICE_IS_ENABLED(wma_handle->wmi_service_bitmap,
2782 WMI_SERVICE_MGMT_TX_WMI)) {
2783 status = mgmt_wmi_unified_cmd_send(wma_handle, tx_frame, frmLen,
2784 vdev_id,
2785 tx_frm_download_comp_cb,
2786 tx_frm_ota_comp_cb,
2787 chanfreq, pData);
2788 } else {
2789 /* Hand over the Tx Mgmt frame to TxRx */
2790 status = ol_txrx_mgmt_send(txrx_vdev, tx_frame, tx_frm_index,
2791 use_6mbps, chanfreq);
2792 }
2793
2794 /*
2795 * Failed to send Tx Mgmt Frame
2796 */
2797 if (status) {
2798 /* Call Download Cb so that umac can free the buffer */
2799 if (tx_frm_download_comp_cb)
2800 tx_frm_download_comp_cb(wma_handle->mac_context,
2801 tx_frame,
2802 WMA_TX_FRAME_BUFFER_FREE);
2803 WMA_LOGP("%s: Failed to send Mgmt Frame", __func__);
2804 goto error;
2805 }
2806
2807 if (!tx_frm_download_comp_cb)
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302808 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002809
2810 /*
2811 * Wait for Download Complete
2812 * if required
2813 */
2814 if (downld_comp_required) {
2815 /*
2816 * Wait for Download Complete
2817 * @ Integrated : Dxe Complete
2818 * @ Discrete : Target Download Complete
2819 */
Anurag Chouhance0dc992016-02-16 18:18:03 +05302820 qdf_status =
2821 qdf_wait_single_event(&wma_handle->
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002822 tx_frm_download_comp_event,
2823 WMA_TX_FRAME_COMPLETE_TIMEOUT);
2824
Anurag Chouhance0dc992016-02-16 18:18:03 +05302825 if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002826 WMA_LOGP("Wait Event failed txfrm_comp_event");
2827 /*
2828 * @Integrated: Something Wrong with Dxe
2829 * TODO: Some Debug Code
2830 * Here We need to trigger SSR since
2831 * since system went into a bad state where
2832 * we didn't get Download Complete for almost
2833 * WMA_TX_FRAME_COMPLETE_TIMEOUT (1 sec)
2834 */
2835 }
2836 } else {
2837 /*
2838 * For Low Latency Devices
2839 * Call the download complete
2840 * callback once the frame is successfully
2841 * given to txrx module
2842 */
2843 tx_frm_download_comp_cb(wma_handle->mac_context, tx_frame,
2844 WMA_TX_FRAME_BUFFER_NO_FREE);
2845 }
2846
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302847 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002848
2849error:
2850 wma_handle->tx_frm_download_comp_cb = NULL;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302851 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002852}
2853
2854/**
2855 * wma_ds_peek_rx_packet_info() - peek rx packet info
2856 * @pkt: packet
2857 * @pkt_meta: packet meta
2858 * @bSwap: byte swap
2859 *
2860 * Function fills the rx packet meta info from the the cds packet
2861 *
2862 * Return: CDF status
2863 */
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302864QDF_STATUS wma_ds_peek_rx_packet_info(cds_pkt_t *pkt, void **pkt_meta,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002865 bool bSwap)
2866{
2867 /* Sanity Check */
2868 if (pkt == NULL) {
2869 WMA_LOGE("wma:Invalid parameter sent on wma_peek_rx_pkt_info");
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302870 return QDF_STATUS_E_FAULT;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002871 }
2872
2873 *pkt_meta = &(pkt->pkt_meta);
2874
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302875 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002876}
2877
/**
 * ol_rx_err() - ol rx err handler
 * @pdev: ol pdev
 * @vdev_id: vdev id
 * @peer_mac_addr: peer mac address (transmitter of the offending frame)
 * @tid: TID
 * @tsf32: TSF
 * @err_type: error type; only OL_RX_ERR_TKIP_MIC is handled here
 * @rx_frame: rx frame that triggered the error
 * @pn: PN Number
 * @key_id: key id
 *
 * This function handles rx error and send MIC error failure to LIM
 * (posted to SME as eWNI_SME_MIC_FAILURE_IND). All other error types
 * are ignored.
 *
 * Return: none
 */
void ol_rx_err(ol_pdev_handle pdev, uint8_t vdev_id,
	       uint8_t *peer_mac_addr, int tid, uint32_t tsf32,
	       enum ol_rx_err_type err_type, qdf_nbuf_t rx_frame,
	       uint64_t *pn, uint8_t key_id)
{
	tp_wma_handle wma = cds_get_context(QDF_MODULE_ID_WMA);
	tpSirSmeMicFailureInd mic_err_ind;
	struct ether_header *eth_hdr;
	cds_msg_t cds_msg;

	if (NULL == wma) {
		WMA_LOGE("%s: Failed to get wma", __func__);
		return;
	}

	/* Only TKIP MIC failures are reported upward; drop everything else */
	if (err_type != OL_RX_ERR_TKIP_MIC)
		return;

	/* Frame must be large enough to carry an 802.3 header */
	if (qdf_nbuf_len(rx_frame) < sizeof(*eth_hdr))
		return;
	eth_hdr = (struct ether_header *)qdf_nbuf_data(rx_frame);
	mic_err_ind = qdf_mem_malloc(sizeof(*mic_err_ind));
	if (!mic_err_ind) {
		WMA_LOGE("%s: Failed to allocate memory for MIC indication message",
			 __func__);
		return;
	}
	qdf_mem_set((void *)mic_err_ind, sizeof(*mic_err_ind), 0);

	/* Build the SME MIC-failure indication from the offending frame */
	mic_err_ind->messageType = eWNI_SME_MIC_FAILURE_IND;
	mic_err_ind->length = sizeof(*mic_err_ind);
	mic_err_ind->sessionId = vdev_id;
	qdf_copy_macaddr(&mic_err_ind->bssId,
		 (struct qdf_mac_addr *) &wma->interfaces[vdev_id].bssid);
	qdf_mem_copy(mic_err_ind->info.taMacAddr,
		     (struct qdf_mac_addr *) peer_mac_addr,
			sizeof(tSirMacAddr));
	qdf_mem_copy(mic_err_ind->info.srcMacAddr,
		     (struct qdf_mac_addr *) eth_hdr->ether_shost,
			sizeof(tSirMacAddr));
	qdf_mem_copy(mic_err_ind->info.dstMacAddr,
		     (struct qdf_mac_addr *) eth_hdr->ether_dhost,
			sizeof(tSirMacAddr));
	mic_err_ind->info.keyId = key_id;
	mic_err_ind->info.multicast =
		IEEE80211_IS_MULTICAST(eth_hdr->ether_dhost);
	qdf_mem_copy(mic_err_ind->info.TSC, pn, SIR_CIPHER_SEQ_CTR_SIZE);

	qdf_mem_set(&cds_msg, sizeof(cds_msg_t), 0);
	cds_msg.type = eWNI_SME_MIC_FAILURE_IND;
	cds_msg.bodyptr = (void *) mic_err_ind;

	/* On successful post, SME owns and frees mic_err_ind */
	if (QDF_STATUS_SUCCESS !=
	    cds_mq_post_message(CDS_MQ_ID_SME, (cds_msg_t *) &cds_msg)) {
		WMA_LOGE("%s: could not post mic failure indication to SME",
			 __func__);
		qdf_mem_free((void *)mic_err_ind);
	}
}
2953
2954/**
2955 * wma_tx_abort() - abort tx
2956 * @vdev_id: vdev id
2957 *
2958 * In case of deauth host abort transmitting packet.
2959 *
2960 * Return: none
2961 */
2962void wma_tx_abort(uint8_t vdev_id)
2963{
2964#define PEER_ALL_TID_BITMASK 0xffffffff
2965 tp_wma_handle wma;
2966 uint32_t peer_tid_bitmap = PEER_ALL_TID_BITMASK;
2967 struct wma_txrx_node *iface;
2968
Anurag Chouhan6d760662016-02-20 16:05:43 +05302969 wma = cds_get_context(QDF_MODULE_ID_WMA);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002970 if (NULL == wma) {
2971 WMA_LOGE("%s: wma is NULL", __func__);
2972 return;
2973 }
2974
2975 iface = &wma->interfaces[vdev_id];
2976 if (!iface->handle) {
2977 WMA_LOGE("%s: Failed to get iface handle: %p",
2978 __func__, iface->handle);
2979 return;
2980 }
2981 WMA_LOGA("%s: vdevid %d bssid %pM", __func__, vdev_id, iface->bssid);
2982 iface->pause_bitmap |= (1 << PAUSE_TYPE_HOST);
2983 ol_txrx_vdev_pause(iface->handle, OL_TXQ_PAUSE_REASON_TX_ABORT);
2984
2985 /* Flush all TIDs except MGMT TID for this peer in Target */
2986 peer_tid_bitmap &= ~(0x1 << WMI_MGMT_TID);
2987 wmi_unified_peer_flush_tids_send(wma->wmi_handle, iface->bssid,
2988 peer_tid_bitmap, vdev_id);
2989}
2990
#if defined(FEATURE_LRO)
/**
 * wma_lro_config_cmd() - process the LRO config command
 * @wma_handle: Pointer to WMA handle
 * @wma_lro_cmd: Pointer to LRO configuration parameters
 *
 * This function sends down the LRO configuration parameters to
 * the firmware to enable LRO, sets the TCP flags and sets the
 * seed values for the toeplitz hash generation
 *
 * Return: QDF_STATUS_SUCCESS for success otherwise failure
 */
QDF_STATUS wma_lro_config_cmd(tp_wma_handle wma_handle,
	 struct wma_lro_config_cmd_t *wma_lro_cmd)
{
	wmi_lro_info_cmd_fixed_param *cmd;
	wmi_buf_t buf;
	int status;

	if (NULL == wma_handle || NULL == wma_lro_cmd) {
		WMA_LOGE("wma_lro_config_cmd: invalid input!");
		return QDF_STATUS_E_FAILURE;
	}

	buf = wmi_buf_alloc(wma_handle->wmi_handle, sizeof(*cmd));
	if (!buf) {
		WMA_LOGE("Failed to allocate buffer to send set key cmd");
		return QDF_STATUS_E_FAILURE;
	}

	cmd = (wmi_lro_info_cmd_fixed_param *) wmi_buf_data(buf);

	WMITLV_SET_HDR(&cmd->tlv_header,
		WMITLV_TAG_STRUC_wmi_lro_info_cmd_fixed_param,
		WMITLV_GET_STRUCT_TLVLEN(wmi_lro_info_cmd_fixed_param));

	cmd->lro_enable = wma_lro_cmd->lro_enable;
	WMI_LRO_INFO_TCP_FLAG_VALS_SET(cmd->tcp_flag_u32,
		 wma_lro_cmd->tcp_flag);
	WMI_LRO_INFO_TCP_FLAGS_MASK_SET(cmd->tcp_flag_u32,
		 wma_lro_cmd->tcp_flag_mask);

	/* Toeplitz hash seed: 4.5 32-bit words for IPv4 ... */
	cmd->toeplitz_hash_ipv4_0_3 =
		 wma_lro_cmd->toeplitz_hash_ipv4[0];
	cmd->toeplitz_hash_ipv4_4_7 =
		 wma_lro_cmd->toeplitz_hash_ipv4[1];
	cmd->toeplitz_hash_ipv4_8_11 =
		 wma_lro_cmd->toeplitz_hash_ipv4[2];
	cmd->toeplitz_hash_ipv4_12_15 =
		 wma_lro_cmd->toeplitz_hash_ipv4[3];
	cmd->toeplitz_hash_ipv4_16 =
		 wma_lro_cmd->toeplitz_hash_ipv4[4];

	/* ... and 10.25 32-bit words for IPv6 */
	cmd->toeplitz_hash_ipv6_0_3 =
		 wma_lro_cmd->toeplitz_hash_ipv6[0];
	cmd->toeplitz_hash_ipv6_4_7 =
		 wma_lro_cmd->toeplitz_hash_ipv6[1];
	cmd->toeplitz_hash_ipv6_8_11 =
		 wma_lro_cmd->toeplitz_hash_ipv6[2];
	cmd->toeplitz_hash_ipv6_12_15 =
		 wma_lro_cmd->toeplitz_hash_ipv6[3];
	cmd->toeplitz_hash_ipv6_16_19 =
		 wma_lro_cmd->toeplitz_hash_ipv6[4];
	cmd->toeplitz_hash_ipv6_20_23 =
		 wma_lro_cmd->toeplitz_hash_ipv6[5];
	cmd->toeplitz_hash_ipv6_24_27 =
		 wma_lro_cmd->toeplitz_hash_ipv6[6];
	cmd->toeplitz_hash_ipv6_28_31 =
		 wma_lro_cmd->toeplitz_hash_ipv6[7];
	cmd->toeplitz_hash_ipv6_32_35 =
		 wma_lro_cmd->toeplitz_hash_ipv6[8];
	cmd->toeplitz_hash_ipv6_36_39 =
		 wma_lro_cmd->toeplitz_hash_ipv6[9];
	cmd->toeplitz_hash_ipv6_40 =
		 wma_lro_cmd->toeplitz_hash_ipv6[10];

	WMA_LOGD("WMI_LRO_CONFIG: lro_enable %d, tcp_flag 0x%x",
		cmd->lro_enable, cmd->tcp_flag_u32);

	status = wmi_unified_cmd_send(wma_handle->wmi_handle, buf,
		 sizeof(*cmd), WMI_LRO_CONFIG_CMDID);
	if (status) {
		/*
		 * Release with wmi_buf_free() to match wmi_buf_alloc();
		 * qdf_nbuf_free() bypasses the WMI buffer accounting.
		 */
		wmi_buf_free(buf);
		WMA_LOGE("%s:Failed to send WMI_LRO_CONFIG_CMDID", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}
#endif
Dhanashri Atre1f0cbe42015-11-19 10:56:53 -08003080
3081/**
3082 * wma_indicate_err() - indicate an error to the protocol stack
3083 * @err_type: error type
3084 * @err_info: information associated with the error
3085 *
3086 * This function indicates an error encountered in the data path
3087 * to the protocol stack
3088 *
3089 * Return: none
3090 */
3091void
3092wma_indicate_err(
3093 enum ol_rx_err_type err_type,
3094 struct ol_error_info *err_info)
3095{
3096 switch (err_type) {
3097 case OL_RX_ERR_TKIP_MIC:
3098 {
Anurag Chouhan6d760662016-02-20 16:05:43 +05303099 tp_wma_handle wma = cds_get_context(QDF_MODULE_ID_WMA);
Dhanashri Atre1f0cbe42015-11-19 10:56:53 -08003100 tpSirSmeMicFailureInd mic_err_ind;
3101 cds_msg_t cds_msg;
3102 uint8_t vdev_id;
3103
3104 if (NULL == wma) {
3105 WMA_LOGE("%s: Failed to get wma context",
3106 __func__);
3107 return;
3108 }
3109
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303110 mic_err_ind = qdf_mem_malloc(sizeof(*mic_err_ind));
Dhanashri Atre1f0cbe42015-11-19 10:56:53 -08003111 if (!mic_err_ind) {
3112 WMA_LOGE("%s: MIC indication mem alloc failed",
3113 __func__);
3114 return;
3115 }
3116
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303117 qdf_mem_set((void *) mic_err_ind, 0,
Dhanashri Atre1f0cbe42015-11-19 10:56:53 -08003118 sizeof(*mic_err_ind));
3119 mic_err_ind->messageType = eWNI_SME_MIC_FAILURE_IND;
3120 mic_err_ind->length = sizeof(*mic_err_ind);
3121 vdev_id = err_info->u.mic_err.vdev_id;
Anurag Chouhanc5548422016-02-24 18:33:27 +05303122 qdf_copy_macaddr(&mic_err_ind->bssId,
Anurag Chouhan6d760662016-02-20 16:05:43 +05303123 (struct qdf_mac_addr *) &wma->interfaces[vdev_id].bssid);
Dhanashri Atre1f0cbe42015-11-19 10:56:53 -08003124 WMA_LOGE("MIC error: BSSID:%02x:%02x:%02x:%02x:%02x:%02x\n",
3125 mic_err_ind->bssId.bytes[0], mic_err_ind->bssId.bytes[1],
3126 mic_err_ind->bssId.bytes[2], mic_err_ind->bssId.bytes[3],
3127 mic_err_ind->bssId.bytes[4], mic_err_ind->bssId.bytes[5]);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303128 qdf_mem_copy(mic_err_ind->info.taMacAddr,
Anurag Chouhan6d760662016-02-20 16:05:43 +05303129 (struct qdf_mac_addr *) err_info->u.mic_err.ta,
Dhanashri Atre1f0cbe42015-11-19 10:56:53 -08003130 sizeof(tSirMacAddr));
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303131 qdf_mem_copy(mic_err_ind->info.srcMacAddr,
Anurag Chouhan6d760662016-02-20 16:05:43 +05303132 (struct qdf_mac_addr *) err_info->u.mic_err.sa,
Dhanashri Atre1f0cbe42015-11-19 10:56:53 -08003133 sizeof(tSirMacAddr));
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303134 qdf_mem_copy(mic_err_ind->info.dstMacAddr,
Anurag Chouhan6d760662016-02-20 16:05:43 +05303135 (struct qdf_mac_addr *) err_info->u.mic_err.da,
Dhanashri Atre1f0cbe42015-11-19 10:56:53 -08003136 sizeof(tSirMacAddr));
3137 mic_err_ind->info.keyId = err_info->u.mic_err.key_id;
3138 mic_err_ind->info.multicast =
3139 IEEE80211_IS_MULTICAST(err_info->u.mic_err.da);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303140 qdf_mem_copy(mic_err_ind->info.TSC,
Dhanashri Atre1f0cbe42015-11-19 10:56:53 -08003141 (void *)&err_info->
3142 u.mic_err.pn, SIR_CIPHER_SEQ_CTR_SIZE);
3143
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303144 qdf_mem_set(&cds_msg, sizeof(cds_msg_t), 0);
Dhanashri Atre1f0cbe42015-11-19 10:56:53 -08003145 cds_msg.type = eWNI_SME_MIC_FAILURE_IND;
3146 cds_msg.bodyptr = (void *) mic_err_ind;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303147 if (QDF_STATUS_SUCCESS !=
Dhanashri Atre1f0cbe42015-11-19 10:56:53 -08003148 cds_mq_post_message(CDS_MQ_ID_SME,
3149 (cds_msg_t *) &cds_msg)) {
3150 WMA_LOGE("%s: mic failure ind post to SME failed",
3151 __func__);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303152 qdf_mem_free((void *)mic_err_ind);
Dhanashri Atre1f0cbe42015-11-19 10:56:53 -08003153 }
3154 break;
3155 }
3156 default:
3157 {
3158 WMA_LOGE("%s: unhandled ol error type %d", __func__, err_type);
3159 break;
3160 }
3161 }
3162}