/*
 * Copyright (c) 2013-2017 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

/**
 * DOC: wma_data.c
 * This file contains tx/rx and data path related functions.
 */

/* Header files */

#include "wma.h"
#include "wma_api.h"
#include "cds_api.h"
#include "wmi_unified_api.h"
#include "wlan_qct_sys.h"
#include "wni_api.h"
#include "ani_global.h"
#include "wmi_unified.h"
#include "wni_cfg.h"
#include "cfg_api.h"
#include <cdp_txrx_tx_throttle.h>
#if defined(CONFIG_HL_SUPPORT)
#include "wlan_tgt_def_config_hl.h"
#else
#include "wlan_tgt_def_config.h"
#endif
#include "qdf_nbuf.h"
#include "qdf_types.h"
#include "qdf_mem.h"

#include "wma_types.h"
#include "lim_api.h"
#include "lim_session_utils.h"

#include "cds_utils.h"

#if !defined(REMOVE_PKT_LOG)
#include "pktlog_ac.h"
#endif /* REMOVE_PKT_LOG */

#include "dbglog_host.h"
#include "csr_api.h"
#include "ol_fw.h"

#include "dfs.h"
#include "wma_internal.h"
#include "cdp_txrx_flow_ctrl_legacy.h"
#include "cdp_txrx_cmn.h"
#include "cdp_txrx_misc.h"
#include <cdp_txrx_peer_ops.h>
#include <cdp_txrx_cfg.h>
#include "cdp_txrx_stats.h"
#include <cdp_txrx_misc.h>
#include "enet.h"
#include "wlan_mgmt_txrx_utils_api.h"
#include "wlan_objmgr_psoc_obj.h"
#include "wlan_objmgr_pdev_obj.h"
#include "wlan_objmgr_vdev_obj.h"
#include "wlan_objmgr_peer_obj.h"
#include <cdp_txrx_handle.h>

typedef struct {
	int32_t rate;
	uint8_t flag;
} wma_search_rate_t;

#define WMA_MAX_OFDM_CCK_RATE_TBL_SIZE 12
/* In ofdm_cck_rate_tbl->flag, if bit 7 is 1 the entry is CCK, otherwise it
 * is OFDM. The lower bits carry the OFDM/CCK index used to encode the rate.
 */
static wma_search_rate_t ofdm_cck_rate_tbl[WMA_MAX_OFDM_CCK_RATE_TBL_SIZE] = {
	{540, 4},		/* 4: OFDM 54 Mbps */
	{480, 0},		/* 0: OFDM 48 Mbps */
	{360, 5},		/* 5: OFDM 36 Mbps */
	{240, 1},		/* 1: OFDM 24 Mbps */
	{180, 6},		/* 6: OFDM 18 Mbps */
	{120, 2},		/* 2: OFDM 12 Mbps */
	{110, (1 << 7)},	/* 0: CCK 11 Mbps Long */
	{90, 7},		/* 7: OFDM 9 Mbps */
	{60, 3},		/* 3: OFDM 6 Mbps */
	{55, ((1 << 7) | 1)},	/* 1: CCK 5.5 Mbps Long */
	{20, ((1 << 7) | 2)},	/* 2: CCK 2 Mbps Long */
	{10, ((1 << 7) | 3)}	/* 3: CCK 1 Mbps Long */
};

#define WMA_MAX_VHT20_RATE_TBL_SIZE 9
/* In vht20_400ns_rate_tbl flag carries the mcs index for encoding the rate */
static wma_search_rate_t vht20_400ns_rate_tbl[WMA_MAX_VHT20_RATE_TBL_SIZE] = {
	{867, 8},		/* MCS8 1SS short GI */
	{722, 7},		/* MCS7 1SS short GI */
	{650, 6},		/* MCS6 1SS short GI */
	{578, 5},		/* MCS5 1SS short GI */
	{433, 4},		/* MCS4 1SS short GI */
	{289, 3},		/* MCS3 1SS short GI */
	{217, 2},		/* MCS2 1SS short GI */
	{144, 1},		/* MCS1 1SS short GI */
	{72, 0}			/* MCS0 1SS short GI */
};

/* In vht20_800ns_rate_tbl flag carries the mcs index for encoding the rate */
static wma_search_rate_t vht20_800ns_rate_tbl[WMA_MAX_VHT20_RATE_TBL_SIZE] = {
	{780, 8},		/* MCS8 1SS long GI */
	{650, 7},		/* MCS7 1SS long GI */
	{585, 6},		/* MCS6 1SS long GI */
	{520, 5},		/* MCS5 1SS long GI */
	{390, 4},		/* MCS4 1SS long GI */
	{260, 3},		/* MCS3 1SS long GI */
	{195, 2},		/* MCS2 1SS long GI */
	{130, 1},		/* MCS1 1SS long GI */
	{65, 0}			/* MCS0 1SS long GI */
};

#define WMA_MAX_VHT40_RATE_TBL_SIZE 10
/* In vht40_400ns_rate_tbl flag carries the mcs index for encoding the rate */
static wma_search_rate_t vht40_400ns_rate_tbl[WMA_MAX_VHT40_RATE_TBL_SIZE] = {
	{2000, 9},		/* MCS9 1SS short GI */
	{1800, 8},		/* MCS8 1SS short GI */
	{1500, 7},		/* MCS7 1SS short GI */
	{1350, 6},		/* MCS6 1SS short GI */
	{1200, 5},		/* MCS5 1SS short GI */
	{900, 4},		/* MCS4 1SS short GI */
	{600, 3},		/* MCS3 1SS short GI */
	{450, 2},		/* MCS2 1SS short GI */
	{300, 1},		/* MCS1 1SS short GI */
	{150, 0},		/* MCS0 1SS short GI */
};

static wma_search_rate_t vht40_800ns_rate_tbl[WMA_MAX_VHT40_RATE_TBL_SIZE] = {
	{1800, 9},		/* MCS9 1SS long GI */
	{1620, 8},		/* MCS8 1SS long GI */
	{1350, 7},		/* MCS7 1SS long GI */
	{1215, 6},		/* MCS6 1SS long GI */
	{1080, 5},		/* MCS5 1SS long GI */
	{810, 4},		/* MCS4 1SS long GI */
	{540, 3},		/* MCS3 1SS long GI */
	{405, 2},		/* MCS2 1SS long GI */
	{270, 1},		/* MCS1 1SS long GI */
	{135, 0}		/* MCS0 1SS long GI */
};

#define WMA_MAX_VHT80_RATE_TBL_SIZE 10
static wma_search_rate_t vht80_400ns_rate_tbl[WMA_MAX_VHT80_RATE_TBL_SIZE] = {
	{4333, 9},		/* MCS9 1SS short GI */
	{3900, 8},		/* MCS8 1SS short GI */
	{3250, 7},		/* MCS7 1SS short GI */
	{2925, 6},		/* MCS6 1SS short GI */
	{2600, 5},		/* MCS5 1SS short GI */
	{1950, 4},		/* MCS4 1SS short GI */
	{1300, 3},		/* MCS3 1SS short GI */
	{975, 2},		/* MCS2 1SS short GI */
	{650, 1},		/* MCS1 1SS short GI */
	{325, 0}		/* MCS0 1SS short GI */
};

static wma_search_rate_t vht80_800ns_rate_tbl[WMA_MAX_VHT80_RATE_TBL_SIZE] = {
	{3900, 9},		/* MCS9 1SS long GI */
	{3510, 8},		/* MCS8 1SS long GI */
	{2925, 7},		/* MCS7 1SS long GI */
	{2633, 6},		/* MCS6 1SS long GI */
	{2340, 5},		/* MCS5 1SS long GI */
	{1755, 4},		/* MCS4 1SS long GI */
	{1170, 3},		/* MCS3 1SS long GI */
	{878, 2},		/* MCS2 1SS long GI */
	{585, 1},		/* MCS1 1SS long GI */
	{293, 0}		/* MCS0 1SS long GI */
};

#define WMA_MAX_HT20_RATE_TBL_SIZE 8
static wma_search_rate_t ht20_400ns_rate_tbl[WMA_MAX_HT20_RATE_TBL_SIZE] = {
	{722, 7},		/* MCS7 1SS short GI */
	{650, 6},		/* MCS6 1SS short GI */
	{578, 5},		/* MCS5 1SS short GI */
	{433, 4},		/* MCS4 1SS short GI */
	{289, 3},		/* MCS3 1SS short GI */
	{217, 2},		/* MCS2 1SS short GI */
	{144, 1},		/* MCS1 1SS short GI */
	{72, 0}			/* MCS0 1SS short GI */
};

static wma_search_rate_t ht20_800ns_rate_tbl[WMA_MAX_HT20_RATE_TBL_SIZE] = {
	{650, 7},		/* MCS7 1SS long GI */
	{585, 6},		/* MCS6 1SS long GI */
	{520, 5},		/* MCS5 1SS long GI */
	{390, 4},		/* MCS4 1SS long GI */
	{260, 3},		/* MCS3 1SS long GI */
	{195, 2},		/* MCS2 1SS long GI */
	{130, 1},		/* MCS1 1SS long GI */
	{65, 0}			/* MCS0 1SS long GI */
};

#define WMA_MAX_HT40_RATE_TBL_SIZE 8
static wma_search_rate_t ht40_400ns_rate_tbl[WMA_MAX_HT40_RATE_TBL_SIZE] = {
	{1500, 7},		/* MCS7 1SS short GI */
	{1350, 6},		/* MCS6 1SS short GI */
	{1200, 5},		/* MCS5 1SS short GI */
	{900, 4},		/* MCS4 1SS short GI */
	{600, 3},		/* MCS3 1SS short GI */
	{450, 2},		/* MCS2 1SS short GI */
	{300, 1},		/* MCS1 1SS short GI */
	{150, 0}		/* MCS0 1SS short GI */
};

static wma_search_rate_t ht40_800ns_rate_tbl[WMA_MAX_HT40_RATE_TBL_SIZE] = {
	{1350, 7},		/* MCS7 1SS long GI */
	{1215, 6},		/* MCS6 1SS long GI */
	{1080, 5},		/* MCS5 1SS long GI */
	{810, 4},		/* MCS4 1SS long GI */
	{540, 3},		/* MCS3 1SS long GI */
	{405, 2},		/* MCS2 1SS long GI */
	{270, 1},		/* MCS1 1SS long GI */
	{135, 0}		/* MCS0 1SS long GI */
};

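/*
 * All of the rate tables above hold single-spatial-stream PHY rates in units
 * of 0.1 Mbps, paired with the rate/MCS index used to encode them, and every
 * table is sorted in descending rate order. wma_bin_search_rate() below
 * depends on that descending order when it maps a requested multicast rate
 * onto the nearest table entry.
 */
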
/**
 * wma_bin_search_rate() - binary search function to find rate
 * @tbl: rate table
 * @tbl_size: table size
 * @mbpsx10_rate: requested rate in Mbps * 10; updated with the matched rate
 * @ret_flag: returned rate/MCS index of the matched entry
 *
 * Return: none
 */
static void wma_bin_search_rate(wma_search_rate_t *tbl, int32_t tbl_size,
				int32_t *mbpsx10_rate, uint8_t *ret_flag)
{
	int32_t upper, lower, mid;

	/* The table is descending: index 0 holds the largest rate and the
	 * bottom index holds the smallest rate.
	 */

	upper = 0;		/* index 0 */
	lower = tbl_size - 1;	/* last index */

	if (*mbpsx10_rate >= tbl[upper].rate) {
		/* use the largest rate */
		*mbpsx10_rate = tbl[upper].rate;
		*ret_flag = tbl[upper].flag;
		return;
	} else if (*mbpsx10_rate <= tbl[lower].rate) {
		/* use the smallest rate */
		*mbpsx10_rate = tbl[lower].rate;
		*ret_flag = tbl[lower].flag;
		return;
	}
	/* now do a binary search to bracket the requested rate */
	while (lower - upper > 1) {
		mid = (upper + lower) >> 1;
		if (*mbpsx10_rate == tbl[mid].rate) {
			/* found the exact match */
			*mbpsx10_rate = tbl[mid].rate;
			*ret_flag = tbl[mid].flag;
			return;
		} else {
			/* Not found yet. If mid's rate is smaller than the
			 * input, move the lower (small-rate) bound up to mid;
			 * otherwise move the upper (large-rate) bound down
			 * to mid.
			 */
			if (*mbpsx10_rate > tbl[mid].rate)
				lower = mid;
			else
				upper = mid;
		}
	}
	/* after the binary search, upper indexes the ceiling of the rate */
	*mbpsx10_rate = tbl[upper].rate;
	*ret_flag = tbl[upper].flag;
	return;
}

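/*
 * For example, searching ht40_800ns_rate_tbl for a requested rate of 1000
 * (100.0 Mbps) brackets the value between MCS4 (810) and MCS5 (1080) and
 * returns the ceiling entry: *mbpsx10_rate becomes 1080 and *ret_flag
 * becomes 5.
 */
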
/**
 * wma_fill_ofdm_cck_mcast_rate() - fill ofdm cck mcast rate
 * @mbpsx10_rate: requested rate in Mbps * 10
 * @nss: nss
 * @rate: encoded rate (output)
 *
 * Return: QDF status
 */
static QDF_STATUS wma_fill_ofdm_cck_mcast_rate(int32_t mbpsx10_rate,
					       uint8_t nss, uint8_t *rate)
{
	uint8_t idx = 0;

	wma_bin_search_rate(ofdm_cck_rate_tbl, WMA_MAX_OFDM_CCK_RATE_TBL_SIZE,
			    &mbpsx10_rate, &idx);

	/* if bit 7 is set it uses CCK */
	if (idx & 0x80)
		*rate |= (1 << 6) | (idx & 0xF); /* set bit 6 to 1 for CCK */
	else
		*rate |= (idx & 0xF);
	return QDF_STATUS_SUCCESS;
}

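/*
 * For legacy rates the low nibble of *rate is the OFDM/CCK index from
 * ofdm_cck_rate_tbl and bit 6 marks a CCK rate. An illustrative call:
 *
 *	uint8_t rate = 0;
 *
 *	wma_fill_ofdm_cck_mcast_rate(300, 0, &rate);
 *
 * leaves rate == 0x05, i.e. OFDM 36 Mbps, the closest table entry at or
 * above the requested 30.0 Mbps.
 */
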
/**
 * wma_set_ht_vht_mcast_rate() - set ht/vht mcast rate
 * @shortgi: short guard interval
 * @mbpsx10_rate: rate in Mbps * 10
 * @sgi_idx: short GI rate index
 * @sgi_rate: short GI rate
 * @lgi_idx: long GI rate index
 * @lgi_rate: long GI rate
 * @preamble: preamble
 * @rate: encoded rate (output)
 * @streaming_rate: selected streaming rate (output)
 *
 * Return: none
 */
static void wma_set_ht_vht_mcast_rate(uint32_t shortgi, int32_t mbpsx10_rate,
				      uint8_t sgi_idx, int32_t sgi_rate,
				      uint8_t lgi_idx, int32_t lgi_rate,
				      uint8_t preamble, uint8_t *rate,
				      int32_t *streaming_rate)
{
	if (shortgi == 0) {
		*rate |= (preamble << 6) | (lgi_idx & 0xF);
		*streaming_rate = lgi_rate;
	} else {
		*rate |= (preamble << 6) | (sgi_idx & 0xF);
		*streaming_rate = sgi_rate;
	}
}

/**
 * wma_fill_ht20_mcast_rate() - fill ht20 mcast rate
 * @shortgi: short guard interval
 * @mbpsx10_rate: rate in Mbps * 10
 * @nss: nss (0 = 1x1, 1 = 2x2)
 * @rate: encoded rate (output)
 * @streaming_rate: selected streaming rate (output)
 *
 * Return: QDF status
 */
static QDF_STATUS wma_fill_ht20_mcast_rate(uint32_t shortgi,
					   int32_t mbpsx10_rate, uint8_t nss,
					   uint8_t *rate,
					   int32_t *streaming_rate)
{
	uint8_t sgi_idx = 0, lgi_idx = 0;
	int32_t sgi_rate, lgi_rate;

	if (nss == 1)
		mbpsx10_rate = mbpsx10_rate >> 1;

	sgi_rate = mbpsx10_rate;
	lgi_rate = mbpsx10_rate;
	if (shortgi)
		wma_bin_search_rate(ht20_400ns_rate_tbl,
				    WMA_MAX_HT20_RATE_TBL_SIZE, &sgi_rate,
				    &sgi_idx);
	else
		wma_bin_search_rate(ht20_800ns_rate_tbl,
				    WMA_MAX_HT20_RATE_TBL_SIZE, &lgi_rate,
				    &lgi_idx);

	wma_set_ht_vht_mcast_rate(shortgi, mbpsx10_rate, sgi_idx, sgi_rate,
				  lgi_idx, lgi_rate, 2, rate, streaming_rate);
	if (nss == 1)
		*streaming_rate = *streaming_rate << 1;
	return QDF_STATUS_SUCCESS;
}

/**
 * wma_fill_ht40_mcast_rate() - fill ht40 mcast rate
 * @shortgi: short guard interval
 * @mbpsx10_rate: rate in Mbps * 10
 * @nss: nss (0 = 1x1, 1 = 2x2)
 * @rate: encoded rate (output)
 * @streaming_rate: selected streaming rate (output)
 *
 * Return: QDF status
 */
static QDF_STATUS wma_fill_ht40_mcast_rate(uint32_t shortgi,
					   int32_t mbpsx10_rate, uint8_t nss,
					   uint8_t *rate,
					   int32_t *streaming_rate)
{
	uint8_t sgi_idx = 0, lgi_idx = 0;
	int32_t sgi_rate, lgi_rate;

	/* for 2x2 divide the rate by 2 */
	if (nss == 1)
		mbpsx10_rate = mbpsx10_rate >> 1;

	sgi_rate = mbpsx10_rate;
	lgi_rate = mbpsx10_rate;
	if (shortgi)
		wma_bin_search_rate(ht40_400ns_rate_tbl,
				    WMA_MAX_HT40_RATE_TBL_SIZE, &sgi_rate,
				    &sgi_idx);
	else
		wma_bin_search_rate(ht40_800ns_rate_tbl,
				    WMA_MAX_HT40_RATE_TBL_SIZE, &lgi_rate,
				    &lgi_idx);

	wma_set_ht_vht_mcast_rate(shortgi, mbpsx10_rate, sgi_idx, sgi_rate,
				  lgi_idx, lgi_rate, 2, rate, streaming_rate);

	return QDF_STATUS_SUCCESS;
}

/**
 * wma_fill_vht20_mcast_rate() - fill vht20 mcast rate
 * @shortgi: short guard interval
 * @mbpsx10_rate: rate in Mbps * 10
 * @nss: nss (0 = 1x1, 1 = 2x2)
 * @rate: encoded rate (output)
 * @streaming_rate: selected streaming rate (output)
 *
 * Return: QDF status
 */
static QDF_STATUS wma_fill_vht20_mcast_rate(uint32_t shortgi,
					    int32_t mbpsx10_rate, uint8_t nss,
					    uint8_t *rate,
					    int32_t *streaming_rate)
{
	uint8_t sgi_idx = 0, lgi_idx = 0;
	int32_t sgi_rate, lgi_rate;

	/* for 2x2 divide the rate by 2 */
	if (nss == 1)
		mbpsx10_rate = mbpsx10_rate >> 1;

	sgi_rate = mbpsx10_rate;
	lgi_rate = mbpsx10_rate;
	if (shortgi)
		wma_bin_search_rate(vht20_400ns_rate_tbl,
				    WMA_MAX_VHT20_RATE_TBL_SIZE, &sgi_rate,
				    &sgi_idx);
	else
		wma_bin_search_rate(vht20_800ns_rate_tbl,
				    WMA_MAX_VHT20_RATE_TBL_SIZE, &lgi_rate,
				    &lgi_idx);

	wma_set_ht_vht_mcast_rate(shortgi, mbpsx10_rate, sgi_idx, sgi_rate,
				  lgi_idx, lgi_rate, 3, rate, streaming_rate);
	if (nss == 1)
		*streaming_rate = *streaming_rate << 1;
	return QDF_STATUS_SUCCESS;
}

/**
 * wma_fill_vht40_mcast_rate() - fill vht40 mcast rate
 * @shortgi: short guard interval
 * @mbpsx10_rate: rate in Mbps * 10
 * @nss: nss (0 = 1x1, 1 = 2x2)
 * @rate: encoded rate (output)
 * @streaming_rate: selected streaming rate (output)
 *
 * Return: QDF status
 */
static QDF_STATUS wma_fill_vht40_mcast_rate(uint32_t shortgi,
					    int32_t mbpsx10_rate, uint8_t nss,
					    uint8_t *rate,
					    int32_t *streaming_rate)
{
	uint8_t sgi_idx = 0, lgi_idx = 0;
	int32_t sgi_rate, lgi_rate;

	/* for 2x2 divide the rate by 2 */
	if (nss == 1)
		mbpsx10_rate = mbpsx10_rate >> 1;

	sgi_rate = mbpsx10_rate;
	lgi_rate = mbpsx10_rate;
	if (shortgi)
		wma_bin_search_rate(vht40_400ns_rate_tbl,
				    WMA_MAX_VHT40_RATE_TBL_SIZE, &sgi_rate,
				    &sgi_idx);
	else
		wma_bin_search_rate(vht40_800ns_rate_tbl,
				    WMA_MAX_VHT40_RATE_TBL_SIZE, &lgi_rate,
				    &lgi_idx);

	wma_set_ht_vht_mcast_rate(shortgi, mbpsx10_rate,
				  sgi_idx, sgi_rate, lgi_idx, lgi_rate,
				  3, rate, streaming_rate);
	if (nss == 1)
		*streaming_rate = *streaming_rate << 1;
	return QDF_STATUS_SUCCESS;
}

/**
 * wma_fill_vht80_mcast_rate() - fill vht80 mcast rate
 * @shortgi: short guard interval
 * @mbpsx10_rate: rate in Mbps * 10
 * @nss: nss (0 = 1x1, 1 = 2x2)
 * @rate: encoded rate (output)
 * @streaming_rate: selected streaming rate (output)
 *
 * Return: QDF status
 */
static QDF_STATUS wma_fill_vht80_mcast_rate(uint32_t shortgi,
					    int32_t mbpsx10_rate, uint8_t nss,
					    uint8_t *rate,
					    int32_t *streaming_rate)
{
	uint8_t sgi_idx = 0, lgi_idx = 0;
	int32_t sgi_rate, lgi_rate;

	/* for 2x2 divide the rate by 2 */
	if (nss == 1)
		mbpsx10_rate = mbpsx10_rate >> 1;

	sgi_rate = mbpsx10_rate;
	lgi_rate = mbpsx10_rate;
	if (shortgi)
		wma_bin_search_rate(vht80_400ns_rate_tbl,
				    WMA_MAX_VHT80_RATE_TBL_SIZE, &sgi_rate,
				    &sgi_idx);
	else
		wma_bin_search_rate(vht80_800ns_rate_tbl,
				    WMA_MAX_VHT80_RATE_TBL_SIZE, &lgi_rate,
				    &lgi_idx);

	wma_set_ht_vht_mcast_rate(shortgi, mbpsx10_rate, sgi_idx, sgi_rate,
				  lgi_idx, lgi_rate, 3, rate, streaming_rate);
	if (nss == 1)
		*streaming_rate = *streaming_rate << 1;
	return QDF_STATUS_SUCCESS;
}

/**
 * wma_fill_ht_mcast_rate() - fill ht mcast rate
 * @shortgi: short guard interval
 * @chwidth: channel width
 * @mbpsx10_rate: rate in Mbps * 10
 * @nss: nss (0 = 1x1, 1 = 2x2)
 * @chanmode: channel mode
 * @rate: encoded rate (output)
 * @streaming_rate: selected streaming rate (output)
 *
 * Return: QDF status
 */
static QDF_STATUS wma_fill_ht_mcast_rate(uint32_t shortgi,
					 uint32_t chwidth, int32_t mbpsx10_rate,
					 uint8_t nss, WLAN_PHY_MODE chanmode,
					 uint8_t *rate,
					 int32_t *streaming_rate)
{
	int32_t ret = 0;

	*streaming_rate = 0;
	if (chwidth == 0)
		ret = wma_fill_ht20_mcast_rate(shortgi, mbpsx10_rate,
					       nss, rate, streaming_rate);
	else if (chwidth == 1)
		ret = wma_fill_ht40_mcast_rate(shortgi, mbpsx10_rate,
					       nss, rate, streaming_rate);
	else
		WMA_LOGE("%s: Error, Invalid chwidth enum %d", __func__,
			 chwidth);
	return (*streaming_rate != 0) ? QDF_STATUS_SUCCESS : QDF_STATUS_E_INVAL;
}

/**
 * wma_fill_vht_mcast_rate() - fill vht mcast rate
 * @shortgi: short guard interval
 * @chwidth: channel width
 * @mbpsx10_rate: rate in Mbps * 10
 * @nss: nss (0 = 1x1, 1 = 2x2)
 * @chanmode: channel mode
 * @rate: encoded rate (output)
 * @streaming_rate: selected streaming rate (output)
 *
 * Return: QDF status
 */
static QDF_STATUS wma_fill_vht_mcast_rate(uint32_t shortgi,
					  uint32_t chwidth,
					  int32_t mbpsx10_rate, uint8_t nss,
					  WLAN_PHY_MODE chanmode,
					  uint8_t *rate,
					  int32_t *streaming_rate)
{
	int32_t ret = 0;

	*streaming_rate = 0;
	if (chwidth == 0)
		ret = wma_fill_vht20_mcast_rate(shortgi, mbpsx10_rate, nss,
						rate, streaming_rate);
	else if (chwidth == 1)
		ret = wma_fill_vht40_mcast_rate(shortgi, mbpsx10_rate, nss,
						rate, streaming_rate);
	else if (chwidth == 2)
		ret = wma_fill_vht80_mcast_rate(shortgi, mbpsx10_rate, nss,
						rate, streaming_rate);
	else
		WMA_LOGE("%s: chwidth enum %d not supported",
			 __func__, chwidth);
	return (*streaming_rate != 0) ? QDF_STATUS_SUCCESS : QDF_STATUS_E_INVAL;
}

#define WMA_MCAST_1X1_CUT_OFF_RATE 2000
/**
 * wma_encode_mc_rate() - fill mc rates
 * @shortgi: short guard interval
 * @chwidth: channel width
 * @chanmode: channel mode
 * @mhz: frequency
 * @mbpsx10_rate: rate in Mbps * 10
 * @nss: nss
 * @rate: encoded rate (output)
 *
 * Return: QDF status
 */
static QDF_STATUS wma_encode_mc_rate(uint32_t shortgi, uint32_t chwidth,
				     WLAN_PHY_MODE chanmode, A_UINT32 mhz,
				     int32_t mbpsx10_rate, uint8_t nss,
				     uint8_t *rate)
{
	int32_t ret = 0;

	/* nss input value: 0 - 1x1; 1 - 2x2; 2 - 3x3
	 * the phymode selection is based on the following assumptions:
	 * (1) if the app specifically requested 1x1 or 2x2 we honor it
	 * (2) if mbpsx10_rate <= 540: always use BG
	 * (3) 540 < mbpsx10_rate <= 2000: use 1x1 HT/VHT
	 * (4) 2000 < mbpsx10_rate: use 2x2 HT/VHT
	 */
	WMA_LOGE("%s: Input: nss = %d, chanmode = %d, "
		 "mbpsx10 = 0x%x, chwidth = %d, shortgi = %d",
		 __func__, nss, chanmode, mbpsx10_rate, chwidth, shortgi);
	if ((mbpsx10_rate & 0x40000000) && nss > 0) {
		/* bit 30 indicates a user-specified nss,
		 * bits 28 and 29 are used to encode that nss
		 */
		uint8_t user_nss = (mbpsx10_rate & 0x30000000) >> 28;

		nss = (user_nss < nss) ? user_nss : nss;
		/* zero out bits 28 - 30 to recover the actual rate */
		mbpsx10_rate &= ~0x70000000;
	} else if (mbpsx10_rate <= WMA_MCAST_1X1_CUT_OFF_RATE) {
		/* if the input rate is less than or equal to the
		 * 1x1 cutoff rate we use 1x1 only
		 */
		nss = 0;
	}
	/* encode NSS bits (bit 4, bit 5) */
	*rate = (nss & 0x3) << 4;
	/* if the mcast input rate exceeds the ofdm/cck max rate of 54 Mbps
	 * we try to choose the best ht/vht mcs rate
	 */
	if (540 < mbpsx10_rate) {
		/* cannot use ofdm/cck, choose closest ht/vht mcs rate */
		uint8_t rate_ht = *rate;
		uint8_t rate_vht = *rate;
		int32_t stream_rate_ht = 0;
		int32_t stream_rate_vht = 0;
		int32_t stream_rate = 0;

		ret = wma_fill_ht_mcast_rate(shortgi, chwidth, mbpsx10_rate,
					     nss, chanmode, &rate_ht,
					     &stream_rate_ht);
		if (ret != QDF_STATUS_SUCCESS) {
			stream_rate_ht = 0;
		}
		if (mhz < WMA_2_4_GHZ_MAX_FREQ) {
			/* not a 5 GHz frequency, so use the HT result */
			*rate = rate_ht;
			stream_rate = stream_rate_ht;
			goto ht_vht_done;
		}
		/* capable of 11AC mcast, so also search the vht tables */
		ret = wma_fill_vht_mcast_rate(shortgi, chwidth, mbpsx10_rate,
					      nss, chanmode, &rate_vht,
					      &stream_rate_vht);
		if (ret != QDF_STATUS_SUCCESS) {
			if (stream_rate_ht != 0)
				ret = QDF_STATUS_SUCCESS;
			*rate = rate_ht;
			stream_rate = stream_rate_ht;
			goto ht_vht_done;
		}
		if (stream_rate_ht == 0) {
			/* only the vht rate is available */
			*rate = rate_vht;
			stream_rate = stream_rate_vht;
		} else {
			/* set ht as the default first */
			*rate = rate_ht;
			stream_rate = stream_rate_ht;
			if (stream_rate < mbpsx10_rate) {
				if (mbpsx10_rate <= stream_rate_vht ||
				    stream_rate < stream_rate_vht) {
					*rate = rate_vht;
					stream_rate = stream_rate_vht;
				}
			} else {
				if (stream_rate_vht >= mbpsx10_rate &&
				    stream_rate_vht < stream_rate) {
					*rate = rate_vht;
					stream_rate = stream_rate_vht;
				}
			}
		}
ht_vht_done:
		WMA_LOGE("%s: NSS = %d, ucast_chanmode = %d, "
			 "freq = %d, input_rate = %d, chwidth = %d "
			 "rate = 0x%x, streaming_rate = %d",
			 __func__, nss, chanmode, mhz,
			 mbpsx10_rate, chwidth, *rate, stream_rate);
	} else {
		if (mbpsx10_rate > 0)
			ret = wma_fill_ofdm_cck_mcast_rate(mbpsx10_rate,
							   nss, rate);
		else
			*rate = 0xFF;

		WMA_LOGE("%s: NSS = %d, ucast_chanmode = %d, "
			 "input_rate = %d, rate = 0x%x",
			 __func__, nss, chanmode, mbpsx10_rate, *rate);
	}
	return ret;
}

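/*
 * The rate byte assembled by wma_encode_mc_rate() and its helpers ends up
 * laid out as: bits 0-3 legacy rate index or HT/VHT MCS, bits 4-5 NSS
 * (0-based), bits 6-7 preamble (0 = OFDM, 1 = CCK, 2 = HT, 3 = VHT). A value
 * of 0xFF is written when no positive rate was requested; the exact
 * interpretation of the encoding is up to the firmware side of the
 * WMI_VDEV_PARAM_MCAST/BCAST_DATA_RATE parameters it is sent with.
 */
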
/**
 * wma_set_bss_rate_flags() - set rate flags based on BSS capability
 * @iface: txrx_node ctx
 * @add_bss: add_bss params
 *
 * Return: none
 */
void wma_set_bss_rate_flags(struct wma_txrx_node *iface,
			    tpAddBssParams add_bss)
{
	iface->rate_flags = 0;

	if (add_bss->vhtCapable) {
		if (add_bss->ch_width == CH_WIDTH_80P80MHZ)
			iface->rate_flags |= eHAL_TX_RATE_VHT80;
		if (add_bss->ch_width == CH_WIDTH_160MHZ)
			iface->rate_flags |= eHAL_TX_RATE_VHT80;
		if (add_bss->ch_width == CH_WIDTH_80MHZ)
			iface->rate_flags |= eHAL_TX_RATE_VHT80;
		else if (add_bss->ch_width)
			iface->rate_flags |= eHAL_TX_RATE_VHT40;
		else
			iface->rate_flags |= eHAL_TX_RATE_VHT20;
	}
	/* avoid conflicting with the htCapable flag */
	else if (add_bss->htCapable) {
		if (add_bss->ch_width)
			iface->rate_flags |= eHAL_TX_RATE_HT40;
		else
			iface->rate_flags |= eHAL_TX_RATE_HT20;
	}

	if (add_bss->staContext.fShortGI20Mhz ||
	    add_bss->staContext.fShortGI40Mhz)
		iface->rate_flags |= eHAL_TX_RATE_SGI;

	if (!add_bss->htCapable && !add_bss->vhtCapable)
		iface->rate_flags = eHAL_TX_RATE_LEGACY;
}

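/*
 * Note that with the eHAL_TX_RATE_* flags available here, 160 MHz and
 * 80+80 MHz BSS widths are mapped to the same eHAL_TX_RATE_VHT80 flag as
 * plain 80 MHz, so consumers of iface->rate_flags only learn that the link
 * is at least VHT80 wide.
 */
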
/**
 * wmi_unified_send_txbf() - set txbf parameter to fw
 * @wma: wma handle
 * @params: txbf parameters
 *
 * Return: 0 for success or error code
 */
int32_t wmi_unified_send_txbf(tp_wma_handle wma, tpAddStaParams params)
{
	wmi_vdev_txbf_en txbf_en = {0};

	/* SU beamformee is enabled when the peer is a beamformer and we are
	 * beamformee capable (enabled in both the INI and the firmware)
	 */
	txbf_en.sutxbfee = params->vhtTxBFCapable;
	txbf_en.mutxbfee = params->vhtTxMUBformeeCapable;
	txbf_en.sutxbfer = params->enable_su_tx_bformer;

	/* When MU TxBfee is set, SU TxBfee must be set by default */
	if (txbf_en.mutxbfee)
		txbf_en.sutxbfee = txbf_en.mutxbfee;

	WMA_LOGD("txbf_en.sutxbfee %d txbf_en.mutxbfee %d, sutxbfer %d",
		 txbf_en.sutxbfee, txbf_en.mutxbfee, txbf_en.sutxbfer);

	return wma_vdev_set_param(wma->wmi_handle,
				  params->smesessionId,
				  WMI_VDEV_PARAM_TXBF,
				  *((A_UINT8 *) &txbf_en));
}

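/*
 * wmi_vdev_txbf_en appears to be a bitfield structure that fits in a single
 * octet, which is why the WMI_VDEV_PARAM_TXBF value above is produced by
 * re-reading the structure through an A_UINT8 pointer instead of assembling
 * the bitmap manually.
 */
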
/**
 * wma_data_tx_ack_work_handler() - process data tx ack
 * @ack_work: work structure
 *
 * Return: none
 */
static void wma_data_tx_ack_work_handler(void *ack_work)
{
	struct wma_tx_ack_work_ctx *work;
	tp_wma_handle wma_handle;
	wma_tx_ota_comp_callback ack_cb;

	if (cds_is_load_or_unload_in_progress()) {
		WMA_LOGE("%s: Driver load/unload in progress", __func__);
		return;
	}

	work = (struct wma_tx_ack_work_ctx *)ack_work;

	wma_handle = work->wma_handle;
	ack_cb = wma_handle->umac_data_ota_ack_cb;

	if (work->status)
		WMA_LOGE("Data Tx Ack Cb Status %d", work->status);
	else
		WMA_LOGD("Data Tx Ack Cb Status %d", work->status);

	/* Call the Ack Cb registered by UMAC */
	if (ack_cb)
		ack_cb((tpAniSirGlobal) (wma_handle->mac_context), NULL,
		       work->status ? 0 : 1, NULL);
	else
		WMA_LOGE("Data Tx Ack Cb is NULL");

	wma_handle->umac_data_ota_ack_cb = NULL;
	wma_handle->last_umac_data_nbuf = NULL;
	qdf_mem_free(work);
	wma_handle->ack_work_ctx = NULL;
}

/**
 * wma_data_tx_ack_comp_hdlr() - handles tx data ack completion
 * @wma_context: context with which the handler is registered
 * @netbuf: tx data nbuf
 * @status: status of tx completion
 *
 * This is the cb registered with TxRx for
 * Ack Complete.
 *
 * Return: none
 */
void
wma_data_tx_ack_comp_hdlr(void *wma_context, qdf_nbuf_t netbuf, int32_t status)
{
	void *pdev;
	tp_wma_handle wma_handle = (tp_wma_handle) wma_context;

	if (NULL == wma_handle) {
		WMA_LOGE("%s: Invalid WMA Handle", __func__);
		return;
	}

	pdev = cds_get_context(QDF_MODULE_ID_TXRX);

	if (NULL == pdev) {
		WMA_LOGE("%s: Failed to get pdev", __func__);
		return;
	}

	/*
	 * if the netbuf does not match the pending nbuf then just free the
	 * netbuf and do not call the ack cb
	 */
	if (wma_handle->last_umac_data_nbuf != netbuf) {
		if (wma_handle->umac_data_ota_ack_cb) {
			WMA_LOGE("%s: nbuf does not match but umac_data_ota_ack_cb is not null",
				 __func__);
		} else {
			WMA_LOGE("%s: nbuf does not match and umac_data_ota_ack_cb is also null",
				 __func__);
		}
		goto free_nbuf;
	}

	if (wma_handle && wma_handle->umac_data_ota_ack_cb) {
		struct wma_tx_ack_work_ctx *ack_work;

		ack_work = qdf_mem_malloc(sizeof(struct wma_tx_ack_work_ctx));
		wma_handle->ack_work_ctx = ack_work;
		if (ack_work) {
			ack_work->wma_handle = wma_handle;
			ack_work->sub_type = 0;
			ack_work->status = status;

			qdf_create_work(0, &ack_work->ack_cmp_work,
					wma_data_tx_ack_work_handler,
					ack_work);
			qdf_sched_work(0, &ack_work->ack_cmp_work);
		}
	}

free_nbuf:
	/* unmap and free the tx buf, as txrx does not take care of it */
	qdf_nbuf_unmap_single(wma_handle->qdf_dev, netbuf, QDF_DMA_TO_DEVICE);
	qdf_nbuf_free(netbuf);
}

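/*
 * The handler above never invokes the UMAC callback directly; it defers the
 * call to wma_data_tx_ack_work_handler() through qdf_create_work() and
 * qdf_sched_work(), presumably because the tx-completion path is not a
 * suitable context for running the UMAC callback synchronously.
 */
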
/**
 * wma_update_txrx_chainmask() - update txrx chainmask
 * @num_rf_chains: number of rf chains
 * @cmd_value: command value
 *
 * Return: none
 */
void wma_update_txrx_chainmask(int num_rf_chains, int *cmd_value)
{
	if (*cmd_value > WMA_MAX_RF_CHAINS(num_rf_chains)) {
		WMA_LOGE("%s: Chainmask value exceeds the maximum"
			 " supported range setting it to"
			 " maximum value. Requested value %d"
			 " Updated value %d", __func__, *cmd_value,
			 WMA_MAX_RF_CHAINS(num_rf_chains));
		*cmd_value = WMA_MAX_RF_CHAINS(num_rf_chains);
	} else if (*cmd_value < WMA_MIN_RF_CHAINS) {
		WMA_LOGE("%s: Chainmask value is less than the minimum"
			 " supported range setting it to"
			 " minimum value. Requested value %d"
			 " Updated value %d", __func__, *cmd_value,
			 WMA_MIN_RF_CHAINS);
		*cmd_value = WMA_MIN_RF_CHAINS;
	}
}

/**
 * wma_peer_state_change_event_handler() - peer state change event handler
 * @handle: wma handle
 * @event_buff: event buffer
 * @len: length of buffer
 *
 * This event handler unpauses the vdev if the peer state changes to
 * AUTHORIZED.
 *
 * Return: 0 for success or error code
 */
int wma_peer_state_change_event_handler(void *handle,
					uint8_t *event_buff,
					uint32_t len)
{
	WMI_PEER_STATE_EVENTID_param_tlvs *param_buf;
	wmi_peer_state_event_fixed_param *event;
	struct cdp_vdev *vdev;
	tp_wma_handle wma_handle = (tp_wma_handle) handle;

	if (!event_buff) {
		WMA_LOGE("%s: Received NULL event ptr from FW", __func__);
		return -EINVAL;
	}
	param_buf = (WMI_PEER_STATE_EVENTID_param_tlvs *) event_buff;
	if (!param_buf) {
		WMA_LOGE("%s: Received NULL buf ptr from FW", __func__);
		return -ENOMEM;
	}

	event = param_buf->fixed_param;
	vdev = wma_find_vdev_by_id(wma_handle, event->vdev_id);
	if (NULL == vdev) {
		WMA_LOGP("%s: Couldn't find vdev for vdev_id: %d",
			 __func__, event->vdev_id);
		return -EINVAL;
	}

	if ((cdp_get_opmode(cds_get_context(QDF_MODULE_ID_SOC),
			    vdev) ==
	     wlan_op_mode_sta) &&
	    event->state == WMI_PEER_STATE_AUTHORIZED) {
		/*
		 * set the event so that hdd
		 * can proceed and unpause the tx queue
		 */
#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
		if (!wma_handle->peer_authorized_cb) {
			WMA_LOGE("%s: peer authorized cb not registered",
				 __func__);
			return -EINVAL;
		}
		wma_handle->peer_authorized_cb(event->vdev_id);
#endif
	}

	return 0;
}

/**
 * wma_set_enable_disable_mcc_adaptive_scheduler() - enable/disable mcc adaptive scheduler
 * @mcc_adaptive_scheduler: enable/disable
 *
 * This function enables/disables the MCC adaptive scheduler in fw.
 *
 * Return: QDF_STATUS_SUCCESS for success or error code
 */
QDF_STATUS wma_set_enable_disable_mcc_adaptive_scheduler(uint32_t
						mcc_adaptive_scheduler)
{
	tp_wma_handle wma = NULL;
	uint32_t pdev_id;

	wma = cds_get_context(QDF_MODULE_ID_WMA);
	if (NULL == wma) {
		WMA_LOGE("%s : Failed to get wma", __func__);
		return QDF_STATUS_E_FAULT;
	}

	/*
	 * Since there could be up to two instances of OCS in FW (one per MAC),
	 * FW provides the option of enabling and disabling MAS on a per MAC
	 * basis. But, Host does not have enable/disable option for individual
	 * MACs. So, FW agreed for the Host to send down a 'pdev id' of 0.
	 * When 'pdev id' of 0 is used, FW treats this as a SOC level command
	 * and applies the same value to both MACs. Irrespective of the value
	 * of 'WMI_SERVICE_DEPRECATED_REPLACE', the pdev id needs to be '0'
	 * (SOC level) for WMI_RESMGR_ADAPTIVE_OCS_ENABLE_DISABLE_CMDID
	 */
	pdev_id = WMI_PDEV_ID_SOC;

	return wmi_unified_set_enable_disable_mcc_adaptive_scheduler_cmd(
			wma->wmi_handle, mcc_adaptive_scheduler, pdev_id);
}

/**
 * wma_set_mcc_channel_time_latency() - set MCC channel time latency
 * @wma: wma handle
 * @mcc_channel: mcc channel
 * @mcc_channel_time_latency: MCC channel time latency
 *
 * Currently used to set the time latency for an MCC vdev/adapter, identified
 * by its operating channel number. The info is provided at run time using the
 * iwpriv command: iwpriv <wlan0 | p2p0> setMccLatency <latency in ms>.
 *
 * Return: QDF status
 */
QDF_STATUS wma_set_mcc_channel_time_latency
	(tp_wma_handle wma,
	uint32_t mcc_channel, uint32_t mcc_channel_time_latency)
{
	uint32_t cfg_val = 0;
	struct sAniSirGlobal *pMac = NULL;
	uint32_t channel1 = mcc_channel;
	uint32_t chan1_freq = cds_chan_to_freq(channel1);

	if (!wma) {
		WMA_LOGE("%s:NULL wma ptr. Exiting", __func__);
		QDF_ASSERT(0);
		return QDF_STATUS_E_FAILURE;
	}
	pMac = cds_get_context(QDF_MODULE_ID_PE);
	if (!pMac) {
		WMA_LOGE("%s:NULL pMac ptr. Exiting", __func__);
		QDF_ASSERT(0);
		return QDF_STATUS_E_FAILURE;
	}

	/* First step is to confirm if MCC is active */
	if (!lim_is_in_mcc(pMac)) {
		WMA_LOGE("%s: MCC is not active. Exiting", __func__);
		QDF_ASSERT(0);
		return QDF_STATUS_E_FAILURE;
	}
	/* Confirm the MCC adaptive scheduler feature is disabled */
	if (wlan_cfg_get_int(pMac, WNI_CFG_ENABLE_MCC_ADAPTIVE_SCHED,
			     &cfg_val) == eSIR_SUCCESS) {
		if (cfg_val == WNI_CFG_ENABLE_MCC_ADAPTIVE_SCHED_STAMAX) {
			WMA_LOGD("%s: Can't set channel latency while MCC "
				 "ADAPTIVE SCHED is enabled. Exit", __func__);
			return QDF_STATUS_SUCCESS;
		}
	} else {
		WMA_LOGE("%s: Failed to get value for MCC_ADAPTIVE_SCHED, "
			 "Exit w/o setting latency", __func__);
		QDF_ASSERT(0);
		return QDF_STATUS_E_FAILURE;
	}

	return wmi_unified_set_mcc_channel_time_latency_cmd(wma->wmi_handle,
					chan1_freq,
					mcc_channel_time_latency);
}

/**
 * wma_set_mcc_channel_time_quota() - set MCC channel time quota
 * @wma: wma handle
 * @adapter_1_chan_number: adapter 1 channel number
 * @adapter_1_quota: adapter 1 quota
 * @adapter_2_chan_number: adapter 2 channel number
 *
 * Currently used to set the time quota for 2 MCC vdevs/adapters using
 * (operating channel, quota) for each mode. The info is provided at run time
 * using the iwpriv command: iwpriv <wlan0 | p2p0> setMccQuota <quota in ms>.
 * Note: the quota provided in the command is for the same mode in the cmd.
 * HDD checks if MCC mode is active, gets the second mode and its operating
 * channel. The quota for the 2nd role is calculated as 100 - quota of the
 * first mode.
 *
 * Return: QDF status
 */
QDF_STATUS wma_set_mcc_channel_time_quota
	(tp_wma_handle wma,
	uint32_t adapter_1_chan_number,
	uint32_t adapter_1_quota, uint32_t adapter_2_chan_number)
{
	uint32_t cfg_val = 0;
	struct sAniSirGlobal *pMac = NULL;
	uint32_t chan1_freq = cds_chan_to_freq(adapter_1_chan_number);
	uint32_t chan2_freq = cds_chan_to_freq(adapter_2_chan_number);

	if (!wma) {
		WMA_LOGE("%s:NULL wma ptr. Exiting", __func__);
		QDF_ASSERT(0);
		return QDF_STATUS_E_FAILURE;
	}
	pMac = cds_get_context(QDF_MODULE_ID_PE);
	if (!pMac) {
		WMA_LOGE("%s:NULL pMac ptr. Exiting", __func__);
		QDF_ASSERT(0);
		return QDF_STATUS_E_FAILURE;
	}

	/* First step is to confirm if MCC is active */
	if (!lim_is_in_mcc(pMac)) {
		WMA_LOGD("%s: MCC is not active. Exiting", __func__);
		QDF_ASSERT(0);
		return QDF_STATUS_E_FAILURE;
	}

	/* Confirm the MCC adaptive scheduler feature is disabled */
	if (wlan_cfg_get_int(pMac, WNI_CFG_ENABLE_MCC_ADAPTIVE_SCHED,
			     &cfg_val) == eSIR_SUCCESS) {
		if (cfg_val == WNI_CFG_ENABLE_MCC_ADAPTIVE_SCHED_STAMAX) {
			WMA_LOGD("%s: Can't set channel quota while "
				 "MCC_ADAPTIVE_SCHED is enabled. Exit",
				 __func__);
			return QDF_STATUS_SUCCESS;
		}
	} else {
		WMA_LOGE("%s: Failed to retrieve "
			 "WNI_CFG_ENABLE_MCC_ADAPTIVE_SCHED. Exit", __func__);
		QDF_ASSERT(0);
		return QDF_STATUS_E_FAILURE;
	}

	return wmi_unified_set_mcc_channel_time_quota_cmd(wma->wmi_handle,
					chan1_freq,
					adapter_1_quota,
					chan2_freq);
}

/**
 * wma_set_linkstate() - set wma linkstate
 * @wma: wma handle
 * @params: link state params
 *
 * Return: none
 */
void wma_set_linkstate(tp_wma_handle wma, tpLinkStateParams params)
{
	struct cdp_pdev *pdev;
	struct cdp_vdev *vdev;
	void *peer;
	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
	uint8_t vdev_id, peer_id;
	bool roam_synch_in_progress = false;
	QDF_STATUS status;

	params->status = true;
	WMA_LOGD("%s: state %d selfmac %pM", __func__,
		 params->state, params->selfMacAddr);
	if ((params->state != eSIR_LINK_PREASSOC_STATE) &&
	    (params->state != eSIR_LINK_DOWN_STATE)) {
		WMA_LOGD("%s: unsupported link state %d",
			 __func__, params->state);
		goto out;
	}

	pdev = cds_get_context(QDF_MODULE_ID_TXRX);

	if (NULL == pdev) {
		WMA_LOGE("%s: Unable to get TXRX context", __func__);
		goto out;
	}

	vdev = wma_find_vdev_by_addr(wma, params->selfMacAddr, &vdev_id);
	if (!vdev) {
		WMA_LOGP("%s: vdev not found for addr: %pM",
			 __func__, params->selfMacAddr);
		goto out;
	}

	if (wma_is_vdev_in_ap_mode(wma, vdev_id)) {
		WMA_LOGD("%s: Ignoring set link req in ap mode", __func__);
		goto out;
	}

	if (params->state == eSIR_LINK_PREASSOC_STATE) {
		if (wma_is_roam_synch_in_progress(wma, vdev_id))
			roam_synch_in_progress = true;
		status = wma_create_peer(wma, pdev, vdev, params->bssid,
					 WMI_PEER_TYPE_DEFAULT, vdev_id,
					 roam_synch_in_progress);
		if (status != QDF_STATUS_SUCCESS)
			WMA_LOGE("%s: Unable to create peer", __func__);
		if (roam_synch_in_progress)
			return;
	} else {
		WMA_LOGD("%s, vdev_id: %d, pausing tx_ll_queue for VDEV_STOP",
			 __func__, vdev_id);
		cdp_fc_vdev_pause(soc,
				  wma->interfaces[vdev_id].handle,
				  OL_TXQ_PAUSE_REASON_VDEV_STOP);
		wma->interfaces[vdev_id].pause_bitmap |= (1 << PAUSE_TYPE_HOST);
		if (wma_send_vdev_stop_to_fw(wma, vdev_id)) {
			WMA_LOGP("%s: %d Failed to send vdev stop",
				 __func__, __LINE__);
		}
		peer = cdp_peer_find_by_addr(soc, pdev,
					     params->bssid, &peer_id);
		if (peer) {
			WMA_LOGP("%s: Deleting peer %pM vdev id %d",
				 __func__, params->bssid, vdev_id);
			wma_remove_peer(wma, params->bssid, vdev_id, peer,
					roam_synch_in_progress);
		}
	}
out:
	wma_send_msg(wma, WMA_SET_LINK_STATE_RSP, (void *)params, 0);
}

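/*
 * wma_set_linkstate() services only two states: for eSIR_LINK_PREASSOC_STATE
 * it creates the BSS peer (and returns early, without posting a response,
 * when a roam synch is in progress), while for eSIR_LINK_DOWN_STATE it
 * pauses the vdev tx queues, sends a vdev stop to the firmware and removes
 * the BSS peer. All other paths fall through to the WMA_SET_LINK_STATE_RSP
 * message posted at the end.
 */
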
/**
 * wma_unpause_vdev() - unpause all vdevs
 * @wma: wma handle
 *
 * Unpause all vdevs after resume/coming out of wow mode.
 *
 * Return: none
 */
void wma_unpause_vdev(tp_wma_handle wma)
{
	int8_t vdev_id;
	struct wma_txrx_node *iface;

	for (vdev_id = 0; vdev_id < wma->max_bssid; vdev_id++) {
		if (!wma->interfaces[vdev_id].handle)
			continue;

#if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL) || defined(QCA_LL_TX_FLOW_CONTROL_V2)
		/* When the host resumes, by default, unpause all active vdevs */
		if (wma->interfaces[vdev_id].pause_bitmap) {
			cdp_fc_vdev_unpause(cds_get_context(QDF_MODULE_ID_SOC),
					    wma->interfaces[vdev_id].handle,
					    0xffffffff);
			wma->interfaces[vdev_id].pause_bitmap = 0;
		}
#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */

		iface = &wma->interfaces[vdev_id];
		iface->conn_state = false;
	}
}

/**
 * wma_process_rate_update_indicate() - rate update indication
 * @wma: wma handle
 * @pRateUpdateParams: rate update params
 *
 * This function updates the rate and short GI interval in fw based on params
 * sent by SME.
 *
 * Return: QDF status
 */
QDF_STATUS wma_process_rate_update_indicate(tp_wma_handle wma,
					    tSirRateUpdateInd *
					    pRateUpdateParams)
{
	int32_t ret = 0;
	uint8_t vdev_id = 0;
	void *pdev;
	int32_t mbpsx10_rate = -1;
	uint32_t paramId;
	uint8_t rate = 0;
	uint32_t short_gi;
	struct wma_txrx_node *intr = wma->interfaces;
	QDF_STATUS status;

	/* Get the vdev id */
	pdev = wma_find_vdev_by_addr(wma, pRateUpdateParams->bssid.bytes,
				     &vdev_id);
	if (!pdev) {
		WMA_LOGE("vdev handle is invalid for %pM",
			 pRateUpdateParams->bssid.bytes);
		qdf_mem_free(pRateUpdateParams);
		return QDF_STATUS_E_INVAL;
	}
	short_gi = intr[vdev_id].config.shortgi;
	if (short_gi == 0)
		short_gi = (intr[vdev_id].rate_flags & eHAL_TX_RATE_SGI) ?
			   true : false;
	/* First check if the reliable TX mcast rate is used. If not, check
	 * the bcast rate, then the mcast rate. The mcast rate is saved in
	 * mcastDataRate24GHz.
	 */
	if (pRateUpdateParams->reliableMcastDataRateTxFlag > 0) {
		mbpsx10_rate = pRateUpdateParams->reliableMcastDataRate;
		paramId = WMI_VDEV_PARAM_MCAST_DATA_RATE;
		if (pRateUpdateParams->
		    reliableMcastDataRateTxFlag & eHAL_TX_RATE_SGI)
			short_gi = 1;	/* upper layer specified short GI */
	} else if (pRateUpdateParams->bcastDataRate > -1) {
		mbpsx10_rate = pRateUpdateParams->bcastDataRate;
		paramId = WMI_VDEV_PARAM_BCAST_DATA_RATE;
	} else {
		mbpsx10_rate = pRateUpdateParams->mcastDataRate24GHz;
		paramId = WMI_VDEV_PARAM_MCAST_DATA_RATE;
		if (pRateUpdateParams->
		    mcastDataRate24GHzTxFlag & eHAL_TX_RATE_SGI)
			short_gi = 1;	/* upper layer specified short GI */
	}
	WMA_LOGE("%s: dev_id = %d, dev_type = %d, dev_mode = %d, "
		 "mac = %pM, config.shortgi = %d, rate_flags = 0x%x",
		 __func__, vdev_id, intr[vdev_id].type,
		 pRateUpdateParams->dev_mode, pRateUpdateParams->bssid.bytes,
		 intr[vdev_id].config.shortgi, intr[vdev_id].rate_flags);
	ret = wma_encode_mc_rate(short_gi, intr[vdev_id].config.chwidth,
				 intr[vdev_id].chanmode, intr[vdev_id].mhz,
				 mbpsx10_rate, pRateUpdateParams->nss, &rate);
	if (ret != QDF_STATUS_SUCCESS) {
		WMA_LOGE("%s: Error, Invalid input rate value", __func__);
		qdf_mem_free(pRateUpdateParams);
		return ret;
	}
	status = wma_vdev_set_param(wma->wmi_handle, vdev_id,
				    WMI_VDEV_PARAM_SGI, short_gi);
	if (QDF_IS_STATUS_ERROR(status)) {
		WMA_LOGE("%s: Failed to Set WMI_VDEV_PARAM_SGI (%d), status = %d",
			 __func__, short_gi, status);
		qdf_mem_free(pRateUpdateParams);
		return status;
	}
	status = wma_vdev_set_param(wma->wmi_handle,
				    vdev_id, paramId, rate);
	qdf_mem_free(pRateUpdateParams);
	if (QDF_IS_STATUS_ERROR(status)) {
		WMA_LOGE("%s: Failed to Set rate, status = %d", __func__, status);
		return status;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * wma_mgmt_tx_ack_work_handler() - mgmt tx ack work queue
 * @ack_work: work structure
 *
 * Return: none
 */
static void wma_mgmt_tx_ack_work_handler(void *ack_work)
{
	struct wma_tx_ack_work_ctx *work;
	tp_wma_handle wma_handle;
	wma_tx_ota_comp_callback ack_cb;

	if (cds_is_load_or_unload_in_progress()) {
		WMA_LOGE("%s: Driver load/unload in progress", __func__);
		return;
	}

	work = (struct wma_tx_ack_work_ctx *)ack_work;

	wma_handle = work->wma_handle;
	ack_cb = wma_handle->umac_ota_ack_cb[work->sub_type];

	WMA_LOGD("Tx Ack Cb SubType %d Status %d",
		 work->sub_type, work->status);

	/* Call the Ack Cb registered by UMAC */
	ack_cb((tpAniSirGlobal) (wma_handle->mac_context), NULL,
	       work->status ? 0 : 1, NULL);

	qdf_mem_free(work);
	wma_handle->ack_work_ctx = NULL;
}

/**
 * wma_mgmt_tx_comp_conf_ind() - post mgmt tx complete indication to PE
 * @wma_handle: pointer to WMA handle
 * @sub_type: tx mgmt frame sub type
 * @status: mgmt frame tx status
 *
 * This function sends a mgmt completion confirmation to PE for deauth
 * and disassoc frames.
 *
 * Return: none
 */
static void
wma_mgmt_tx_comp_conf_ind(tp_wma_handle wma_handle, uint8_t sub_type,
			  int32_t status)
{
	int32_t tx_comp_status;

	tx_comp_status = status ? 0 : 1;
	if (sub_type == SIR_MAC_MGMT_DISASSOC) {
		wma_send_msg(wma_handle, WMA_DISASSOC_TX_COMP, NULL,
			     tx_comp_status);
	} else if (sub_type == SIR_MAC_MGMT_DEAUTH) {
		wma_send_msg(wma_handle, WMA_DEAUTH_TX_COMP, NULL,
			     tx_comp_status);
	}
}

/**
 * wma_mgmt_tx_ack_comp_hdlr() - handles tx ack mgmt completion
 * @wma_context: context with which the handler is registered
 * @netbuf: tx mgmt nbuf
 * @status: status of tx completion
 *
 * This is the callback registered with TxRx for
 * Ack Complete.
 *
 * Return: none
 */
static void
wma_mgmt_tx_ack_comp_hdlr(void *wma_context, qdf_nbuf_t netbuf, int32_t status)
{
	tpSirMacFrameCtl pFc = (tpSirMacFrameCtl) (qdf_nbuf_data(netbuf));
	tp_wma_handle wma_handle = (tp_wma_handle) wma_context;

	if (wma_handle && wma_handle->umac_ota_ack_cb[pFc->subType]) {
		if ((pFc->subType == SIR_MAC_MGMT_DISASSOC) ||
		    (pFc->subType == SIR_MAC_MGMT_DEAUTH)) {
			wma_mgmt_tx_comp_conf_ind(wma_handle,
						  (uint8_t) pFc->subType,
						  status);
		} else {
			struct wma_tx_ack_work_ctx *ack_work;

			ack_work =
				qdf_mem_malloc(sizeof(struct wma_tx_ack_work_ctx));

			if (ack_work) {
				ack_work->wma_handle = wma_handle;
				ack_work->sub_type = pFc->subType;
				ack_work->status = status;

				qdf_create_work(0, &ack_work->ack_cmp_work,
						wma_mgmt_tx_ack_work_handler,
						ack_work);

				qdf_sched_work(0, &ack_work->ack_cmp_work);
			}
		}
	}
}

1467/**
1468 * wma_mgmt_tx_dload_comp_hldr() - handles tx mgmt completion
1469 * @wma_context: wma context with which the handler is registered
1470 * @netbuf: tx mgmt nbuf
1471 * @status: status of tx completion
1472 *
1473 * This function calls the registered download complete callback while sending a mgmt packet.
1474 *
1475 * Return: none
1476 */
1477static void
Nirav Shahcbc6d722016-03-01 16:24:53 +05301478wma_mgmt_tx_dload_comp_hldr(void *wma_context, qdf_nbuf_t netbuf,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001479 int32_t status)
1480{
Anurag Chouhance0dc992016-02-16 18:18:03 +05301481 QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001482
1483 tp_wma_handle wma_handle = (tp_wma_handle) wma_context;
1484 void *mac_context = wma_handle->mac_context;
1485
1486 WMA_LOGD("Tx Complete Status %d", status);
1487
1488 if (!wma_handle->tx_frm_download_comp_cb) {
1489 WMA_LOGE("Tx Complete Cb not registered by umac");
1490 return;
1491 }
1492
1493 /* Call Tx Mgmt Complete Callback registered by umac */
1494 wma_handle->tx_frm_download_comp_cb(mac_context, netbuf, 0);
1495
1496 /* Reset Callback */
1497 wma_handle->tx_frm_download_comp_cb = NULL;
1498
1499 /* Set the Tx Mgmt Complete Event */
Anurag Chouhance0dc992016-02-16 18:18:03 +05301500 qdf_status = qdf_event_set(&wma_handle->tx_frm_download_comp_event);
1501 if (!QDF_IS_STATUS_SUCCESS(qdf_status))
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001502 WMA_LOGP("%s: Event Set failed - tx_frm_comp_event", __func__);
1503}
1504
1505/**
1506 * wma_tx_attach() - attach tx related callbacks
1507 * @wma_handle: wma handle
1508 *
1509 * Attaches the tx function with the underlying data path layer.
1510 *
Anurag Chouhanf04e84f2016-03-03 10:12:12 +05301511 * Return: QDF status
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001512 */
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301513QDF_STATUS wma_tx_attach(tp_wma_handle wma_handle)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001514{
1515	/* Get the CDS Context */
1516 p_cds_contextType cds_handle =
1517 (p_cds_contextType) (wma_handle->cds_context);
1518
1519 /* Get the txRx Pdev handle */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001520 struct cdp_pdev *txrx_pdev = cds_handle->pdev_txrx_ctx;
Leo Chang96464902016-10-28 11:10:54 -07001521 void *soc = cds_get_context(QDF_MODULE_ID_SOC);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001522
1523 /* Register for Tx Management Frames */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001524 cdp_mgmt_tx_cb_set(soc, txrx_pdev,
1525 GENERIC_NODOWLOAD_ACK_COMP_INDEX,
1526 NULL, wma_mgmt_tx_ack_comp_hdlr, wma_handle);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001527
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001528 cdp_mgmt_tx_cb_set(soc, txrx_pdev,
1529 GENERIC_DOWNLD_COMP_NOACK_COMP_INDEX,
1530 wma_mgmt_tx_dload_comp_hldr, NULL, wma_handle);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001531
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001532 cdp_mgmt_tx_cb_set(soc, txrx_pdev,
1533 GENERIC_DOWNLD_COMP_ACK_COMP_INDEX,
1534 wma_mgmt_tx_dload_comp_hldr,
1535 wma_mgmt_tx_ack_comp_hdlr, wma_handle);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001536
1537 /* Store the Mac Context */
1538 wma_handle->mac_context = cds_handle->pMACContext;
1539
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301540 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001541}
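/*
 * Illustrative sketch (not a new code path, just a restatement of logic that
 * already exists in this file): the three registrations above pair each mgmt
 * frame index with the callbacks that fire for it:
 *
 *	GENERIC_NODOWLOAD_ACK_COMP_INDEX     - ack cb only
 *	GENERIC_DOWNLD_COMP_NOACK_COMP_INDEX - download complete cb only
 *	GENERIC_DOWNLD_COMP_ACK_COMP_INDEX   - both callbacks
 *
 * wma_tx_packet() later selects the index roughly as below (simplified
 * restatement of the selection logic found further down in this file):
 *
 *	if (tx_frm_ota_comp_cb)
 *		tx_frm_index = downld_comp_required ?
 *			GENERIC_DOWNLD_COMP_ACK_COMP_INDEX :
 *			GENERIC_NODOWLOAD_ACK_COMP_INDEX;
 *	else
 *		tx_frm_index = downld_comp_required ?
 *			GENERIC_DOWNLD_COMP_NOACK_COMP_INDEX :
 *			GENERIC_NODOWNLD_NOACK_COMP_INDEX;
 */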
1542
1543/**
1544 * wma_tx_detach() - detach tx related callbacks
1545 * @wma_handle: wma handle
1546 *
1547 * Deregister with TxRx for Tx Mgmt Download and Ack completion.
1548 *
Anurag Chouhanf04e84f2016-03-03 10:12:12 +05301549 * Return: QDF status
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001550 */
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301551QDF_STATUS wma_tx_detach(tp_wma_handle wma_handle)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001552{
1553 uint32_t frame_index = 0;
Nishank Aggarwala13b61d2016-12-01 12:53:58 +05301554 void *soc = cds_get_context(QDF_MODULE_ID_SOC);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001555
1556	/* Get the CDS Context */
1557 p_cds_contextType cds_handle =
1558 (p_cds_contextType) (wma_handle->cds_context);
1559
1560 /* Get the txRx Pdev handle */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001561 struct cdp_pdev *txrx_pdev = cds_handle->pdev_txrx_ctx;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001562
Nishank Aggarwala13b61d2016-12-01 12:53:58 +05301563 if (!soc) {
1564 WMA_LOGE("%s:SOC context is NULL", __func__);
1565 return QDF_STATUS_E_FAILURE;
1566 }
1567
Himanshu Agarwale1086fa2015-10-19 18:05:15 +05301568 if (txrx_pdev) {
1569 /* Deregister with TxRx for Tx Mgmt completion call back */
1570 for (frame_index = 0; frame_index < FRAME_INDEX_MAX;
1571 frame_index++) {
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001572 cdp_mgmt_tx_cb_set(soc,
1573 txrx_pdev,
1574 frame_index, NULL, NULL, txrx_pdev);
Himanshu Agarwale1086fa2015-10-19 18:05:15 +05301575 }
1576 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001577
1578 /* Reset Tx Frm Callbacks */
1579 wma_handle->tx_frm_download_comp_cb = NULL;
1580
1581 /* Reset Tx Data Frame Ack Cb */
1582 wma_handle->umac_data_ota_ack_cb = NULL;
1583
1584 /* Reset last Tx Data Frame nbuf ptr */
1585 wma_handle->last_umac_data_nbuf = NULL;
1586
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301587 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001588}
1589
Poddar, Siddarth5a91f5b2016-04-28 12:24:10 +05301590#if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL) || \
1591 defined(QCA_LL_TX_FLOW_CONTROL_V2) || defined(CONFIG_HL_SUPPORT)
1592
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001593/**
1594 * wma_mcc_vdev_tx_pause_evt_handler() - pause event handler
1595 * @handle: wma handle
1596 * @event: event buffer
1597 * @len: data length
1598 *
1599 * This function handles the tx pause event from fw and pauses/unpauses
1600 * the vdev accordingly.
1601 *
1602 * Return: 0 for success or error code.
1603 */
1604int wma_mcc_vdev_tx_pause_evt_handler(void *handle, uint8_t *event,
1605 uint32_t len)
1606{
1607 tp_wma_handle wma = (tp_wma_handle) handle;
1608 WMI_TX_PAUSE_EVENTID_param_tlvs *param_buf;
1609 wmi_tx_pause_event_fixed_param *wmi_event;
1610 uint8_t vdev_id;
1611 A_UINT32 vdev_map;
Leo Chang96464902016-10-28 11:10:54 -07001612 void *soc = cds_get_context(QDF_MODULE_ID_SOC);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001613
1614 param_buf = (WMI_TX_PAUSE_EVENTID_param_tlvs *) event;
1615 if (!param_buf) {
1616 WMA_LOGE("Invalid roam event buffer");
1617 return -EINVAL;
1618 }
1619
Mukul Sharma4c60a7e2017-03-06 19:42:18 +05301620 if (pmo_ucfg_get_wow_bus_suspend(wma->psoc)) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001621 WMA_LOGD(" Suspend is in progress: Pause/Unpause Tx is NoOp");
1622 return 0;
1623 }
1624
Nishank Aggarwala13b61d2016-12-01 12:53:58 +05301625 if (!soc) {
1626 WMA_LOGE("%s:SOC context is NULL", __func__);
1627 return -EINVAL;
1628 }
1629
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001630 wmi_event = param_buf->fixed_param;
1631 vdev_map = wmi_event->vdev_map;
1632	/* FW maps each vdev ID to a bit in vdev_map:
1633	 * vdev_map |= (1 << vdev_id)
1634	 * so the host must unmap the bits back to vdev IDs */
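	/*
	 * Worked example (illustrative): if vdev_map is 0x5 (binary 0101),
	 * the loop below visits vdev 0 and vdev 2, shifting vdev_map right
	 * once per iteration:
	 *
	 *	iteration 0: vdev_map = 0x5, bit 0 set   -> handle vdev 0
	 *	iteration 1: vdev_map = 0x2, bit 0 clear -> skip
	 *	iteration 2: vdev_map = 0x1, bit 0 set   -> handle vdev 2
	 */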
1635 for (vdev_id = 0; vdev_map != 0; vdev_id++) {
1636 if (!(vdev_map & 0x1)) {
1637 /* No Vdev */
1638 } else {
1639 if (!wma->interfaces[vdev_id].handle) {
1640 WMA_LOGE("%s: invalid vdev ID %d", __func__,
1641 vdev_id);
1642 /* Test Next VDEV */
1643 vdev_map >>= 1;
1644 continue;
1645 }
1646
1647 /* PAUSE action, add bitmap */
1648 if (ACTION_PAUSE == wmi_event->action) {
1649 /*
1650 * Now only support per-dev pause so it is not
1651 * necessary to pause a paused queue again.
1652 */
1653 if (!wma->interfaces[vdev_id].pause_bitmap)
Leo Chang96464902016-10-28 11:10:54 -07001654 cdp_fc_vdev_pause(soc,
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001655 wma->
1656 interfaces[vdev_id].handle,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001657 OL_TXQ_PAUSE_REASON_FW);
1658 wma->interfaces[vdev_id].pause_bitmap |=
1659 (1 << wmi_event->pause_type);
1660 }
1661 /* UNPAUSE action, clean bitmap */
1662 else if (ACTION_UNPAUSE == wmi_event->action) {
1663 /* Handle unpause only if already paused */
1664 if (wma->interfaces[vdev_id].pause_bitmap) {
1665 wma->interfaces[vdev_id].pause_bitmap &=
1666 ~(1 << wmi_event->pause_type);
1667
1668 if (!wma->interfaces[vdev_id].
1669 pause_bitmap) {
1670 /* PAUSE BIT MAP is cleared
1671 * UNPAUSE VDEV */
Leo Chang96464902016-10-28 11:10:54 -07001672 cdp_fc_vdev_unpause(soc,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001673 wma->interfaces[vdev_id]
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001674 .handle,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001675 OL_TXQ_PAUSE_REASON_FW);
1676 }
1677 }
1678 } else {
1679 WMA_LOGE("Not Valid Action Type %d",
1680 wmi_event->action);
1681 }
1682
1683 WMA_LOGD
1684 ("vdev_id %d, pause_map 0x%x, pause type %d, action %d",
1685 vdev_id, wma->interfaces[vdev_id].pause_bitmap,
1686 wmi_event->pause_type, wmi_event->action);
1687 }
1688 /* Test Next VDEV */
1689 vdev_map >>= 1;
1690 }
1691
1692 return 0;
1693}
1694
1695#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL || QCA_LL_TX_FLOW_CONTROL_V2 || CONFIG_HL_SUPPORT */
1696
Poddar, Siddarth5a91f5b2016-04-28 12:24:10 +05301697#if defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL)
1698
1699/**
1700 * wma_set_peer_rate_report_condition() -
1701 * set the peer rate report
1702 * condition info in firmware.
1703 * @handle: Handle of WMA
1704 * @config: Bad peer configuration from SIR module
1705 *
1706 * It is a wrapper function to send WMI_PEER_SET_RATE_REPORT_CONDITION_CMDID
1707 * to the firmware/target. If the command fails, the allocated
1708 * buffer is freed.
1709 *
1710 * Return: QDF_STATUS based on values sent to firmware
1711 */
1712static
1713QDF_STATUS wma_set_peer_rate_report_condition(WMA_HANDLE handle,
1714 struct t_bad_peer_txtcl_config *config)
1715{
1716 tp_wma_handle wma_handle = (tp_wma_handle)handle;
1717 struct wmi_peer_rate_report_params rate_report_params = {0};
1718 u_int32_t i, j;
1719
1720 rate_report_params.rate_report_enable = config->enable;
1721 rate_report_params.backoff_time = config->tgt_backoff;
1722 rate_report_params.timer_period = config->tgt_report_prd;
1723 for (i = 0; i < WMI_PEER_RATE_REPORT_COND_MAX_NUM; i++) {
1724 rate_report_params.report_per_phy[i].cond_flags =
1725 config->threshold[i].cond;
1726 rate_report_params.report_per_phy[i].delta.delta_min =
1727 config->threshold[i].delta;
1728 rate_report_params.report_per_phy[i].delta.percent =
1729 config->threshold[i].percentage;
1730 for (j = 0; j < WMI_MAX_NUM_OF_RATE_THRESH; j++) {
1731 rate_report_params.report_per_phy[i].
1732 report_rate_threshold[j] =
1733 config->threshold[i].thresh[j];
1734 }
1735 }
1736
1737 return wmi_unified_peer_rate_report_cmd(wma_handle->wmi_handle,
1738 &rate_report_params);
1739}
1740
1741/**
1742 * wma_process_init_bad_peer_tx_ctl_info -
1743 * this function to initialize peer rate report config info.
1744 * @handle: Handle of WMA
1745 * @config: Bad peer configuration from SIR module
1746 *
1747 * This function initializes the bad peer tx control data structure in WMA,
1748 * sends down the initial configuration to the firmware and configures
1749 * the peer status update setting in the tx_rx module.
1750 *
1751 * Return: QDF_STATUS based on procedure status
1752 */
1753
1754QDF_STATUS wma_process_init_bad_peer_tx_ctl_info(tp_wma_handle wma,
1755 struct t_bad_peer_txtcl_config *config)
1756{
1757 /* Parameter sanity check */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001758 struct cdp_pdev *curr_pdev;
Leo Chang96464902016-10-28 11:10:54 -07001759 void *soc = cds_get_context(QDF_MODULE_ID_SOC);
Poddar, Siddarth5a91f5b2016-04-28 12:24:10 +05301760
1761 if (NULL == wma || NULL == config) {
1762 WMA_LOGE("%s Invalid input\n", __func__);
1763 return QDF_STATUS_E_FAILURE;
1764 }
1765
1766 curr_pdev = cds_get_context(QDF_MODULE_ID_TXRX);
1767 if (NULL == curr_pdev) {
1768 WMA_LOGE("%s: Failed to get pdev\n", __func__);
1769 return QDF_STATUS_E_FAILURE;
1770 }
1771
1772 WMA_LOGE("%s enable %d period %d txq limit %d\n", __func__,
1773 config->enable,
1774 config->period,
1775 config->txq_limit);
1776
1777	/* Only need to initialize the setting
1778	 * when the feature is enabled */
1779 if (config->enable) {
1780 int i = 0;
1781
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001782 cdp_bad_peer_txctl_set_setting(soc,
1783 curr_pdev,
Leo Chang96464902016-10-28 11:10:54 -07001784 config->enable,
1785 config->period,
1786 config->txq_limit);
Poddar, Siddarth5a91f5b2016-04-28 12:24:10 +05301787
1788 for (i = 0; i < WLAN_WMA_IEEE80211_MAX_LEVEL; i++) {
1789 u_int32_t threshold, limit;
1790 threshold =
1791 config->threshold[i].thresh[0];
1792 limit = config->threshold[i].txlimit;
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001793 cdp_bad_peer_txctl_update_threshold(soc,
1794 curr_pdev,
1795 i,
1796 threshold,
1797 limit);
Poddar, Siddarth5a91f5b2016-04-28 12:24:10 +05301798 }
1799 }
1800
1801 return wma_set_peer_rate_report_condition(wma, config);
1802}
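/*
 * Usage sketch (illustrative only; the numeric values are made-up examples,
 * not recommended settings): a caller fills a struct t_bad_peer_txtcl_config
 * and passes it down, e.g.
 *
 *	struct t_bad_peer_txtcl_config cfg = {0};
 *
 *	cfg.enable = 1;
 *	cfg.period = 50;
 *	cfg.txq_limit = 100;
 *	cfg.tgt_backoff = 10;
 *	cfg.tgt_report_prd = 20;
 *	cfg.threshold[0].cond = 1;
 *	cfg.threshold[0].delta = 5;
 *	cfg.threshold[0].percentage = 10;
 *	cfg.threshold[0].thresh[0] = 6;
 *	cfg.threshold[0].txlimit = 60;
 *	(void)wma_process_init_bad_peer_tx_ctl_info(wma, &cfg);
 *
 * Only when cfg.enable is set are the txrx thresholds programmed; the rate
 * report condition is sent to firmware in either case.
 */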
Poddar, Siddarth5a91f5b2016-04-28 12:24:10 +05301803#endif /* defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL) */
1804
1805
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001806/**
1807 * wma_process_init_thermal_info() - initialize thermal info
1808 * @wma: Pointer to WMA handle
1809 * @pThermalParams: Pointer to thermal mitigation parameters
1810 *
1811 * This function initializes the thermal management table in WMA,
1812 * sends down the initial temperature thresholds to the firmware
1813 * and configures the throttle period in the tx rx module
1814 *
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301815 * Returns: QDF_STATUS_SUCCESS for success otherwise failure
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001816 */
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301817QDF_STATUS wma_process_init_thermal_info(tp_wma_handle wma,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001818 t_thermal_mgmt *pThermalParams)
1819{
1820 t_thermal_cmd_params thermal_params;
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001821 struct cdp_pdev *curr_pdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001822
1823 if (NULL == wma || NULL == pThermalParams) {
1824 WMA_LOGE("TM Invalid input");
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301825 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001826 }
1827
Anurag Chouhan6d760662016-02-20 16:05:43 +05301828 curr_pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001829 if (NULL == curr_pdev) {
1830 WMA_LOGE("%s: Failed to get pdev", __func__);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301831 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001832 }
1833
1834 WMA_LOGD("TM enable %d period %d", pThermalParams->thermalMgmtEnabled,
1835 pThermalParams->throttlePeriod);
1836
Poddar, Siddarth83905022016-04-16 17:56:08 -07001837 WMA_LOGD("Throttle Duty Cycle Level in percentage:\n"
1838 "0 %d\n"
1839 "1 %d\n"
1840 "2 %d\n"
1841 "3 %d",
1842 pThermalParams->throttle_duty_cycle_tbl[0],
1843 pThermalParams->throttle_duty_cycle_tbl[1],
1844 pThermalParams->throttle_duty_cycle_tbl[2],
1845 pThermalParams->throttle_duty_cycle_tbl[3]);
1846
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001847 wma->thermal_mgmt_info.thermalMgmtEnabled =
1848 pThermalParams->thermalMgmtEnabled;
1849 wma->thermal_mgmt_info.thermalLevels[0].minTempThreshold =
1850 pThermalParams->thermalLevels[0].minTempThreshold;
1851 wma->thermal_mgmt_info.thermalLevels[0].maxTempThreshold =
1852 pThermalParams->thermalLevels[0].maxTempThreshold;
1853 wma->thermal_mgmt_info.thermalLevels[1].minTempThreshold =
1854 pThermalParams->thermalLevels[1].minTempThreshold;
1855 wma->thermal_mgmt_info.thermalLevels[1].maxTempThreshold =
1856 pThermalParams->thermalLevels[1].maxTempThreshold;
1857 wma->thermal_mgmt_info.thermalLevels[2].minTempThreshold =
1858 pThermalParams->thermalLevels[2].minTempThreshold;
1859 wma->thermal_mgmt_info.thermalLevels[2].maxTempThreshold =
1860 pThermalParams->thermalLevels[2].maxTempThreshold;
1861 wma->thermal_mgmt_info.thermalLevels[3].minTempThreshold =
1862 pThermalParams->thermalLevels[3].minTempThreshold;
1863 wma->thermal_mgmt_info.thermalLevels[3].maxTempThreshold =
1864 pThermalParams->thermalLevels[3].maxTempThreshold;
1865 wma->thermal_mgmt_info.thermalCurrLevel = WLAN_WMA_THERMAL_LEVEL_0;
1866
1867 WMA_LOGD("TM level min max:\n"
1868 "0 %d %d\n"
1869 "1 %d %d\n"
1870 "2 %d %d\n"
1871 "3 %d %d",
1872 wma->thermal_mgmt_info.thermalLevels[0].minTempThreshold,
1873 wma->thermal_mgmt_info.thermalLevels[0].maxTempThreshold,
1874 wma->thermal_mgmt_info.thermalLevels[1].minTempThreshold,
1875 wma->thermal_mgmt_info.thermalLevels[1].maxTempThreshold,
1876 wma->thermal_mgmt_info.thermalLevels[2].minTempThreshold,
1877 wma->thermal_mgmt_info.thermalLevels[2].maxTempThreshold,
1878 wma->thermal_mgmt_info.thermalLevels[3].minTempThreshold,
1879 wma->thermal_mgmt_info.thermalLevels[3].maxTempThreshold);
1880
1881 if (wma->thermal_mgmt_info.thermalMgmtEnabled) {
Leo Chang96464902016-10-28 11:10:54 -07001882 cdp_throttle_init_period(cds_get_context(QDF_MODULE_ID_SOC),
1883 curr_pdev,
Poddar, Siddarth83905022016-04-16 17:56:08 -07001884 pThermalParams->throttlePeriod,
1885 &pThermalParams->throttle_duty_cycle_tbl[0]);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001886
1887 /* Get the temperature thresholds to set in firmware */
1888 thermal_params.minTemp =
1889 wma->thermal_mgmt_info.thermalLevels[WLAN_WMA_THERMAL_LEVEL_0].minTempThreshold;
1890 thermal_params.maxTemp =
1891 wma->thermal_mgmt_info.thermalLevels[WLAN_WMA_THERMAL_LEVEL_0].maxTempThreshold;
1892 thermal_params.thermalEnable =
1893 wma->thermal_mgmt_info.thermalMgmtEnabled;
1894
1895 WMA_LOGE("TM sending the following to firmware: min %d max %d enable %d",
1896 thermal_params.minTemp, thermal_params.maxTemp,
1897 thermal_params.thermalEnable);
1898
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301899 if (QDF_STATUS_SUCCESS !=
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001900 wma_set_thermal_mgmt(wma, thermal_params)) {
1901 WMA_LOGE("Could not send thermal mgmt command to the firmware!");
1902 }
1903 }
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301904 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001905}
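/*
 * Usage sketch (illustrative only; the temperatures and duty cycles are
 * made-up example numbers, not tuned values): the caller provides four
 * overlapping temperature bands plus per-level duty cycles, e.g.
 *
 *	t_thermal_mgmt tm = {0};
 *
 *	tm.thermalMgmtEnabled = 1;
 *	tm.throttlePeriod = 100;
 *	tm.throttle_duty_cycle_tbl[0] = 0;
 *	tm.throttle_duty_cycle_tbl[1] = 50;
 *	tm.throttle_duty_cycle_tbl[2] = 75;
 *	tm.throttle_duty_cycle_tbl[3] = 94;
 *	tm.thermalLevels[0].minTempThreshold = 0;
 *	tm.thermalLevels[0].maxTempThreshold = 90;
 *	tm.thermalLevels[1].minTempThreshold = 70;
 *	tm.thermalLevels[1].maxTempThreshold = 110;
 *	tm.thermalLevels[2].minTempThreshold = 90;
 *	tm.thermalLevels[2].maxTempThreshold = 125;
 *	tm.thermalLevels[3].minTempThreshold = 110;
 *	tm.thermalLevels[3].maxTempThreshold = 140;
 *	(void)wma_process_init_thermal_info(wma, &tm);
 *
 * Note that only the level 0 min/max thresholds are pushed to firmware here;
 * the remaining bands are consulted later by wma_thermal_mgmt_get_level().
 */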
1906
1907/**
1908 * wma_set_thermal_level_ind() - send SME set thermal level indication message
1909 * @level: thermal level
1910 *
1911 * Send SME SET_THERMAL_LEVEL_IND message
1912 *
1913 * Returns: none
1914 */
1915static void wma_set_thermal_level_ind(u_int8_t level)
1916{
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301917 QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
Rajeev Kumarb60abe42017-01-21 15:39:31 -08001918 struct scheduler_msg sme_msg = {0};
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001919
1920 WMA_LOGI(FL("Thermal level: %d"), level);
1921
1922 sme_msg.type = eWNI_SME_SET_THERMAL_LEVEL_IND;
1923 sme_msg.bodyptr = NULL;
1924 sme_msg.bodyval = level;
1925
Rajeev Kumarb60abe42017-01-21 15:39:31 -08001926 qdf_status = scheduler_post_msg(QDF_MODULE_ID_SME, &sme_msg);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301927 if (!QDF_IS_STATUS_SUCCESS(qdf_status))
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001928 WMA_LOGE(FL(
1929 "Fail to post set thermal level ind msg"));
1930}
1931
1932/**
1933 * wma_process_set_thermal_level() - sets thermal level
1934 * @wma: Pointer to WMA handle
1935 * @thermal_level : Thermal level
1936 *
1937 * This function sets the new thermal throttle level in the
1938 * txrx module and sends down the corresponding temperature
1939 * thresholds to the firmware
1940 *
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301941 * Returns: QDF_STATUS_SUCCESS for success otherwise failure
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001942 */
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301943QDF_STATUS wma_process_set_thermal_level(tp_wma_handle wma,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001944 uint8_t thermal_level)
1945{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001946 struct cdp_pdev *curr_pdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001947
1948 if (NULL == wma) {
1949 WMA_LOGE("TM Invalid input");
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301950 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001951 }
1952
Anurag Chouhan6d760662016-02-20 16:05:43 +05301953 curr_pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001954 if (NULL == curr_pdev) {
1955 WMA_LOGE("%s: Failed to get pdev", __func__);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301956 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001957 }
1958
1959 WMA_LOGE("TM set level %d", thermal_level);
1960
1961 /* Check if thermal mitigation is enabled */
1962 if (!wma->thermal_mgmt_info.thermalMgmtEnabled) {
1963 WMA_LOGE("Thermal mgmt is not enabled, ignoring set level command");
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301964 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001965 }
1966
1967 if (thermal_level >= WLAN_WMA_MAX_THERMAL_LEVELS) {
1968 WMA_LOGE("Invalid thermal level set %d", thermal_level);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301969 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001970 }
1971
1972 if (thermal_level == wma->thermal_mgmt_info.thermalCurrLevel) {
1973 WMA_LOGD("Current level %d is same as the set level, ignoring",
1974 wma->thermal_mgmt_info.thermalCurrLevel);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301975 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001976 }
1977
1978 wma->thermal_mgmt_info.thermalCurrLevel = thermal_level;
1979
Leo Chang96464902016-10-28 11:10:54 -07001980 cdp_throttle_set_level(cds_get_context(QDF_MODULE_ID_SOC),
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08001981 curr_pdev,
1982 thermal_level);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001983
1984 /* Send SME SET_THERMAL_LEVEL_IND message */
1985 wma_set_thermal_level_ind(thermal_level);
1986
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301987 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001988}
1989
1990
1991/**
1992 * wma_set_thermal_mgmt() - set thermal mgmt command to fw
1993 * @wma_handle: Pointer to WMA handle
1994 * @thermal_info: Thermal command information
1995 *
1996 * This function sends the thermal management command
1997 * to the firmware
1998 *
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05301999 * Return: QDF_STATUS_SUCCESS for success otherwise failure
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002000 */
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302001QDF_STATUS wma_set_thermal_mgmt(tp_wma_handle wma_handle,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002002 t_thermal_cmd_params thermal_info)
2003{
Himanshu Agarwal17dea6e2016-03-09 12:11:22 +05302004 struct thermal_cmd_params mgmt_thermal_info = {0};
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002005
Himanshu Agarwal17dea6e2016-03-09 12:11:22 +05302006 if (!wma_handle) {
2007 WMA_LOGE("%s:'wma_set_thermal_mgmt':invalid input", __func__);
2008 QDF_ASSERT(0);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302009 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002010 }
2011
Himanshu Agarwal17dea6e2016-03-09 12:11:22 +05302012 mgmt_thermal_info.min_temp = thermal_info.minTemp;
2013 mgmt_thermal_info.max_temp = thermal_info.maxTemp;
2014 mgmt_thermal_info.thermal_enable = thermal_info.thermalEnable;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002015
Himanshu Agarwal17dea6e2016-03-09 12:11:22 +05302016 return wmi_unified_set_thermal_mgmt_cmd(wma_handle->wmi_handle,
2017 &mgmt_thermal_info);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002018}
2019
2020/**
2021 * wma_thermal_mgmt_get_level() - returns throttle level
2022 * @handle: Pointer to WMA handle
2023 * @temp: temperature
2024 *
2025 * This function returns the thermal (throttle) level
2026 * for the given temperature.
2027 *
2028 * Return: thermal (throttle) level
2029 */
Jeff Johnsonc4b47a92016-10-07 12:34:41 -07002030static uint8_t wma_thermal_mgmt_get_level(void *handle, uint32_t temp)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002031{
2032 tp_wma_handle wma = (tp_wma_handle) handle;
2033 int i;
2034 uint8_t level;
2035
2036 level = i = wma->thermal_mgmt_info.thermalCurrLevel;
2037 while (temp < wma->thermal_mgmt_info.thermalLevels[i].minTempThreshold
2038 && i > 0) {
2039 i--;
2040 level = i;
2041 }
2042
2043 i = wma->thermal_mgmt_info.thermalCurrLevel;
2044 while (temp > wma->thermal_mgmt_info.thermalLevels[i].maxTempThreshold
2045 && i < (WLAN_WMA_MAX_THERMAL_LEVELS - 1)) {
2046 i++;
2047 level = i;
2048 }
2049
2050 WMA_LOGW("Change thermal level from %d -> %d\n",
2051 wma->thermal_mgmt_info.thermalCurrLevel, level);
2052
2053 return level;
2054}
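/*
 * Worked example (illustrative, using made-up bands): assume
 * level 0 = [0, 90], level 1 = [70, 110], level 2 = [90, 125] and the
 * current level is 1. Then:
 *
 *	temp = 60  -> below level 1 min, walk down -> level 0
 *	temp = 95  -> inside the level 1 band      -> level 1 (unchanged)
 *	temp = 120 -> above level 1 max, walk up   -> level 2
 *
 * The overlapping min/max thresholds give the level changes a hysteresis so
 * that small temperature fluctuations do not toggle the throttle level.
 */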
2055
2056/**
2057 * wma_thermal_mgmt_evt_handler() - thermal mgmt event handler
2058 * @handle: Pointer to WMA handle
2059 * @event: Thermal event information
2060 *
2061 * This function handles the thermal mgmt event from the firmware.
2062 *
2063 * Return: 0 for success otherwise failure
2064 */
2065int wma_thermal_mgmt_evt_handler(void *handle, uint8_t *event,
2066 uint32_t len)
2067{
2068 tp_wma_handle wma;
2069 wmi_thermal_mgmt_event_fixed_param *tm_event;
2070 uint8_t thermal_level;
2071 t_thermal_cmd_params thermal_params;
2072 WMI_THERMAL_MGMT_EVENTID_param_tlvs *param_buf;
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002073 struct cdp_pdev *curr_pdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002074
2075 if (NULL == event || NULL == handle) {
2076 WMA_LOGE("Invalid thermal mitigation event buffer");
2077 return -EINVAL;
2078 }
2079
2080 wma = (tp_wma_handle) handle;
2081
2082 if (NULL == wma) {
2083 WMA_LOGE("%s: Failed to get wma handle", __func__);
2084 return -EINVAL;
2085 }
2086
2087 param_buf = (WMI_THERMAL_MGMT_EVENTID_param_tlvs *) event;
2088
Anurag Chouhan6d760662016-02-20 16:05:43 +05302089 curr_pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002090 if (NULL == curr_pdev) {
2091 WMA_LOGE("%s: Failed to get pdev", __func__);
2092 return -EINVAL;
2093 }
2094
2095 /* Check if thermal mitigation is enabled */
2096 if (!wma->thermal_mgmt_info.thermalMgmtEnabled) {
2097 WMA_LOGE("Thermal mgmt is not enabled, ignoring event");
2098 return -EINVAL;
2099 }
2100
2101 tm_event = param_buf->fixed_param;
2102 WMA_LOGD("Thermal mgmt event received with temperature %d",
2103 tm_event->temperature_degreeC);
2104
2105 /* Get the thermal mitigation level for the reported temperature */
2106 thermal_level =
2107 wma_thermal_mgmt_get_level(handle, tm_event->temperature_degreeC);
2108 WMA_LOGD("Thermal mgmt level %d", thermal_level);
2109
2110 if (thermal_level == wma->thermal_mgmt_info.thermalCurrLevel) {
2111 WMA_LOGD("Current level %d is same as the set level, ignoring",
2112 wma->thermal_mgmt_info.thermalCurrLevel);
2113 return 0;
2114 }
2115
2116 wma->thermal_mgmt_info.thermalCurrLevel = thermal_level;
2117
2118 /* Inform txrx */
Leo Chang96464902016-10-28 11:10:54 -07002119 cdp_throttle_set_level(cds_get_context(QDF_MODULE_ID_SOC),
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002120 curr_pdev,
2121 thermal_level);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002122
2123 /* Send SME SET_THERMAL_LEVEL_IND message */
2124 wma_set_thermal_level_ind(thermal_level);
2125
2126 /* Get the temperature thresholds to set in firmware */
2127 thermal_params.minTemp =
2128 wma->thermal_mgmt_info.thermalLevels[thermal_level].
2129 minTempThreshold;
2130 thermal_params.maxTemp =
2131 wma->thermal_mgmt_info.thermalLevels[thermal_level].
2132 maxTempThreshold;
2133 thermal_params.thermalEnable =
2134 wma->thermal_mgmt_info.thermalMgmtEnabled;
2135
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302136 if (QDF_STATUS_SUCCESS != wma_set_thermal_mgmt(wma, thermal_params)) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002137 WMA_LOGE("Could not send thermal mgmt command to the firmware!");
2138 return -EINVAL;
2139 }
2140
2141 return 0;
2142}
2143
2144/**
Rajeev Kumar8e3e2832015-11-06 16:02:54 -08002145 * wma_ibss_peer_info_event_handler() - IBSS peer info event handler
2146 * @handle: wma handle
2147 * @data: event data
2148 * @len: length of data
2149 *
2150 * This function handles IBSS peer info event from FW.
2151 *
2152 * Return: 0 for success or error code
2153 */
2154int wma_ibss_peer_info_event_handler(void *handle, uint8_t *data,
2155 uint32_t len)
2156{
Rajeev Kumarb60abe42017-01-21 15:39:31 -08002157 struct scheduler_msg cds_msg;
Rajeev Kumar8e3e2832015-11-06 16:02:54 -08002158 wmi_peer_info *peer_info;
Leo Chang96464902016-10-28 11:10:54 -07002159 void *pdev;
Rajeev Kumar8e3e2832015-11-06 16:02:54 -08002160 tSirIbssPeerInfoParams *pSmeRsp;
2161 uint32_t count, num_peers, status;
2162 tSirIbssGetPeerInfoRspParams *pRsp;
2163 WMI_PEER_INFO_EVENTID_param_tlvs *param_tlvs;
2164 wmi_peer_info_event_fixed_param *fix_param;
Rajeev Kumar94c9b452016-03-24 12:58:47 -07002165 uint8_t peer_mac[IEEE80211_ADDR_LEN];
Rajeev Kumar8e3e2832015-11-06 16:02:54 -08002166
2167 pdev = cds_get_context(QDF_MODULE_ID_TXRX);
2168 if (NULL == pdev) {
2169 WMA_LOGE("%s: could not get pdev context", __func__);
2170 return 0;
2171 }
2172
2173 param_tlvs = (WMI_PEER_INFO_EVENTID_param_tlvs *) data;
2174 fix_param = param_tlvs->fixed_param;
2175 peer_info = param_tlvs->peer_info;
2176 num_peers = fix_param->num_peers;
2177 status = 0;
2178
2179 WMA_LOGE("%s: num_peers %d", __func__, num_peers);
2180
2181 pRsp = qdf_mem_malloc(sizeof(tSirIbssGetPeerInfoRspParams));
2182 if (NULL == pRsp) {
2183 WMA_LOGE("%s: could not allocate memory for ibss peer info rsp len %zu",
2184 __func__, sizeof(tSirIbssGetPeerInfoRspParams));
2185 return 0;
2186 }
2187
2188 /*sanity check */
2189 if ((num_peers > 32) || (NULL == peer_info)) {
2190 WMA_LOGE("%s: Invalid event data from target num_peers %d peer_info %p",
2191 __func__, num_peers, peer_info);
2192 status = 1;
2193 goto send_response;
2194 }
2195
yeshwanth sriram guntuka0255f852016-08-31 17:18:19 +05302196 /*
2197	 * For displaying only connected IBSS peer info, iterate till
2198	 * the last but one entry only, as the last entry is used for the IBSS creator
2199 */
2200 for (count = 0; count < num_peers-1; count++) {
Rajeev Kumar8e3e2832015-11-06 16:02:54 -08002201 pSmeRsp = &pRsp->ibssPeerInfoRspParams.peerInfoParams[count];
2202
2203 WMI_MAC_ADDR_TO_CHAR_ARRAY(&peer_info->peer_mac_address,
2204 peer_mac);
Rajeev Kumar94c9b452016-03-24 12:58:47 -07002205 qdf_mem_copy(pSmeRsp->mac_addr, peer_mac,
2206 sizeof(pSmeRsp->mac_addr));
Rajeev Kumar8e3e2832015-11-06 16:02:54 -08002207 pSmeRsp->mcsIndex = 0;
2208 pSmeRsp->rssi = peer_info->rssi + WMA_TGT_NOISE_FLOOR_DBM;
2209 pSmeRsp->txRate = peer_info->data_rate;
2210 pSmeRsp->txRateFlags = 0;
2211
Rajeev Kumar94c9b452016-03-24 12:58:47 -07002212 WMA_LOGE("peer " MAC_ADDRESS_STR "rssi %d txRate %d",
2213 MAC_ADDR_ARRAY(peer_mac),
2214 pSmeRsp->rssi, pSmeRsp->txRate);
Rajeev Kumar8e3e2832015-11-06 16:02:54 -08002215
2216 peer_info++;
2217 }
2218
2219send_response:
2220 /* message header */
2221 pRsp->mesgType = eWNI_SME_IBSS_PEER_INFO_RSP;
2222 pRsp->mesgLen = sizeof(tSirIbssGetPeerInfoRspParams);
2223 pRsp->ibssPeerInfoRspParams.status = status;
2224 pRsp->ibssPeerInfoRspParams.numPeers = num_peers;
2225
2226 /* cds message wrapper */
2227 cds_msg.type = eWNI_SME_IBSS_PEER_INFO_RSP;
2228 cds_msg.bodyptr = (void *)pRsp;
2229 cds_msg.bodyval = 0;
2230
2231 if (QDF_STATUS_SUCCESS !=
Rajeev Kumar156188e2017-01-21 17:23:52 -08002232 scheduler_post_msg(QDF_MODULE_ID_SME, &cds_msg)) {
Rajeev Kumar8e3e2832015-11-06 16:02:54 -08002233 WMA_LOGE("%s: could not post peer info rsp msg to SME",
2234 __func__);
2235 /* free the mem and return */
2236 qdf_mem_free((void *)pRsp);
2237 }
2238
2239 return 0;
2240}
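/*
 * Illustrative note: the handler above reports RSSI as
 * peer_info->rssi + WMA_TGT_NOISE_FLOOR_DBM, i.e. it offsets the
 * firmware-reported value by the target noise-floor constant. For example, a
 * reported value of 30 with a noise floor of -96 dBm would be presented as
 * -66 dBm (the -96 figure is a typical value used only for illustration, not
 * a constant quoted from this file).
 */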
2241
2242/**
2243 * wma_fast_tx_fail_event_handler() -tx failure event handler
2244 * @handle: wma handle
2245 * @data: event data
2246 * @len: data length
2247 *
2248 * Handle fast tx failure indication event from FW
2249 *
2250 * Return: 0 for success or error code.
2251 */
2252int wma_fast_tx_fail_event_handler(void *handle, uint8_t *data,
2253 uint32_t len)
2254{
2255 uint8_t tx_fail_cnt;
2256 uint8_t peer_mac[IEEE80211_ADDR_LEN];
2257 tp_wma_handle wma = (tp_wma_handle) handle;
2258 WMI_PEER_TX_FAIL_CNT_THR_EVENTID_param_tlvs *param_tlvs;
2259 wmi_peer_tx_fail_cnt_thr_event_fixed_param *fix_param;
2260
2261 param_tlvs = (WMI_PEER_TX_FAIL_CNT_THR_EVENTID_param_tlvs *) data;
2262 fix_param = param_tlvs->fixed_param;
2263
2264 WMI_MAC_ADDR_TO_CHAR_ARRAY(&fix_param->peer_mac_address, peer_mac);
2265	WMA_LOGE("%s: received fast tx failure event for peer"
2266		 " 0x%2x:0x%2x:0x%2x:0x%2x:0x%2x:0x%2x seq No %d", __func__,
2267 peer_mac[0], peer_mac[1], peer_mac[2], peer_mac[3],
2268 peer_mac[4], peer_mac[5], fix_param->seq_no);
2269
2270 tx_fail_cnt = fix_param->seq_no;
2271
2272 /*call HDD callback */
2273 if (NULL != wma->hddTxFailCb) {
2274 wma->hddTxFailCb(peer_mac, tx_fail_cnt);
2275 } else {
2276 WMA_LOGE("%s: HDD callback is %p", __func__, wma->hddTxFailCb);
2277 }
2278
2279 return 0;
2280}
2281
2282/**
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002283 * wma_decap_to_8023() - Decapsulate to 802.3 format
2284 * @msdu: skb buffer
2285 * @info: decapsulate info
2286 *
2287 * Return: none
2288 */
Nirav Shahcbc6d722016-03-01 16:24:53 +05302289static void wma_decap_to_8023(qdf_nbuf_t msdu, struct wma_decap_info_t *info)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002290{
2291 struct llc_snap_hdr_t *llc_hdr;
2292 uint16_t ether_type;
2293 uint16_t l2_hdr_space;
2294 struct ieee80211_qosframe_addr4 *wh;
2295 uint8_t local_buf[ETHERNET_HDR_LEN];
2296 uint8_t *buf;
2297 struct ethernet_hdr_t *ethr_hdr;
2298
Nirav Shahcbc6d722016-03-01 16:24:53 +05302299 buf = (uint8_t *) qdf_nbuf_data(msdu);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002300 llc_hdr = (struct llc_snap_hdr_t *)buf;
2301 ether_type = (llc_hdr->ethertype[0] << 8) | llc_hdr->ethertype[1];
2302 /* do llc remove if needed */
2303 l2_hdr_space = 0;
2304 if (IS_SNAP(llc_hdr)) {
2305 if (IS_BTEP(llc_hdr)) {
2306 /* remove llc */
2307 l2_hdr_space += sizeof(struct llc_snap_hdr_t);
2308 llc_hdr = NULL;
2309 } else if (IS_RFC1042(llc_hdr)) {
2310 if (!(ether_type == ETHERTYPE_AARP ||
2311 ether_type == ETHERTYPE_IPX)) {
2312 /* remove llc */
2313 l2_hdr_space += sizeof(struct llc_snap_hdr_t);
2314 llc_hdr = NULL;
2315 }
2316 }
2317 }
2318 if (l2_hdr_space > ETHERNET_HDR_LEN) {
Nirav Shahcbc6d722016-03-01 16:24:53 +05302319 buf = qdf_nbuf_pull_head(msdu, l2_hdr_space - ETHERNET_HDR_LEN);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002320 } else if (l2_hdr_space < ETHERNET_HDR_LEN) {
Nirav Shahcbc6d722016-03-01 16:24:53 +05302321 buf = qdf_nbuf_push_head(msdu, ETHERNET_HDR_LEN - l2_hdr_space);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002322 }
2323
2324	/* mpdu hdr should be present in info, re-create ethr_hdr based on mpdu hdr */
2325 wh = (struct ieee80211_qosframe_addr4 *)info->hdr;
2326 ethr_hdr = (struct ethernet_hdr_t *)local_buf;
2327 switch (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) {
2328 case IEEE80211_FC1_DIR_NODS:
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302329 qdf_mem_copy(ethr_hdr->dest_addr, wh->i_addr1,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002330 ETHERNET_ADDR_LEN);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302331 qdf_mem_copy(ethr_hdr->src_addr, wh->i_addr2,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002332 ETHERNET_ADDR_LEN);
2333 break;
2334 case IEEE80211_FC1_DIR_TODS:
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302335 qdf_mem_copy(ethr_hdr->dest_addr, wh->i_addr3,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002336 ETHERNET_ADDR_LEN);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302337 qdf_mem_copy(ethr_hdr->src_addr, wh->i_addr2,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002338 ETHERNET_ADDR_LEN);
2339 break;
2340 case IEEE80211_FC1_DIR_FROMDS:
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302341 qdf_mem_copy(ethr_hdr->dest_addr, wh->i_addr1,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002342 ETHERNET_ADDR_LEN);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302343 qdf_mem_copy(ethr_hdr->src_addr, wh->i_addr3,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002344 ETHERNET_ADDR_LEN);
2345 break;
2346 case IEEE80211_FC1_DIR_DSTODS:
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302347 qdf_mem_copy(ethr_hdr->dest_addr, wh->i_addr3,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002348 ETHERNET_ADDR_LEN);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302349 qdf_mem_copy(ethr_hdr->src_addr, wh->i_addr4,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002350 ETHERNET_ADDR_LEN);
2351 break;
2352 }
2353
2354 if (llc_hdr == NULL) {
2355 ethr_hdr->ethertype[0] = (ether_type >> 8) & 0xff;
2356 ethr_hdr->ethertype[1] = (ether_type) & 0xff;
2357 } else {
Nirav Shahcbc6d722016-03-01 16:24:53 +05302361 ether_type = qdf_nbuf_len(msdu) - sizeof(struct ethernet_hdr_t);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002362 ethr_hdr->ethertype[0] = (ether_type >> 8) & 0xff;
2363 ethr_hdr->ethertype[1] = (ether_type) & 0xff;
2364 }
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302365 qdf_mem_copy(buf, ethr_hdr, ETHERNET_HDR_LEN);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002366}
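/*
 * Illustrative summary (restating the switch above): the 802.3
 * destination/source addresses are chosen from the 802.11 header according
 * to the To-DS/From-DS direction bits:
 *
 *	NODS   (IBSS/direct)  : dest = addr1, src = addr2
 *	TODS   (STA to AP)    : dest = addr3, src = addr2
 *	FROMDS (AP to STA)    : dest = addr1, src = addr3
 *	DSTODS (4-address)    : dest = addr3, src = addr4
 *
 * The ethertype is either carried over from the SNAP header or rewritten as
 * a length field when the LLC header is retained.
 */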
2367
2368/**
2369 * wma_ieee80211_hdrsize() - get 802.11 header size
2370 * @data: 80211 frame
2371 *
2372 * Return: size of header
2373 */
2374static int32_t wma_ieee80211_hdrsize(const void *data)
2375{
2376 const struct ieee80211_frame *wh = (const struct ieee80211_frame *)data;
2377 int32_t size = sizeof(struct ieee80211_frame);
2378
2379 if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS)
2380 size += IEEE80211_ADDR_LEN;
2381 if (IEEE80211_QOS_HAS_SEQ(wh))
2382 size += sizeof(uint16_t);
2383 return size;
2384}
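/*
 * Worked example (illustrative): sizeof(struct ieee80211_frame) is the basic
 * three-address header (24 bytes in the usual packed layout), so:
 *
 *	plain 3-address frame    : 24 bytes
 *	QoS 3-address data frame : 24 + 2 (QoS control)     = 26 bytes
 *	QoS 4-address (DS-to-DS) : 24 + 6 (addr4) + 2 (QoS) = 32 bytes
 *
 * The function itself relies only on sizeof() and the macros, so the exact
 * base size follows whatever layout the build uses.
 */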
2385
2386/**
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002387 * wma_tx_packet() - Sends Tx Frame to TxRx
2388 * @wma_context: wma context
2389 * @tx_frame: frame buffer
2390 * @frmLen: frame length
2391 * @frmType: frame type
2392 * @txDir: tx direction
2393 * @tid: TID
2394 * @tx_frm_download_comp_cb: tx download callback handler
2395 * @tx_frm_ota_comp_cb: OTA completion handler
2396 * @tx_flag: tx flag
2397 * @vdev_id: vdev id
2398 * @tdlsFlag: tdls flag
2399 *
2400 * This function sends the frame corresponding to the
2401 * given vdev id.
2402 * This is a blocking call till the downloading of the frame is complete.
2403 *
Anurag Chouhanf04e84f2016-03-03 10:12:12 +05302404 * Return: QDF status
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002405 */
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302406QDF_STATUS wma_tx_packet(void *wma_context, void *tx_frame, uint16_t frmLen,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002407 eFrameType frmType, eFrameTxDir txDir, uint8_t tid,
Himanshu Agarwal2fdf77a2016-12-29 11:41:00 +05302408 wma_tx_dwnld_comp_callback tx_frm_download_comp_cb,
2409 void *pData,
2410 wma_tx_ota_comp_callback tx_frm_ota_comp_cb,
2411 uint8_t tx_flag, uint8_t vdev_id, bool tdlsFlag,
2412 uint16_t channel_freq)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002413{
2414 tp_wma_handle wma_handle = (tp_wma_handle) (wma_context);
2415 int32_t status;
Anurag Chouhance0dc992016-02-16 18:18:03 +05302416 QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002417 int32_t is_high_latency;
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002418 struct cdp_vdev *txrx_vdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002419 enum frame_index tx_frm_index = GENERIC_NODOWNLD_NOACK_COMP_INDEX;
Nirav Shahcbc6d722016-03-01 16:24:53 +05302420 tpSirMacFrameCtl pFc = (tpSirMacFrameCtl) (qdf_nbuf_data(tx_frame));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002421 uint8_t use_6mbps = 0;
2422 uint8_t downld_comp_required = 0;
2423 uint16_t chanfreq;
2424#ifdef WLAN_FEATURE_11W
2425 uint8_t *pFrame = NULL;
2426 void *pPacket = NULL;
2427 uint16_t newFrmLen = 0;
2428#endif /* WLAN_FEATURE_11W */
2429 struct wma_txrx_node *iface;
2430 tpAniSirGlobal pMac;
2431 tpSirMacMgmtHdr mHdr;
Govind Singh09c3b492016-03-08 16:05:14 +05302432 struct wmi_mgmt_params mgmt_param = {0};
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002433 struct cdp_cfg *ctrl_pdev;
Leo Chang96464902016-10-28 11:10:54 -07002434 void *soc = cds_get_context(QDF_MODULE_ID_SOC);
Himanshu Agarwal2fdf77a2016-12-29 11:41:00 +05302435 struct ieee80211_frame *wh;
2436 struct wlan_objmgr_peer *peer = NULL;
2437 struct wlan_objmgr_psoc *psoc;
2438 void *mac_addr;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002439
2440 if (NULL == wma_handle) {
2441 WMA_LOGE("wma_handle is NULL");
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302442 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002443 }
2444 iface = &wma_handle->interfaces[vdev_id];
2445 /* Get the vdev handle from vdev id */
2446 txrx_vdev = wma_handle->interfaces[vdev_id].handle;
2447
2448 if (!txrx_vdev) {
2449 WMA_LOGE("TxRx Vdev Handle is NULL");
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302450 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002451 }
2452
Nishank Aggarwala13b61d2016-12-01 12:53:58 +05302453 if (!soc) {
2454 WMA_LOGE("%s:SOC context is NULL", __func__);
2455 return QDF_STATUS_E_FAILURE;
2456 }
2457
Leo Chang96464902016-10-28 11:10:54 -07002458 cdp_hl_tdls_flag_reset(soc, txrx_vdev, false);
Poddar, Siddarth5a91f5b2016-04-28 12:24:10 +05302459
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002460 if (frmType >= TXRX_FRM_MAX) {
2461 WMA_LOGE("Invalid Frame Type Fail to send Frame");
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302462 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002463 }
2464
Anurag Chouhan6d760662016-02-20 16:05:43 +05302465 pMac = cds_get_context(QDF_MODULE_ID_PE);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002466 if (!pMac) {
2467 WMA_LOGE("pMac Handle is NULL");
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302468 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002469 }
2470 /*
2471 * Currently only support to
2472 * send 80211 Mgmt and 80211 Data are added.
2473 */
2474 if (!((frmType == TXRX_FRM_802_11_MGMT) ||
2475 (frmType == TXRX_FRM_802_11_DATA))) {
2476 WMA_LOGE("No Support to send other frames except 802.11 Mgmt/Data");
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302477 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002478 }
Nirav Shahcbc6d722016-03-01 16:24:53 +05302479 mHdr = (tpSirMacMgmtHdr)qdf_nbuf_data(tx_frame);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002480#ifdef WLAN_FEATURE_11W
2481 if ((iface && iface->rmfEnabled) &&
2482 (frmType == TXRX_FRM_802_11_MGMT) &&
2483 (pFc->subType == SIR_MAC_MGMT_DISASSOC ||
2484 pFc->subType == SIR_MAC_MGMT_DEAUTH ||
2485 pFc->subType == SIR_MAC_MGMT_ACTION)) {
2486 struct ieee80211_frame *wh =
Nirav Shahcbc6d722016-03-01 16:24:53 +05302487 (struct ieee80211_frame *)qdf_nbuf_data(tx_frame);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002488 if (!IEEE80211_IS_BROADCAST(wh->i_addr1) &&
2489 !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
2490 if (pFc->wep) {
2491 /* Allocate extra bytes for privacy header and trailer */
2492 newFrmLen = frmLen + IEEE80211_CCMP_HEADERLEN +
2493 IEEE80211_CCMP_MICLEN;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302494 qdf_status =
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002495 cds_packet_alloc((uint16_t) newFrmLen,
2496 (void **)&pFrame,
2497 (void **)&pPacket);
2498
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302499 if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002500 WMA_LOGP("%s: Failed to allocate %d bytes for RMF status "
2501 "code (%x)", __func__, newFrmLen,
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302502 qdf_status);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002503 /* Free the original packet memory */
2504 cds_packet_free((void *)tx_frame);
2505 goto error;
2506 }
2507
2508 /*
2509 * Initialize the frame with 0's and only fill
2510 * MAC header and data, Keep the CCMP header and
2511 * trailer as 0's, firmware shall fill this
2512 */
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302513 qdf_mem_set(pFrame, newFrmLen, 0);
2514 qdf_mem_copy(pFrame, wh, sizeof(*wh));
2515 qdf_mem_copy(pFrame + sizeof(*wh) +
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002516 IEEE80211_CCMP_HEADERLEN,
2517 pData + sizeof(*wh),
2518 frmLen - sizeof(*wh));
2519
2520 cds_packet_free((void *)tx_frame);
2521 tx_frame = pPacket;
Naveen Rawat67d60b32017-01-10 17:54:36 -08002522 pData = pFrame;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002523 frmLen = newFrmLen;
2524 }
2525 } else {
2526 /* Allocate extra bytes for MMIE */
2527 newFrmLen = frmLen + IEEE80211_MMIE_LEN;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302528 qdf_status = cds_packet_alloc((uint16_t) newFrmLen,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002529 (void **)&pFrame,
2530 (void **)&pPacket);
2531
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302532 if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002533 WMA_LOGP("%s: Failed to allocate %d bytes for RMF status "
2534 "code (%x)", __func__, newFrmLen,
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302535 qdf_status);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002536 /* Free the original packet memory */
2537 cds_packet_free((void *)tx_frame);
2538 goto error;
2539 }
2540 /*
2541 * Initialize the frame with 0's and only fill
2542 * MAC header and data. MMIE field will be
2543 * filled by cds_attach_mmie API
2544 */
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302545 qdf_mem_set(pFrame, newFrmLen, 0);
2546 qdf_mem_copy(pFrame, wh, sizeof(*wh));
2547 qdf_mem_copy(pFrame + sizeof(*wh),
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002548 pData + sizeof(*wh), frmLen - sizeof(*wh));
2549 if (!cds_attach_mmie(iface->key.key,
2550 iface->key.key_id[0].ipn,
2551 WMA_IGTK_KEY_INDEX_4,
2552 pFrame,
2553 pFrame + newFrmLen, newFrmLen)) {
2554 WMA_LOGP("%s: Failed to attach MMIE at the end of "
2555 "frame", __func__);
2556 /* Free the original packet memory */
2557 cds_packet_free((void *)tx_frame);
2558 goto error;
2559 }
2560 cds_packet_free((void *)tx_frame);
2561 tx_frame = pPacket;
Naveen Rawat67d60b32017-01-10 17:54:36 -08002562 pData = pFrame;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002563 frmLen = newFrmLen;
2564 }
2565 }
2566#endif /* WLAN_FEATURE_11W */
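	/*
	 * Illustrative note: for a protected unicast robust mgmt frame the
	 * 11W block above grows the buffer by IEEE80211_CCMP_HEADERLEN +
	 * IEEE80211_CCMP_MICLEN (commonly 8 + 8 bytes) and leaves those
	 * regions zeroed for firmware to fill, while a group-addressed frame
	 * instead gets IEEE80211_MMIE_LEN extra bytes for the MMIE appended
	 * by cds_attach_mmie(). The byte counts are the usual CCMP values,
	 * not constants quoted from this file.
	 */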
2567
2568 if ((frmType == TXRX_FRM_802_11_MGMT) &&
2569 (pFc->subType == SIR_MAC_MGMT_PROBE_RSP)) {
2570 uint64_t adjusted_tsf_le;
2571 struct ieee80211_frame *wh =
Nirav Shahcbc6d722016-03-01 16:24:53 +05302572 (struct ieee80211_frame *)qdf_nbuf_data(tx_frame);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002573
2574 /* Make the TSF offset negative to match TSF in beacons */
2575 adjusted_tsf_le = cpu_to_le64(0ULL -
2576 wma_handle->interfaces[vdev_id].
2577 tsfadjust);
2578 A_MEMCPY(&wh[1], &adjusted_tsf_le, sizeof(adjusted_tsf_le));
2579 }
2580 if (frmType == TXRX_FRM_802_11_DATA) {
Nirav Shahcbc6d722016-03-01 16:24:53 +05302581 qdf_nbuf_t ret;
2582 qdf_nbuf_t skb = (qdf_nbuf_t) tx_frame;
Leo Chang96464902016-10-28 11:10:54 -07002583 void *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002584
2585 struct wma_decap_info_t decap_info;
2586 struct ieee80211_frame *wh =
Nirav Shahcbc6d722016-03-01 16:24:53 +05302587 (struct ieee80211_frame *)qdf_nbuf_data(skb);
Anurag Chouhan210db072016-02-22 18:42:15 +05302588 unsigned long curr_timestamp = qdf_mc_timer_get_system_ticks();
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002589
2590 if (pdev == NULL) {
2591 WMA_LOGE("%s: pdev pointer is not available", __func__);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302592 return QDF_STATUS_E_FAULT;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002593 }
2594
2595 /*
2596 * 1) TxRx Module expects data input to be 802.3 format
2597 * So Decapsulation has to be done.
2598 * 2) Only one Outstanding Data pending for Ack is allowed
2599 */
2600 if (tx_frm_ota_comp_cb) {
2601 if (wma_handle->umac_data_ota_ack_cb) {
2602 /*
2603 * If last data frame was sent more than 5 seconds
2604 * ago and still we did not receive ack/nack from
2605 * fw then allow Tx of this data frame
2606 */
2607 if (curr_timestamp >=
2608 wma_handle->last_umac_data_ota_timestamp +
2609 500) {
2610 WMA_LOGE("%s: No Tx Ack for last data frame for more than 5 secs, allow Tx of current data frame",
2611 __func__);
2612 } else {
2613 WMA_LOGE("%s: Already one Data pending for Ack, reject Tx of data frame",
2614 __func__);
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302615 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002616 }
2617 }
2618 } else {
2619 /*
2620 * Data Frames are sent through TxRx Non Standard Data Path
2621		 * so the Ack Complete Cb is mandatory
2622 */
2623 WMA_LOGE("No Ack Complete Cb. Don't Allow");
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302624 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002625 }
2626
2627 /* Take out 802.11 header from skb */
2628 decap_info.hdr_len = wma_ieee80211_hdrsize(wh);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302629 qdf_mem_copy(decap_info.hdr, wh, decap_info.hdr_len);
Nirav Shahcbc6d722016-03-01 16:24:53 +05302630 qdf_nbuf_pull_head(skb, decap_info.hdr_len);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002631
2632 /* Decapsulate to 802.3 format */
2633 wma_decap_to_8023(skb, &decap_info);
2634
2635 /* Zero out skb's context buffer for the driver to use */
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302636 qdf_mem_set(skb->cb, sizeof(skb->cb), 0);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002637
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002638 /* Terminate the (single-element) list of tx frames */
2639 skb->next = NULL;
2640
2641 /* Store the Ack Complete Cb */
2642 wma_handle->umac_data_ota_ack_cb = tx_frm_ota_comp_cb;
2643
2644 /* Store the timestamp and nbuf for this data Tx */
2645 wma_handle->last_umac_data_ota_timestamp = curr_timestamp;
2646 wma_handle->last_umac_data_nbuf = skb;
2647
2648 /* Send the Data frame to TxRx in Non Standard Path */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002649 cdp_hl_tdls_flag_reset(soc,
2650 txrx_vdev, tdlsFlag);
Poddar, Siddarth5a91f5b2016-04-28 12:24:10 +05302651
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002652 ret = cdp_tx_non_std(soc,
2653 txrx_vdev,
2654 OL_TX_SPEC_NO_FREE, skb);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002655
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002656 cdp_hl_tdls_flag_reset(soc,
2657 txrx_vdev, false);
Poddar, Siddarth5a91f5b2016-04-28 12:24:10 +05302658
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002659 if (ret) {
2660 WMA_LOGE("TxRx Rejected. Fail to do Tx");
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002661 /* Call Download Cb so that umac can free the buffer */
2662 if (tx_frm_download_comp_cb)
2663 tx_frm_download_comp_cb(wma_handle->mac_context,
2664 tx_frame,
2665 WMA_TX_FRAME_BUFFER_FREE);
2666 wma_handle->umac_data_ota_ack_cb = NULL;
2667 wma_handle->last_umac_data_nbuf = NULL;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302668 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002669 }
2670
2671 /* Call Download Callback if passed */
2672 if (tx_frm_download_comp_cb)
2673 tx_frm_download_comp_cb(wma_handle->mac_context,
2674 tx_frame,
2675 WMA_TX_FRAME_BUFFER_NO_FREE);
2676
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302677 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002678 }
2679
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002680 ctrl_pdev = cdp_get_ctrl_pdev_from_vdev(soc,
2681 txrx_vdev);
Manjunathappa Prakash10d357a2016-03-31 19:20:49 -07002682 if (ctrl_pdev == NULL) {
2683		WMA_LOGE("ctrl_pdev is NULL");
2684 return QDF_STATUS_E_FAILURE;
2685 }
Leo Chang96464902016-10-28 11:10:54 -07002686 is_high_latency = cdp_cfg_is_high_latency(soc, ctrl_pdev);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002687
Mukul Sharmadfc804c2016-09-03 16:31:20 +05302688 downld_comp_required = tx_frm_download_comp_cb && is_high_latency &&
2689 tx_frm_ota_comp_cb;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002690
2691 /* Fill the frame index to send */
2692 if (pFc->type == SIR_MAC_MGMT_FRAME) {
2693 if (tx_frm_ota_comp_cb) {
2694 if (downld_comp_required)
2695 tx_frm_index =
2696 GENERIC_DOWNLD_COMP_ACK_COMP_INDEX;
2697 else
2698 tx_frm_index = GENERIC_NODOWLOAD_ACK_COMP_INDEX;
2699
2700 /* Store the Ack Cb sent by UMAC */
2701 if (pFc->subType < SIR_MAC_MGMT_RESERVED15) {
2702 wma_handle->umac_ota_ack_cb[pFc->subType] =
2703 tx_frm_ota_comp_cb;
2704 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002705 } else {
2706 if (downld_comp_required)
2707 tx_frm_index =
2708 GENERIC_DOWNLD_COMP_NOACK_COMP_INDEX;
2709 else
2710 tx_frm_index =
2711 GENERIC_NODOWNLD_NOACK_COMP_INDEX;
2712 }
2713 }
2714
2715 /*
2716	 * If Download Complete is required,
2717	 * wait for download complete
2718 */
2719 if (downld_comp_required) {
2720 /* Store Tx Comp Cb */
2721 wma_handle->tx_frm_download_comp_cb = tx_frm_download_comp_cb;
2722
2723 /* Reset the Tx Frame Complete Event */
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302724 qdf_status =
Anurag Chouhance0dc992016-02-16 18:18:03 +05302725 qdf_event_reset(&wma_handle->tx_frm_download_comp_event);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002726
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302727 if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002728 WMA_LOGP("%s: Event Reset failed tx comp event %x",
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302729 __func__, qdf_status);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002730 goto error;
2731 }
2732 }
2733
2734 /* If the frame has to be sent at BD Rate2 inform TxRx */
2735 if (tx_flag & HAL_USE_BD_RATE2_FOR_MANAGEMENT_FRAME)
2736 use_6mbps = 1;
2737
Deepak Dhamdhered97bfb32015-10-11 15:16:18 -07002738 if (wma_handle->interfaces[vdev_id].scan_info.chan_freq != 0) {
2739 chanfreq = wma_handle->interfaces[vdev_id].scan_info.chan_freq;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002740 WMA_LOGI("%s: Preauth frame on channel %d", __func__, chanfreq);
2741 } else if (pFc->subType == SIR_MAC_MGMT_PROBE_RSP) {
Manishekar Chandrasekaran7edffe02016-04-28 20:52:14 +05302742 if ((wma_is_vdev_in_ap_mode(wma_handle, vdev_id)) &&
2743 (0 != wma_handle->interfaces[vdev_id].mhz))
2744 chanfreq = wma_handle->interfaces[vdev_id].mhz;
2745 else
2746 chanfreq = channel_freq;
2747 WMA_LOGI("%s: Probe response frame on channel %d vdev:%d",
2748 __func__, chanfreq, vdev_id);
2749 if (wma_is_vdev_in_ap_mode(wma_handle, vdev_id) && !chanfreq)
2750 WMA_LOGE("%s: AP oper chan is zero", __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002751 } else if (pFc->subType == SIR_MAC_MGMT_ACTION) {
2752 chanfreq = channel_freq;
2753 } else {
2754 chanfreq = 0;
2755 }
2756 if (pMac->fEnableDebugLog & 0x1) {
2757 if ((pFc->type == SIR_MAC_MGMT_FRAME) &&
2758 (pFc->subType != SIR_MAC_MGMT_PROBE_REQ) &&
2759 (pFc->subType != SIR_MAC_MGMT_PROBE_RSP)) {
2760 WMA_LOGE("TX MGMT - Type %hu, SubType %hu seq_num[%d]",
2761 pFc->type, pFc->subType,
2762 ((mHdr->seqControl.seqNumHi << 4) |
2763 mHdr->seqControl.seqNumLo));
2764 }
2765 }
2766
2767 if (WMI_SERVICE_IS_ENABLED(wma_handle->wmi_service_bitmap,
2768 WMI_SERVICE_MGMT_TX_WMI)) {
Govind Singh09c3b492016-03-08 16:05:14 +05302769 mgmt_param.tx_frame = tx_frame;
2770 mgmt_param.frm_len = frmLen;
2771 mgmt_param.vdev_id = vdev_id;
Houston Hoffman5649db92016-06-14 14:44:48 -07002772 mgmt_param.pdata = pData;
Govind Singhfe9ab252016-06-21 14:35:35 +05302773 mgmt_param.chanfreq = chanfreq;
Govind Singh09c3b492016-03-08 16:05:14 +05302774 mgmt_param.qdf_ctx = cds_get_context(QDF_MODULE_ID_QDF_DEVICE);
Himanshu Agarwal2fdf77a2016-12-29 11:41:00 +05302775
2776 psoc = wma_handle->psoc;
2777 if (!psoc) {
2778 WMA_LOGE("%s: psoc ctx is NULL", __func__);
2779 goto error;
2780 }
2781
2782 wh = (struct ieee80211_frame *)(qdf_nbuf_data(tx_frame));
2783 mac_addr = wh->i_addr1;
Selvaraj, Sridhar3a1823f2017-02-01 17:32:21 +05302784 peer = wlan_objmgr_get_peer(psoc, mac_addr, WLAN_MGMT_NB_ID);
Himanshu Agarwal2fdf77a2016-12-29 11:41:00 +05302785 if (!peer) {
2786 mac_addr = wh->i_addr2;
Selvaraj, Sridhar3a1823f2017-02-01 17:32:21 +05302787 peer = wlan_objmgr_get_peer(psoc, mac_addr,
2788 WLAN_MGMT_NB_ID);
Himanshu Agarwal2fdf77a2016-12-29 11:41:00 +05302789 }
2790
2791 status = wlan_mgmt_txrx_mgmt_frame_tx(peer,
2792 (tpAniSirGlobal)wma_handle->mac_context,
2793 (qdf_nbuf_t)tx_frame,
Himanshu Agarwal8b472bc2017-01-20 20:49:41 +05302794 NULL, tx_frm_ota_comp_cb,
Himanshu Agarwal2fdf77a2016-12-29 11:41:00 +05302795 WLAN_UMAC_COMP_MLME, &mgmt_param);
2796 if (status != QDF_STATUS_SUCCESS) {
2797 WMA_LOGE("%s: mgmt tx failed", __func__);
2798 goto error;
Sandeep Puligillaeca12f22016-04-28 11:40:26 -07002799 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002800 } else {
2801 /* Hand over the Tx Mgmt frame to TxRx */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08002802 status = cdp_mgmt_send_ext(soc,
2803 txrx_vdev, tx_frame,
2804 tx_frm_index, use_6mbps, chanfreq);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002805 }
2806
2807 /*
2808 * Failed to send Tx Mgmt Frame
2809 */
2810 if (status) {
2811 /* Call Download Cb so that umac can free the buffer */
2812 if (tx_frm_download_comp_cb)
2813 tx_frm_download_comp_cb(wma_handle->mac_context,
2814 tx_frame,
2815 WMA_TX_FRAME_BUFFER_FREE);
2816 WMA_LOGP("%s: Failed to send Mgmt Frame", __func__);
2817 goto error;
2818 }
2819
2820 if (!tx_frm_download_comp_cb)
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302821 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002822
2823 /*
2824 * Wait for Download Complete
2825 * if required
2826 */
2827 if (downld_comp_required) {
2828 /*
2829 * Wait for Download Complete
2830 * @ Integrated : Dxe Complete
2831 * @ Discrete : Target Download Complete
2832 */
Anurag Chouhance0dc992016-02-16 18:18:03 +05302833 qdf_status =
2834 qdf_wait_single_event(&wma_handle->
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002835 tx_frm_download_comp_event,
2836 WMA_TX_FRAME_COMPLETE_TIMEOUT);
2837
Anurag Chouhance0dc992016-02-16 18:18:03 +05302838 if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002839 WMA_LOGP("Wait Event failed txfrm_comp_event");
2840 /*
2841 * @Integrated: Something Wrong with Dxe
2842 * TODO: Some Debug Code
2843 * Here we need to trigger SSR since the
2844 * system went into a bad state where we
2845 * didn't get the Download Complete for almost
2846 * WMA_TX_FRAME_COMPLETE_TIMEOUT (1 sec)
2847 */
Poddar, Siddarth5a91f5b2016-04-28 12:24:10 +05302848 /* display scheduler stats */
Leo Chang96464902016-10-28 11:10:54 -07002849 cdp_display_stats(soc, WLAN_SCHEDULER_STATS);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002850 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002851 }
2852
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302853 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002854
2855error:
2856 wma_handle->tx_frm_download_comp_cb = NULL;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302857 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002858}
2859
2860/**
2861 * wma_ds_peek_rx_packet_info() - peek rx packet info
2862 * @pkt: packet
2863 * @pkt_meta: packet meta
2864 * @bSwap: byte swap
2865 *
2866 * Function fills the rx packet meta info from the cds packet
2867 *
Anurag Chouhanf04e84f2016-03-03 10:12:12 +05302868 * Return: QDF status
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002869 */
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302870QDF_STATUS wma_ds_peek_rx_packet_info(cds_pkt_t *pkt, void **pkt_meta,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002871 bool bSwap)
2872{
2873 /* Sanity Check */
2874 if (pkt == NULL) {
2875 WMA_LOGE("wma:Invalid parameter sent on wma_peek_rx_pkt_info");
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302876 return QDF_STATUS_E_FAULT;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002877 }
2878
2879 *pkt_meta = &(pkt->pkt_meta);
2880
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302881 return QDF_STATUS_SUCCESS;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002882}
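/*
 * Illustrative caller sketch, not driver code: an rx handler can peek the
 * meta info of a cds packet without consuming the packet itself.
 * example_peek_rx_meta() is a hypothetical helper used only here.
 */
static void example_peek_rx_meta(cds_pkt_t *pkt)
{
	void *pkt_meta = NULL;

	if (wma_ds_peek_rx_packet_info(pkt, &pkt_meta, false) !=
	    QDF_STATUS_SUCCESS)
		return;

	/* pkt_meta now points at pkt->pkt_meta, still owned by the packet */
	WMA_LOGD("%s: rx pkt meta %p", __func__, pkt_meta);
}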
2883
2884/**
2885 * ol_rx_err() - ol rx err handler
2886 * @pdev: ol pdev
2887 * @vdev_id: vdev id
2888 * @peer_mac_addr: peer mac address
2889 * @tid: TID
2890 * @tsf32: TSF
2891 * @err_type: error type
2892 * @rx_frame: rx frame
2893 * @pn: PN Number
2894 * @key_id: key id
2895 *
2896 * This function handles rx errors and sends a MIC failure indication to SME
2897 *
2898 * Return: none
2899 */
Jeff Johnsonbd6ebd22017-01-17 13:46:38 -08002900/*
2901 * Local prototype added to temporarily address warning caused by
2902 * -Wmissing-prototypes. A more correct solution will come later
2903 * as part of IR-196435, at which point this prototype will
2904 * be removed.
2905 */
2906void ol_rx_err(void *pdev, uint8_t vdev_id,
2907 uint8_t *peer_mac_addr, int tid, uint32_t tsf32,
2908 enum ol_rx_err_type err_type, qdf_nbuf_t rx_frame,
2909 uint64_t *pn, uint8_t key_id);
Leo Chang96464902016-10-28 11:10:54 -07002910void ol_rx_err(void *pdev, uint8_t vdev_id,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002911 uint8_t *peer_mac_addr, int tid, uint32_t tsf32,
Nirav Shahcbc6d722016-03-01 16:24:53 +05302912 enum ol_rx_err_type err_type, qdf_nbuf_t rx_frame,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002913 uint64_t *pn, uint8_t key_id)
2914{
Anurag Chouhan6d760662016-02-20 16:05:43 +05302915 tp_wma_handle wma = cds_get_context(QDF_MODULE_ID_WMA);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002916 tpSirSmeMicFailureInd mic_err_ind;
2917 struct ether_header *eth_hdr;
Rajeev Kumarb60abe42017-01-21 15:39:31 -08002918 struct scheduler_msg cds_msg;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002919
2920 if (NULL == wma) {
2921 WMA_LOGE("%s: Failed to get wma", __func__);
2922 return;
2923 }
2924
2925 if (err_type != OL_RX_ERR_TKIP_MIC)
2926 return;
2927
Nirav Shahcbc6d722016-03-01 16:24:53 +05302928 if (qdf_nbuf_len(rx_frame) < sizeof(*eth_hdr))
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002929 return;
Nirav Shahcbc6d722016-03-01 16:24:53 +05302930 eth_hdr = (struct ether_header *)qdf_nbuf_data(rx_frame);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302931 mic_err_ind = qdf_mem_malloc(sizeof(*mic_err_ind));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002932 if (!mic_err_ind) {
2933 WMA_LOGE("%s: Failed to allocate memory for MIC indication message",
2934 __func__);
2935 return;
2936 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002937
2938 mic_err_ind->messageType = eWNI_SME_MIC_FAILURE_IND;
2939 mic_err_ind->length = sizeof(*mic_err_ind);
2940 mic_err_ind->sessionId = vdev_id;
Anurag Chouhanc5548422016-02-24 18:33:27 +05302941 qdf_copy_macaddr(&mic_err_ind->bssId,
Anurag Chouhan6d760662016-02-20 16:05:43 +05302942 (struct qdf_mac_addr *) &wma->interfaces[vdev_id].bssid);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302943 qdf_mem_copy(mic_err_ind->info.taMacAddr,
Anurag Chouhan6d760662016-02-20 16:05:43 +05302944 (struct qdf_mac_addr *) peer_mac_addr,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002945 sizeof(tSirMacAddr));
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302946 qdf_mem_copy(mic_err_ind->info.srcMacAddr,
Anurag Chouhan6d760662016-02-20 16:05:43 +05302947 (struct qdf_mac_addr *) eth_hdr->ether_shost,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002948 sizeof(tSirMacAddr));
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302949 qdf_mem_copy(mic_err_ind->info.dstMacAddr,
Anurag Chouhan6d760662016-02-20 16:05:43 +05302950 (struct qdf_mac_addr *) eth_hdr->ether_dhost,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002951 sizeof(tSirMacAddr));
2952 mic_err_ind->info.keyId = key_id;
2953 mic_err_ind->info.multicast =
2954 IEEE80211_IS_MULTICAST(eth_hdr->ether_dhost);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302955 qdf_mem_copy(mic_err_ind->info.TSC, pn, SIR_CIPHER_SEQ_CTR_SIZE);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002956
Rajeev Kumarb60abe42017-01-21 15:39:31 -08002957 qdf_mem_set(&cds_msg, sizeof(struct scheduler_msg), 0);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002958 cds_msg.type = eWNI_SME_MIC_FAILURE_IND;
2959 cds_msg.bodyptr = (void *) mic_err_ind;
2960
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05302961 if (QDF_STATUS_SUCCESS !=
Rajeev Kumarb60abe42017-01-21 15:39:31 -08002962 scheduler_post_msg(QDF_MODULE_ID_SME,
Rajeev Kumar156188e2017-01-21 17:23:52 -08002963 &cds_msg)) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002964 WMA_LOGE("%s: could not post mic failure indication to SME",
2965 __func__);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05302966 qdf_mem_free((void *)mic_err_ind);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002967 }
2968}
2969
2970/**
2971 * wma_tx_abort() - abort tx
2972 * @vdev_id: vdev id
2973 *
2974 * In case of deauth, the host aborts transmitting packets.
2975 *
2976 * Return: none
2977 */
2978void wma_tx_abort(uint8_t vdev_id)
2979{
2980#define PEER_ALL_TID_BITMASK 0xffffffff
2981 tp_wma_handle wma;
2982 uint32_t peer_tid_bitmap = PEER_ALL_TID_BITMASK;
2983 struct wma_txrx_node *iface;
Govind Singhd76a5b02016-03-08 15:12:14 +05302984 struct peer_flush_params param = {0};
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002985
Anurag Chouhan6d760662016-02-20 16:05:43 +05302986 wma = cds_get_context(QDF_MODULE_ID_WMA);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002987 if (NULL == wma) {
2988 WMA_LOGE("%s: wma is NULL", __func__);
2989 return;
2990 }
2991
2992 iface = &wma->interfaces[vdev_id];
2993 if (!iface->handle) {
2994 WMA_LOGE("%s: Failed to get iface handle: %p",
2995 __func__, iface->handle);
2996 return;
2997 }
yeshwanth sriram guntukae83d8ff2017-02-07 12:18:18 +05302998 WMA_LOGI("%s: vdevid %d bssid %pM", __func__, vdev_id, iface->bssid);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002999 iface->pause_bitmap |= (1 << PAUSE_TYPE_HOST);
Leo Chang96464902016-10-28 11:10:54 -07003000 cdp_fc_vdev_pause(cds_get_context(QDF_MODULE_ID_SOC),
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -08003001 iface->handle,
3002 OL_TXQ_PAUSE_REASON_TX_ABORT);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003003
3004 /* Flush all TIDs except MGMT TID for this peer in Target */
3005 peer_tid_bitmap &= ~(0x1 << WMI_MGMT_TID);
Govind Singhd76a5b02016-03-08 15:12:14 +05303006 param.peer_tid_bitmap = peer_tid_bitmap;
3007 param.vdev_id = vdev_id;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003008 wmi_unified_peer_flush_tids_send(wma->wmi_handle, iface->bssid,
Govind Singhd76a5b02016-03-08 15:12:14 +05303009 &param);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003010}
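/*
 * Illustrative sketch, not driver code: building a peer_flush_params for an
 * arbitrary set of data TIDs while always sparing the management TID, as
 * the abort path above does. example_flush_data_tids() is a hypothetical
 * helper used only for this sketch.
 */
static QDF_STATUS example_flush_data_tids(tp_wma_handle wma, uint8_t vdev_id,
					  uint8_t *peer_addr,
					  uint32_t tid_bitmap)
{
	struct peer_flush_params param = {0};

	/* Never flush the management TID from this path */
	param.peer_tid_bitmap = tid_bitmap & ~(0x1 << WMI_MGMT_TID);
	param.vdev_id = vdev_id;

	return wmi_unified_peer_flush_tids_send(wma->wmi_handle, peer_addr,
						&param);
}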
3011
3012#if defined(FEATURE_LRO)
3013/**
3014 * wma_lro_config_cmd() - process the LRO config command
3015 * @wma_handle: Pointer to WMA handle
3016 * @wma_lro_cmd: Pointer to LRO configuration parameters
3017 *
3018 * This function sends down the LRO configuration parameters to
3019 * the firmware to enable LRO, sets the TCP flags and sets the
3020 * seed values for the toeplitz hash generation
3021 *
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303022 * Return: QDF_STATUS_SUCCESS for success otherwise failure
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003023 */
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303024QDF_STATUS wma_lro_config_cmd(tp_wma_handle wma_handle,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003025 struct wma_lro_config_cmd_t *wma_lro_cmd)
3026{
Himanshu Agarwal17dea6e2016-03-09 12:11:22 +05303027 struct wmi_lro_config_cmd_t wmi_lro_cmd = {0};
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003028
3029 if (NULL == wma_handle || NULL == wma_lro_cmd) {
3030 WMA_LOGE("wma_lro_config_cmd': invalid input!");
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303031 return QDF_STATUS_E_FAILURE;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003032 }
3033
Himanshu Agarwal17dea6e2016-03-09 12:11:22 +05303034 wmi_lro_cmd.lro_enable = wma_lro_cmd->lro_enable;
3035 wmi_lro_cmd.tcp_flag = wma_lro_cmd->tcp_flag;
3036 wmi_lro_cmd.tcp_flag_mask = wma_lro_cmd->tcp_flag_mask;
3037 qdf_mem_copy(wmi_lro_cmd.toeplitz_hash_ipv4,
3038 wma_lro_cmd->toeplitz_hash_ipv4,
3039 LRO_IPV4_SEED_ARR_SZ * sizeof(uint32_t));
3040 qdf_mem_copy(wmi_lro_cmd.toeplitz_hash_ipv6,
3041 wma_lro_cmd->toeplitz_hash_ipv6,
3042 LRO_IPV6_SEED_ARR_SZ * sizeof(uint32_t));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003043
Himanshu Agarwal17dea6e2016-03-09 12:11:22 +05303044 return wmi_unified_lro_config_cmd(wma_handle->wmi_handle,
3045 &wmi_lro_cmd);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003046}
3047#endif
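#if defined(FEATURE_LRO)
/*
 * Illustrative sketch, not driver code: enabling LRO through
 * wma_lro_config_cmd() with caller-supplied Toeplitz seeds. The helper
 * name and the tcp_flag/tcp_flag_mask values are hypothetical; real
 * callers take them from the platform's LRO configuration.
 */
static QDF_STATUS example_enable_lro(tp_wma_handle wma_handle,
				     const uint32_t *seed_v4,
				     const uint32_t *seed_v6)
{
	struct wma_lro_config_cmd_t lro_cmd = {0};

	lro_cmd.lro_enable = 1;
	lro_cmd.tcp_flag = 0x10;      /* hypothetical: match ACK-only flows */
	lro_cmd.tcp_flag_mask = 0x3f; /* hypothetical mask */
	qdf_mem_copy(lro_cmd.toeplitz_hash_ipv4, seed_v4,
		     LRO_IPV4_SEED_ARR_SZ * sizeof(uint32_t));
	qdf_mem_copy(lro_cmd.toeplitz_hash_ipv6, seed_v6,
		     LRO_IPV6_SEED_ARR_SZ * sizeof(uint32_t));

	return wma_lro_config_cmd(wma_handle, &lro_cmd);
}
#endif /* FEATURE_LRO */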
Dhanashri Atre1f0cbe42015-11-19 10:56:53 -08003048
3049/**
3050 * wma_indicate_err() - indicate an error to the protocol stack
3051 * @err_type: error type
3052 * @err_info: information associated with the error
3053 *
3054 * This function indicates an error encountered in the data path
3055 * to the protocol stack
3056 *
3057 * Return: none
3058 */
3059void
3060wma_indicate_err(
3061 enum ol_rx_err_type err_type,
3062 struct ol_error_info *err_info)
3063{
3064 switch (err_type) {
3065 case OL_RX_ERR_TKIP_MIC:
3066 {
Anurag Chouhan6d760662016-02-20 16:05:43 +05303067 tp_wma_handle wma = cds_get_context(QDF_MODULE_ID_WMA);
Dhanashri Atre1f0cbe42015-11-19 10:56:53 -08003068 tpSirSmeMicFailureInd mic_err_ind;
Rajeev Kumarb60abe42017-01-21 15:39:31 -08003069 struct scheduler_msg cds_msg;
Dhanashri Atre1f0cbe42015-11-19 10:56:53 -08003070 uint8_t vdev_id;
3071
3072 if (NULL == wma) {
3073 WMA_LOGE("%s: Failed to get wma context",
3074 __func__);
3075 return;
3076 }
3077
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303078 mic_err_ind = qdf_mem_malloc(sizeof(*mic_err_ind));
Dhanashri Atre1f0cbe42015-11-19 10:56:53 -08003079 if (!mic_err_ind) {
3080 WMA_LOGE("%s: MIC indication mem alloc failed",
3081 __func__);
3082 return;
3083 }
3084
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303085 qdf_mem_set((void *) mic_err_ind, 0,
Dhanashri Atre1f0cbe42015-11-19 10:56:53 -08003086 sizeof(*mic_err_ind));
3087 mic_err_ind->messageType = eWNI_SME_MIC_FAILURE_IND;
3088 mic_err_ind->length = sizeof(*mic_err_ind);
3089 vdev_id = err_info->u.mic_err.vdev_id;
Anurag Chouhanc5548422016-02-24 18:33:27 +05303090 qdf_copy_macaddr(&mic_err_ind->bssId,
Anurag Chouhan6d760662016-02-20 16:05:43 +05303091 (struct qdf_mac_addr *) &wma->interfaces[vdev_id].bssid);
Dhanashri Atre1f0cbe42015-11-19 10:56:53 -08003092 WMA_LOGE("MIC error: BSSID:%02x:%02x:%02x:%02x:%02x:%02x\n",
3093 mic_err_ind->bssId.bytes[0], mic_err_ind->bssId.bytes[1],
3094 mic_err_ind->bssId.bytes[2], mic_err_ind->bssId.bytes[3],
3095 mic_err_ind->bssId.bytes[4], mic_err_ind->bssId.bytes[5]);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303096 qdf_mem_copy(mic_err_ind->info.taMacAddr,
Anurag Chouhan6d760662016-02-20 16:05:43 +05303097 (struct qdf_mac_addr *) err_info->u.mic_err.ta,
Dhanashri Atre1f0cbe42015-11-19 10:56:53 -08003098 sizeof(tSirMacAddr));
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303099 qdf_mem_copy(mic_err_ind->info.srcMacAddr,
Anurag Chouhan6d760662016-02-20 16:05:43 +05303100 (struct qdf_mac_addr *) err_info->u.mic_err.sa,
Dhanashri Atre1f0cbe42015-11-19 10:56:53 -08003101 sizeof(tSirMacAddr));
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303102 qdf_mem_copy(mic_err_ind->info.dstMacAddr,
Anurag Chouhan6d760662016-02-20 16:05:43 +05303103 (struct qdf_mac_addr *) err_info->u.mic_err.da,
Dhanashri Atre1f0cbe42015-11-19 10:56:53 -08003104 sizeof(tSirMacAddr));
3105 mic_err_ind->info.keyId = err_info->u.mic_err.key_id;
3106 mic_err_ind->info.multicast =
3107 IEEE80211_IS_MULTICAST(err_info->u.mic_err.da);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303108 qdf_mem_copy(mic_err_ind->info.TSC,
Dhanashri Atre1f0cbe42015-11-19 10:56:53 -08003109 (void *)&err_info->
3110 u.mic_err.pn, SIR_CIPHER_SEQ_CTR_SIZE);
3111
Rajeev Kumarb60abe42017-01-21 15:39:31 -08003112 qdf_mem_set(&cds_msg, sizeof(struct scheduler_msg), 0);
Dhanashri Atre1f0cbe42015-11-19 10:56:53 -08003113 cds_msg.type = eWNI_SME_MIC_FAILURE_IND;
3114 cds_msg.bodyptr = (void *) mic_err_ind;
Anurag Chouhanfb54ab02016-02-18 18:00:46 +05303115 if (QDF_STATUS_SUCCESS !=
Rajeev Kumarb60abe42017-01-21 15:39:31 -08003116 scheduler_post_msg(QDF_MODULE_ID_SME,
Rajeev Kumar156188e2017-01-21 17:23:52 -08003117 &cds_msg)) {
Dhanashri Atre1f0cbe42015-11-19 10:56:53 -08003118 WMA_LOGE("%s: mic failure ind post to SME failed",
3119 __func__);
Anurag Chouhan600c3a02016-03-01 10:33:54 +05303120 qdf_mem_free((void *)mic_err_ind);
Dhanashri Atre1f0cbe42015-11-19 10:56:53 -08003121 }
3122 break;
3123 }
3124 default:
3125 {
3126 WMA_LOGE("%s: unhandled ol error type %d", __func__, err_type);
3127 break;
3128 }
3129 }
3130}