/*
2 * Copyright (c) 2013-2015 The Linux Foundation. All rights reserved.
3 *
4 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5 *
6 *
7 * Permission to use, copy, modify, and/or distribute this software for
8 * any purpose with or without fee is hereby granted, provided that the
9 * above copyright notice and this permission notice appear in all
10 * copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19 * PERFORMANCE OF THIS SOFTWARE.
20 */
21
22/*
23 * This file was originally distributed by Qualcomm Atheros, Inc.
24 * under proprietary terms before Copyright ownership was assigned
25 * to the Linux Foundation.
26 */
27
28/**
 * DOC: wma_utils.c
30 * This file contains utilities and stats related functions.
31 */
32
33/* Header files */
34
35#include "wma.h"
36#include "wma_api.h"
37#include "cds_api.h"
38#include "wmi_unified_api.h"
39#include "wlan_qct_sys.h"
40#include "wni_api.h"
41#include "ani_global.h"
42#include "wmi_unified.h"
43#include "wni_cfg.h"
44#include "cfg_api.h"
45#include "ol_txrx_ctrl_api.h"
46#include "wlan_tgt_def_config.h"
47
48#include "cdf_nbuf.h"
49#include "cdf_types.h"
50#include "ol_txrx_api.h"
51#include "cdf_memory.h"
52#include "ol_txrx_types.h"
53#include "ol_txrx_peer_find.h"
54
55#include "wma_types.h"
56#include "lim_api.h"
57#include "lim_session_utils.h"
58
59#include "cds_utils.h"
60
61#if !defined(REMOVE_PKT_LOG)
62#include "pktlog_ac.h"
63#endif /* REMOVE_PKT_LOG */
64
65#include "dbglog_host.h"
66#include "csr_api.h"
67#include "ol_fw.h"
68
69#include "dfs.h"
70#include "wma_internal.h"
71
72/* MCS Based rate table */
73/* HT MCS parameters with Nss = 1 */
74static struct index_data_rate_type supported_mcs_rate_nss1[] = {
75 /* MCS L20 L40 S20 S40 */
76 {0, {65, 135, 72, 150} },
77 {1, {130, 270, 144, 300} },
78 {2, {195, 405, 217, 450} },
79 {3, {260, 540, 289, 600} },
80 {4, {390, 810, 433, 900} },
81 {5, {520, 1080, 578, 1200} },
82 {6, {585, 1215, 650, 1350} },
83 {7, {650, 1350, 722, 1500} }
84};
85
86/* HT MCS parameters with Nss = 2 */
87static struct index_data_rate_type supported_mcs_rate_nss2[] = {
88 /* MCS L20 L40 S20 S40 */
89 {0, {130, 270, 144, 300} },
90 {1, {260, 540, 289, 600} },
91 {2, {390, 810, 433, 900} },
92 {3, {520, 1080, 578, 1200} },
93 {4, {780, 1620, 867, 1800} },
94 {5, {1040, 2160, 1156, 2400} },
95 {6, {1170, 2430, 1300, 2700} },
96 {7, {1300, 2700, 1444, 3000} }
97};
98
99#ifdef WLAN_FEATURE_11AC
100/* MCS Based VHT rate table */
101/* MCS parameters with Nss = 1*/
102static struct index_vht_data_rate_type supported_vht_mcs_rate_nss1[] = {
        /* MCS  L80    S80     L40    S40     L20   S20 */
104 {0, {293, 325}, {135, 150}, {65, 72} },
105 {1, {585, 650}, {270, 300}, {130, 144} },
106 {2, {878, 975}, {405, 450}, {195, 217} },
107 {3, {1170, 1300}, {540, 600}, {260, 289} },
108 {4, {1755, 1950}, {810, 900}, {390, 433} },
109 {5, {2340, 2600}, {1080, 1200}, {520, 578} },
110 {6, {2633, 2925}, {1215, 1350}, {585, 650} },
111 {7, {2925, 3250}, {1350, 1500}, {650, 722} },
112 {8, {3510, 3900}, {1620, 1800}, {780, 867} },
113 {9, {3900, 4333}, {1800, 2000}, {780, 867} }
114};
115
116/*MCS parameters with Nss = 2*/
117static struct index_vht_data_rate_type supported_vht_mcs_rate_nss2[] = {
        /* MCS  L80    S80     L40    S40     L20   S20 */
119 {0, {585, 650}, {270, 300}, {130, 144} },
120 {1, {1170, 1300}, {540, 600}, {260, 289} },
121 {2, {1755, 1950}, {810, 900}, {390, 433} },
122 {3, {2340, 2600}, {1080, 1200}, {520, 578} },
123 {4, {3510, 3900}, {1620, 1800}, {780, 867} },
124 {5, {4680, 5200}, {2160, 2400}, {1040, 1156} },
125 {6, {5265, 5850}, {2430, 2700}, {1170, 1300} },
126 {7, {5850, 6500}, {2700, 3000}, {1300, 1444} },
127 {8, {7020, 7800}, {3240, 3600}, {1560, 1733} },
128 {9, {7800, 8667}, {3600, 4000}, {1560, 1733} }
129};
130#endif /* WLAN_FEATURE_11AC */
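
/*
 * Note on the rate tables above: every entry is a PHY rate in units of
 * 100 kbps (so 65 means 6.5 Mbps and 1500 means 150.0 Mbps). For HT the
 * four columns are long-GI 20 MHz, long-GI 40 MHz, short-GI 20 MHz and
 * short-GI 40 MHz; for VHT each row carries {long-GI, short-GI} pairs for
 * 80, 40 and 20 MHz. Worked example, just reading the table: HT MCS 7,
 * Nss = 1, 40 MHz, short GI is 1500, i.e. 150.0 Mbps, which is the same
 * unit wma_get_mcs_idx() below receives as maxRate (the firmware link
 * speed in kbps divided by 100).
 */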
131
132#ifdef BIG_ENDIAN_HOST
133
134/* ############# function definitions ############ */
135
136/**
137 * wma_swap_bytes() - swap bytes
138 * @pv: buffer
139 * @n: swap bytes
140 *
141 * Return: none
142 */
143void wma_swap_bytes(void *pv, uint32_t n)
144{
145 int32_t no_words;
146 int32_t i;
147 uint32_t *word_ptr;
148
149 no_words = n / sizeof(uint32_t);
150 word_ptr = (uint32_t *) pv;
151 for (i = 0; i < no_words; i++) {
152 *(word_ptr + i) = __cpu_to_le32(*(word_ptr + i));
153 }
154}
155
156#define SWAPME(x, len) wma_swap_bytes(&x, len);
157#endif /* BIG_ENDIAN_HOST */
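
/*
 * Illustration for big-endian hosts: wma_swap_bytes() rewrites the buffer
 * one 32-bit word at a time, so bytes {0x11, 0x22, 0x33, 0x44} come back as
 * {0x44, 0x33, 0x22, 0x11}. Because n / sizeof(uint32_t) is integer
 * division, any trailing bytes beyond a multiple of four are left untouched.
 * SWAPME() is used this way by the firmware debug-print handler later in
 * this file.
 */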
158
159/**
160 * wma_get_mcs_idx() - get mcs index
161 * @maxRate: max rate
162 * @rate_flags: rate flags
 * @nss: number of spatial streams
164 * @mcsRateFlag: mcs rate flag
165 *
 * Return: mcs index, or INVALID_MCS_IDX if the rate is not found
167 */
168static uint8_t wma_get_mcs_idx(uint16_t maxRate, uint8_t rate_flags,
169 uint8_t nss, uint8_t *mcsRateFlag)
170{
171 uint8_t rateFlag = 0, curIdx = 0;
172 uint16_t curRate;
173 bool found = false;
174#ifdef WLAN_FEATURE_11AC
175 struct index_vht_data_rate_type *supported_vht_mcs_rate;
176#endif /* WLAN_FEATURE_11AC */
177 struct index_data_rate_type *supported_mcs_rate;
178
        WMA_LOGD("%s rate:%d rate_flags:%d", __func__, maxRate, rate_flags);
180#ifdef WLAN_FEATURE_11AC
181 supported_vht_mcs_rate = (struct index_vht_data_rate_type *)
182 ((nss == 1) ? &supported_vht_mcs_rate_nss1 :
183 &supported_vht_mcs_rate_nss2);
184#endif /* WLAN_FEATURE_11AC */
185 supported_mcs_rate = (struct index_data_rate_type *)
186 ((nss == 1) ? &supported_mcs_rate_nss1 : &supported_mcs_rate_nss2);
187
188 *mcsRateFlag = rate_flags;
189 *mcsRateFlag &= ~eHAL_TX_RATE_SGI;
190#ifdef WLAN_FEATURE_11AC
191 if (rate_flags &
192 (eHAL_TX_RATE_VHT20 | eHAL_TX_RATE_VHT40 | eHAL_TX_RATE_VHT80)) {
193
194 if (rate_flags & eHAL_TX_RATE_VHT80) {
195 for (curIdx = 0; curIdx < MAX_VHT_MCS_IDX; curIdx++) {
196 rateFlag = 0;
197 if (curIdx >= 7) {
198 if (rate_flags & eHAL_TX_RATE_SGI)
199 rateFlag |= 0x1;
200 }
201
202 curRate = supported_vht_mcs_rate[curIdx].supported_VHT80_rate[rateFlag];
203 if (curRate == maxRate) {
204 found = true;
205 break;
206 }
207 }
208 }
209
210 if ((found == false) &&
211 ((rate_flags & eHAL_TX_RATE_VHT80) ||
212 (rate_flags & eHAL_TX_RATE_VHT40))) {
213 for (curIdx = 0; curIdx < MAX_VHT_MCS_IDX; curIdx++) {
214 rateFlag = 0;
215 if (curIdx >= 7) {
216 if (rate_flags & eHAL_TX_RATE_SGI)
217 rateFlag |= 0x1;
218 }
219
220 curRate = supported_vht_mcs_rate[curIdx].supported_VHT40_rate[rateFlag];
221 if (curRate == maxRate) {
222 found = true;
223 *mcsRateFlag &= ~eHAL_TX_RATE_VHT80;
224 break;
225 }
226 }
227 }
228
229 if ((found == false) &&
230 ((rate_flags & eHAL_TX_RATE_VHT80) ||
231 (rate_flags & eHAL_TX_RATE_VHT40) ||
232 (rate_flags & eHAL_TX_RATE_VHT20))) {
233 for (curIdx = 0; curIdx < MAX_VHT_MCS_IDX; curIdx++) {
234 rateFlag = 0;
235 if (curIdx >= 7) {
236 if (rate_flags & eHAL_TX_RATE_SGI)
237 rateFlag |= 0x1;
238 }
239
240 curRate = supported_vht_mcs_rate[curIdx].supported_VHT20_rate[rateFlag];
241 if (curRate == maxRate) {
242 found = true;
243 *mcsRateFlag &=
244 ~(eHAL_TX_RATE_VHT80 |
245 eHAL_TX_RATE_VHT40);
246 break;
247 }
248 }
249 }
250 }
251#endif /* WLAN_FEATURE_11AC */
252 if ((found == false) &&
253 (rate_flags & (eHAL_TX_RATE_HT40 | eHAL_TX_RATE_HT20))) {
254 if (rate_flags & eHAL_TX_RATE_HT40) {
255 rateFlag = 0x1;
256
257 for (curIdx = 0; curIdx < MAX_HT_MCS_IDX; curIdx++) {
258 if (curIdx == 7) {
259 if (rate_flags & eHAL_TX_RATE_SGI)
260 rateFlag |= 0x2;
261 }
262
263 curRate = supported_mcs_rate[curIdx].supported_rate[rateFlag];
264 if (curRate == maxRate) {
265 found = true;
266 *mcsRateFlag = eHAL_TX_RATE_HT40;
267 break;
268 }
269 }
270 }
271
272 if (found == false) {
273 rateFlag = 0;
274 for (curIdx = 0; curIdx < MAX_HT_MCS_IDX; curIdx++) {
275 if (curIdx == 7) {
276 if (rate_flags & eHAL_TX_RATE_SGI)
277 rateFlag |= 0x2;
278 }
279
280 curRate = supported_mcs_rate[curIdx].supported_rate[rateFlag];
281 if (curRate == maxRate) {
282 found = true;
283 *mcsRateFlag = eHAL_TX_RATE_HT20;
284 break;
285 }
286 }
287 }
288 }
289
        /* SGI rates are used by firmware only for MCS >= 7 */
291 if (found && (curIdx >= 7))
292 *mcsRateFlag |= eHAL_TX_RATE_SGI;
293
294 return found ? curIdx : INVALID_MCS_IDX;
295}
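
/*
 * Illustrative sketch (not part of the driver and never called): how the
 * peer-stats path below uses wma_get_mcs_idx(). The rate argument is in
 * units of 100 kbps, so 722 corresponds to 72.2 Mbps, which the Nss = 1 HT
 * table above lists as MCS 7 at 20 MHz with short GI.
 */
static __attribute__((unused)) void wma_get_mcs_idx_example(void)
{
        uint8_t mcs_flags = 0;
        uint8_t mcs_idx;

        mcs_idx = wma_get_mcs_idx(722, eHAL_TX_RATE_HT20 | eHAL_TX_RATE_SGI,
                                  1, &mcs_flags);
        if (mcs_idx == INVALID_MCS_IDX)
                WMA_LOGD("rate not present in the MCS tables");
        else
                WMA_LOGD("mcs_idx %d mcs_rate_flags 0x%x", mcs_idx, mcs_flags);
}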
296
297/**
298 * host_map_smps_mode() - map fw smps mode to tSmpsModeValue
299 * @fw_smps_mode: fw smps mode
300 *
301 * Return: return tSmpsModeValue
302 */
303tSmpsModeValue host_map_smps_mode(A_UINT32 fw_smps_mode)
304{
305 tSmpsModeValue smps_mode = SMPS_MODE_DISABLED;
306 switch (fw_smps_mode) {
307 case WMI_SMPS_FORCED_MODE_STATIC:
308 smps_mode = STATIC_SMPS_MODE;
309 break;
310 case WMI_SMPS_FORCED_MODE_DYNAMIC:
311 smps_mode = DYNAMIC_SMPS_MODE;
312 break;
313 default:
314 smps_mode = SMPS_MODE_DISABLED;
315 }
316
317 return smps_mode;
318}
319
320#ifdef WLAN_FEATURE_STATS_EXT
321/**
322 * wma_stats_ext_event_handler() - extended stats event handler
323 * @handle: wma handle
324 * @event_buf: event buffer received from fw
325 * @len: length of data
326 *
327 * Return: 0 for success or error code
328 */
329int wma_stats_ext_event_handler(void *handle, uint8_t *event_buf,
330 uint32_t len)
331{
332 WMI_STATS_EXT_EVENTID_param_tlvs *param_buf;
333 tSirStatsExtEvent *stats_ext_event;
334 wmi_stats_ext_event_fixed_param *stats_ext_info;
335 CDF_STATUS status;
336 cds_msg_t cds_msg;
337 uint8_t *buf_ptr;
338 uint32_t alloc_len;
339
340 WMA_LOGD("%s: Posting stats ext event to SME", __func__);
341
342 param_buf = (WMI_STATS_EXT_EVENTID_param_tlvs *) event_buf;
343 if (!param_buf) {
344 WMA_LOGE("%s: Invalid stats ext event buf", __func__);
345 return -EINVAL;
346 }
347
348 stats_ext_info = param_buf->fixed_param;
349 buf_ptr = (uint8_t *) stats_ext_info;
350
351 alloc_len = sizeof(tSirStatsExtEvent);
352 alloc_len += stats_ext_info->data_len;
353
354 stats_ext_event = (tSirStatsExtEvent *) cdf_mem_malloc(alloc_len);
355 if (NULL == stats_ext_event) {
356 WMA_LOGE("%s: Memory allocation failure", __func__);
357 return -ENOMEM;
358 }
359
360 buf_ptr += sizeof(wmi_stats_ext_event_fixed_param) + WMI_TLV_HDR_SIZE;
361
362 stats_ext_event->vdev_id = stats_ext_info->vdev_id;
363 stats_ext_event->event_data_len = stats_ext_info->data_len;
364 cdf_mem_copy(stats_ext_event->event_data,
365 buf_ptr, stats_ext_event->event_data_len);
366
367 cds_msg.type = eWNI_SME_STATS_EXT_EVENT;
368 cds_msg.bodyptr = (void *)stats_ext_event;
369 cds_msg.bodyval = 0;
370
371 status = cds_mq_post_message(CDS_MQ_ID_SME, &cds_msg);
372 if (status != CDF_STATUS_SUCCESS) {
373 WMA_LOGE("%s: Failed to post stats ext event to SME", __func__);
374 cdf_mem_free(stats_ext_event);
375 return -EFAULT;
376 }
377
378 WMA_LOGD("%s: stats ext event Posted to SME", __func__);
379 return 0;
380}
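
/*
 * Illustrative sketch (assumption: the real registration is done elsewhere
 * in the wma layer, not in this file): the handler above only runs once it
 * has been registered for WMI_STATS_EXT_EVENTID, along the lines of the
 * link-layer-stats registration helper further down in this file.
 */
static __attribute__((unused)) void wma_stats_ext_register_example(
                                                tp_wma_handle wma_handle)
{
        if (NULL == wma_handle) {
                WMA_LOGE("%s: wma_handle is NULL", __func__);
                return;
        }

        wmi_unified_register_event_handler(wma_handle->wmi_handle,
                                           WMI_STATS_EXT_EVENTID,
                                           wma_stats_ext_event_handler);
}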
381#endif /* WLAN_FEATURE_STATS_EXT */
382
383
384#ifdef WLAN_FEATURE_LINK_LAYER_STATS
385
386/**
387 * wma_unified_link_peer_stats_event_handler() - peer stats event handler
388 * @handle: wma handle
389 * @cmd_param_info: data received with event from fw
390 * @len: length of data
391 *
392 * Return: 0 for success or error code
393 */
394static int wma_unified_link_peer_stats_event_handler(void *handle,
395 uint8_t *cmd_param_info,
396 uint32_t len)
397{
398 WMI_PEER_LINK_STATS_EVENTID_param_tlvs *param_tlvs;
399 wmi_peer_stats_event_fixed_param *fixed_param;
400 wmi_peer_link_stats *peer_stats, *temp_peer_stats;
401 wmi_rate_stats *rate_stats;
402 tSirLLStatsResults *link_stats_results;
403 uint8_t *results, *t_peer_stats, *t_rate_stats;
        uint32_t count, rate_cnt, num_rates = 0;
405 uint32_t next_res_offset, next_peer_offset, next_rate_offset;
406 size_t peer_info_size, peer_stats_size, rate_stats_size;
407 size_t link_stats_results_size;
408
409 tpAniSirGlobal pMac = cds_get_context(CDF_MODULE_ID_PE);
410
411 if (!pMac) {
412 WMA_LOGD("%s: NULL pMac ptr. Exiting", __func__);
413 return -EINVAL;
414 }
415
416 if (!pMac->sme.pLinkLayerStatsIndCallback) {
417 WMA_LOGD("%s: HDD callback is null", __func__);
418 return -EINVAL;
419 }
420
421 WMA_LOGD("%s: Posting Peer Stats event to HDD", __func__);
422 param_tlvs = (WMI_PEER_LINK_STATS_EVENTID_param_tlvs *) cmd_param_info;
423 if (!param_tlvs) {
424 WMA_LOGA("%s: Invalid stats event", __func__);
425 return -EINVAL;
426 }
427 /*
428 * cmd_param_info contains
429 * wmi_peer_stats_event_fixed_param fixed_param;
430 * num_peers * size of(struct wmi_peer_link_stats)
431 * num_rates * size of(struct wmi_rate_stats)
432 * num_rates is the sum of the rates of all the peers.
433 */
434 fixed_param = param_tlvs->fixed_param;
435 peer_stats = param_tlvs->peer_stats;
436 rate_stats = param_tlvs->peer_rate_stats;
437
438 if (!fixed_param || !peer_stats ||
439 (peer_stats->num_rates && !rate_stats)) {
440 WMA_LOGA("%s: Invalid param_tlvs for Peer Stats", __func__);
441 return -EINVAL;
442 }
443
444 /*
445 * num_rates - sum of the rates of all the peers
446 */
447 temp_peer_stats = (wmi_peer_link_stats *) peer_stats;
448 for (count = 0; count < fixed_param->num_peers; count++) {
449 num_rates += temp_peer_stats->num_rates;
450 temp_peer_stats++;
451 }
452
453 peer_stats_size = sizeof(tSirWifiPeerStat);
454 peer_info_size = sizeof(tSirWifiPeerInfo);
455 rate_stats_size = sizeof(tSirWifiRateStat);
456 link_stats_results_size =
457 sizeof(*link_stats_results) + peer_stats_size +
458 (fixed_param->num_peers * peer_info_size) +
459 (num_rates * rate_stats_size);
460
461 link_stats_results = cdf_mem_malloc(link_stats_results_size);
462 if (NULL == link_stats_results) {
463 WMA_LOGD("%s: could not allocate mem for stats results-len %zu",
464 __func__, link_stats_results_size);
465 return -ENOMEM;
466 }
467
468 WMA_LOGD("Peer stats from FW event buf");
469 WMA_LOGD("Fixed Param:");
470 WMA_LOGD("request_id %u num_peers %u peer_event_number %u more_data %u",
471 fixed_param->request_id, fixed_param->num_peers,
472 fixed_param->peer_event_number, fixed_param->more_data);
473
474 cdf_mem_zero(link_stats_results, link_stats_results_size);
475
476 link_stats_results->paramId = WMI_LINK_STATS_ALL_PEER;
477 link_stats_results->rspId = fixed_param->request_id;
478 link_stats_results->ifaceId = 0;
479 link_stats_results->num_peers = fixed_param->num_peers;
480 link_stats_results->peer_event_number = fixed_param->peer_event_number;
481 link_stats_results->moreResultToFollow = fixed_param->more_data;
482
483 cdf_mem_copy(link_stats_results->results,
484 &fixed_param->num_peers, peer_stats_size);
485
486 results = (uint8_t *) link_stats_results->results;
487 t_peer_stats = (uint8_t *) peer_stats;
488 t_rate_stats = (uint8_t *) rate_stats;
489 next_res_offset = peer_stats_size;
490 next_peer_offset = WMI_TLV_HDR_SIZE;
491 next_rate_offset = WMI_TLV_HDR_SIZE;
492 for (count = 0; count < fixed_param->num_peers; count++) {
493 WMA_LOGD("Peer Info:");
494 WMA_LOGD("peer_type %u capabilities %u num_rates %u",
495 peer_stats->peer_type, peer_stats->capabilities,
496 peer_stats->num_rates);
497
498 cdf_mem_copy(results + next_res_offset,
499 t_peer_stats + next_peer_offset, peer_info_size);
500 next_res_offset += peer_info_size;
501
502 /* Copy rate stats associated with this peer */
                for (rate_cnt = 0; rate_cnt < peer_stats->num_rates; rate_cnt++) {
504 WMA_LOGD("Rate Stats Info:");
505 WMA_LOGD("rate %u bitrate %u tx_mpdu %u rx_mpdu %u "
506 "mpdu_lost %u retries %u retries_short %u "
507 "retries_long %u", rate_stats->rate,
508 rate_stats->bitrate, rate_stats->tx_mpdu,
509 rate_stats->rx_mpdu, rate_stats->mpdu_lost,
510 rate_stats->retries, rate_stats->retries_short,
511 rate_stats->retries_long);
512 rate_stats++;
513
514 cdf_mem_copy(results + next_res_offset,
515 t_rate_stats + next_rate_offset,
516 rate_stats_size);
517 next_res_offset += rate_stats_size;
518 next_rate_offset += sizeof(*rate_stats);
519 }
520 next_peer_offset += sizeof(*peer_stats);
521 peer_stats++;
522 }
523
524 /* call hdd callback with Link Layer Statistics
         * vdev_id/ifaceId in link_stats_results will be
526 * used to retrieve the correct HDD context
527 */
528 pMac->sme.pLinkLayerStatsIndCallback(pMac->hHdd,
529 WMA_LINK_LAYER_STATS_RESULTS_RSP,
530 link_stats_results);
531 WMA_LOGD("%s: Peer Stats event posted to HDD", __func__);
532 cdf_mem_free(link_stats_results);
533
534 return 0;
535}
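
/*
 * Layout of the results blob built by the handler above, as assembled in
 * this file (not a formal interface description):
 *
 *   link_stats_results->results:
 *     [tSirWifiPeerStat]   header, filled from the fixed param starting
 *                          at num_peers
 *     [tSirWifiPeerInfo][tSirWifiRateStat x num_rates of peer 0]
 *     [tSirWifiPeerInfo][tSirWifiRateStat x num_rates of peer 1]
 *     ...
 *
 * A consumer therefore has to advance through the per-peer records using
 * each peer's own num_rates; there is no fixed stride.
 */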
536
537
538/**
539 * wma_unified_link_radio_stats_event_handler() - radio link stats event handler
540 * @handle: wma handle
541 * @cmd_param_info: data received with event from fw
542 * @len: length of data
543 *
544 * Return: 0 for success or error code
545 */
546static int wma_unified_link_radio_stats_event_handler(void *handle,
547 uint8_t *cmd_param_info,
548 uint32_t len)
549{
550 WMI_RADIO_LINK_STATS_EVENTID_param_tlvs *param_tlvs;
551 wmi_radio_link_stats_event_fixed_param *fixed_param;
552 wmi_radio_link_stats *radio_stats;
553 wmi_channel_stats *channel_stats;
554 tSirLLStatsResults *link_stats_results;
555 uint8_t *results, *t_radio_stats, *t_channel_stats;
556 uint32_t next_res_offset, next_chan_offset, count;
557 size_t radio_stats_size, chan_stats_size;
558 size_t link_stats_results_size;
559
560 tpAniSirGlobal pMac = cds_get_context(CDF_MODULE_ID_PE);
561
562 if (!pMac) {
563 WMA_LOGD("%s: NULL pMac ptr. Exiting", __func__);
564 return -EINVAL;
565 }
566
567 if (!pMac->sme.pLinkLayerStatsIndCallback) {
568 WMA_LOGD("%s: HDD callback is null", __func__);
569 return -EINVAL;
570 }
571
572 WMA_LOGD("%s: Posting Radio Stats event to HDD", __func__);
573 param_tlvs = (WMI_RADIO_LINK_STATS_EVENTID_param_tlvs *) cmd_param_info;
574 if (!param_tlvs) {
575 WMA_LOGA("%s: Invalid stats event", __func__);
576 return -EINVAL;
577 }
578
579 /*
580 * cmd_param_info contains
581 * wmi_radio_link_stats_event_fixed_param fixed_param;
582 * size of(struct wmi_radio_link_stats);
583 * num_channels * size of(struct wmi_channel_stats)
584 */
585 fixed_param = param_tlvs->fixed_param;
586 radio_stats = param_tlvs->radio_stats;
587 channel_stats = param_tlvs->channel_stats;
588
589 if (!fixed_param || !radio_stats ||
590 (radio_stats->num_channels && !channel_stats)) {
591 WMA_LOGA("%s: Invalid param_tlvs for Radio Stats", __func__);
592 return -EINVAL;
593 }
594
595 radio_stats_size = sizeof(tSirWifiRadioStat);
596 chan_stats_size = sizeof(tSirWifiChannelStats);
597 link_stats_results_size = sizeof(*link_stats_results) +
598 radio_stats_size + (radio_stats->num_channels * chan_stats_size);
599
600 link_stats_results = cdf_mem_malloc(link_stats_results_size);
601 if (NULL == link_stats_results) {
602 WMA_LOGD("%s: could not allocate mem for stats results-len %zu",
603 __func__, link_stats_results_size);
604 return -ENOMEM;
605 }
606
607 WMA_LOGD("Radio stats from FW event buf");
608 WMA_LOGD("Fixed Param:");
609 WMA_LOGD("request_id %u num_radio %u more_radio_events %u",
610 fixed_param->request_id, fixed_param->num_radio,
611 fixed_param->more_radio_events);
612
613 WMA_LOGD("Radio Info");
614 WMA_LOGD("radio_id %u on_time %u tx_time %u rx_time %u on_time_scan %u "
615 "on_time_nbd %u on_time_gscan %u on_time_roam_scan %u "
616 "on_time_pno_scan %u on_time_hs20 %u num_channels %u",
617 radio_stats->radio_id, radio_stats->on_time,
618 radio_stats->tx_time, radio_stats->rx_time,
619 radio_stats->on_time_scan, radio_stats->on_time_nbd,
620 radio_stats->on_time_gscan,
621 radio_stats->on_time_roam_scan,
622 radio_stats->on_time_pno_scan,
623 radio_stats->on_time_hs20, radio_stats->num_channels);
624
625 cdf_mem_zero(link_stats_results, link_stats_results_size);
626
627 link_stats_results->paramId = WMI_LINK_STATS_RADIO;
628 link_stats_results->rspId = fixed_param->request_id;
629 link_stats_results->ifaceId = 0;
630 link_stats_results->num_radio = fixed_param->num_radio;
631 link_stats_results->peer_event_number = 0;
632 link_stats_results->moreResultToFollow = fixed_param->more_radio_events;
633
634 results = (uint8_t *) link_stats_results->results;
635 t_radio_stats = (uint8_t *) radio_stats;
636 t_channel_stats = (uint8_t *) channel_stats;
637
638 cdf_mem_copy(results, t_radio_stats + WMI_TLV_HDR_SIZE,
639 radio_stats_size);
640
641 next_res_offset = radio_stats_size;
642 next_chan_offset = WMI_TLV_HDR_SIZE;
643 WMA_LOGD("Channel Stats Info");
644 for (count = 0; count < radio_stats->num_channels; count++) {
645 WMA_LOGD("channel_width %u center_freq %u center_freq0 %u "
646 "center_freq1 %u radio_awake_time %u cca_busy_time %u",
647 channel_stats->channel_width,
648 channel_stats->center_freq,
649 channel_stats->center_freq0,
650 channel_stats->center_freq1,
651 channel_stats->radio_awake_time,
652 channel_stats->cca_busy_time);
653 channel_stats++;
654
655 cdf_mem_copy(results + next_res_offset,
656 t_channel_stats + next_chan_offset,
657 chan_stats_size);
658 next_res_offset += chan_stats_size;
659 next_chan_offset += sizeof(*channel_stats);
660 }
661
662 /* call hdd callback with Link Layer Statistics
         * vdev_id/ifaceId in link_stats_results will be
664 * used to retrieve the correct HDD context
665 */
666 pMac->sme.pLinkLayerStatsIndCallback(pMac->hHdd,
667 WMA_LINK_LAYER_STATS_RESULTS_RSP,
668 link_stats_results);
669 WMA_LOGD("%s: Radio Stats event posted to HDD", __func__);
670 cdf_mem_free(link_stats_results);
671
672 return 0;
673}
674
675/**
676 * wma_register_ll_stats_event_handler() - register link layer stats related
677 * event handler
678 * @wma_handle: wma handle
679 *
680 * Return: none
681 */
682void wma_register_ll_stats_event_handler(tp_wma_handle wma_handle)
683{
684 if (NULL == wma_handle) {
685 WMA_LOGE("%s: wma_handle is NULL", __func__);
686 return;
687 }
688
689 wmi_unified_register_event_handler(wma_handle->wmi_handle,
690 WMI_IFACE_LINK_STATS_EVENTID,
691 wma_unified_link_iface_stats_event_handler);
692 wmi_unified_register_event_handler(wma_handle->wmi_handle,
693 WMI_PEER_LINK_STATS_EVENTID,
694 wma_unified_link_peer_stats_event_handler);
695 wmi_unified_register_event_handler(wma_handle->wmi_handle,
696 WMI_RADIO_LINK_STATS_EVENTID,
697 wma_unified_link_radio_stats_event_handler);
698
699 return;
700}
701
702
703/**
704 * wma_process_ll_stats_clear_req() - clear link layer stats
705 * @wma: wma handle
706 * @clearReq: ll stats clear request command params
707 *
708 * Return: CDF_STATUS_SUCCESS for success or error code
709 */
710CDF_STATUS wma_process_ll_stats_clear_req
711 (tp_wma_handle wma, const tpSirLLStatsClearReq clearReq)
712{
713 wmi_clear_link_stats_cmd_fixed_param *cmd;
714 int32_t len;
715 wmi_buf_t buf;
716 uint8_t *buf_ptr;
717 int ret;
718
719 if (!clearReq || !wma) {
720 WMA_LOGE("%s: input pointer is NULL", __func__);
721 return CDF_STATUS_E_FAILURE;
722 }
723
724 len = sizeof(*cmd);
725 buf = wmi_buf_alloc(wma->wmi_handle, len);
726
727 if (!buf) {
728 WMA_LOGE("%s: Failed allocate wmi buffer", __func__);
729 return CDF_STATUS_E_NOMEM;
730 }
731
732 buf_ptr = (uint8_t *) wmi_buf_data(buf);
733 cdf_mem_zero(buf_ptr, len);
734 cmd = (wmi_clear_link_stats_cmd_fixed_param *) buf_ptr;
735
736 WMITLV_SET_HDR(&cmd->tlv_header,
737 WMITLV_TAG_STRUC_wmi_clear_link_stats_cmd_fixed_param,
738 WMITLV_GET_STRUCT_TLVLEN
739 (wmi_clear_link_stats_cmd_fixed_param));
740
741 cmd->stop_stats_collection_req = clearReq->stopReq;
742 cmd->vdev_id = clearReq->staId;
743 cmd->stats_clear_req_mask = clearReq->statsClearReqMask;
744
745 WMI_CHAR_ARRAY_TO_MAC_ADDR(wma->interfaces[clearReq->staId].addr,
746 &cmd->peer_macaddr);
747
748 WMA_LOGD("LINK_LAYER_STATS - Clear Request Params");
749 WMA_LOGD("StopReq : %d", cmd->stop_stats_collection_req);
750 WMA_LOGD("Vdev Id : %d", cmd->vdev_id);
751 WMA_LOGD("Clear Stat Mask : %d", cmd->stats_clear_req_mask);
752 WMA_LOGD("Peer MAC Addr : %pM",
753 wma->interfaces[clearReq->staId].addr);
754
755 ret = wmi_unified_cmd_send(wma->wmi_handle, buf, len,
756 WMI_CLEAR_LINK_STATS_CMDID);
757 if (ret) {
758 WMA_LOGE("%s: Failed to send clear link stats req", __func__);
759 wmi_buf_free(buf);
760 return CDF_STATUS_E_FAILURE;
761 }
762
763 WMA_LOGD("Clear Link Layer Stats request sent successfully");
764 return CDF_STATUS_SUCCESS;
765}
766
767/**
768 * wma_process_ll_stats_set_req() - link layer stats set request
769 * @wma: wma handle
770 * @setReq: ll stats set request command params
771 *
772 * Return: CDF_STATUS_SUCCESS for success or error code
773 */
774CDF_STATUS wma_process_ll_stats_set_req
775 (tp_wma_handle wma, const tpSirLLStatsSetReq setReq)
776{
777 wmi_start_link_stats_cmd_fixed_param *cmd;
778 int32_t len;
779 wmi_buf_t buf;
780 uint8_t *buf_ptr;
781 int ret;
782
783 if (!setReq || !wma) {
784 WMA_LOGE("%s: input pointer is NULL", __func__);
785 return CDF_STATUS_E_FAILURE;
786 }
787
788 len = sizeof(*cmd);
789 buf = wmi_buf_alloc(wma->wmi_handle, len);
790
791 if (!buf) {
792 WMA_LOGE("%s: Failed allocate wmi buffer", __func__);
793 return CDF_STATUS_E_NOMEM;
794 }
795
796 buf_ptr = (uint8_t *) wmi_buf_data(buf);
797 cdf_mem_zero(buf_ptr, len);
798 cmd = (wmi_start_link_stats_cmd_fixed_param *) buf_ptr;
799
800 WMITLV_SET_HDR(&cmd->tlv_header,
801 WMITLV_TAG_STRUC_wmi_start_link_stats_cmd_fixed_param,
802 WMITLV_GET_STRUCT_TLVLEN
803 (wmi_start_link_stats_cmd_fixed_param));
804
805 cmd->mpdu_size_threshold = setReq->mpduSizeThreshold;
806 cmd->aggressive_statistics_gathering =
807 setReq->aggressiveStatisticsGathering;
808
809 WMA_LOGD("LINK_LAYER_STATS - Start/Set Request Params");
810 WMA_LOGD("MPDU Size Thresh : %d", cmd->mpdu_size_threshold);
811 WMA_LOGD("Aggressive Gather: %d", cmd->aggressive_statistics_gathering);
812
813 ret = wmi_unified_cmd_send(wma->wmi_handle, buf, len,
814 WMI_START_LINK_STATS_CMDID);
815 if (ret) {
816 WMA_LOGE("%s: Failed to send set link stats request", __func__);
817 wmi_buf_free(buf);
818 return CDF_STATUS_E_FAILURE;
819 }
820
821 WMA_LOGD("Set Link Layer Stats request sent successfully");
822 return CDF_STATUS_SUCCESS;
823}
824
825/**
826 * wma_process_ll_stats_get_req() - link layer stats get request
827 * @wma:wma handle
828 * @getReq:ll stats get request command params
829 *
830 * Return: CDF_STATUS_SUCCESS for success or error code
831 */
832CDF_STATUS wma_process_ll_stats_get_req
833 (tp_wma_handle wma, const tpSirLLStatsGetReq getReq)
834{
835 wmi_request_link_stats_cmd_fixed_param *cmd;
836 int32_t len;
837 wmi_buf_t buf;
838 uint8_t *buf_ptr;
839 int ret;
840
841 if (!getReq || !wma) {
842 WMA_LOGE("%s: input pointer is NULL", __func__);
843 return CDF_STATUS_E_FAILURE;
844 }
845
846 len = sizeof(*cmd);
847 buf = wmi_buf_alloc(wma->wmi_handle, len);
848
849 if (!buf) {
850 WMA_LOGE("%s: Failed allocate wmi buffer", __func__);
851 return CDF_STATUS_E_NOMEM;
852 }
853
854 buf_ptr = (uint8_t *) wmi_buf_data(buf);
855 cdf_mem_zero(buf_ptr, len);
856 cmd = (wmi_request_link_stats_cmd_fixed_param *) buf_ptr;
857
858 WMITLV_SET_HDR(&cmd->tlv_header,
859 WMITLV_TAG_STRUC_wmi_request_link_stats_cmd_fixed_param,
860 WMITLV_GET_STRUCT_TLVLEN
861 (wmi_request_link_stats_cmd_fixed_param));
862
863 cmd->request_id = getReq->reqId;
864 cmd->stats_type = getReq->paramIdMask;
865 cmd->vdev_id = getReq->staId;
866
867 WMI_CHAR_ARRAY_TO_MAC_ADDR(wma->interfaces[getReq->staId].addr,
868 &cmd->peer_macaddr);
869
870 WMA_LOGD("LINK_LAYER_STATS - Get Request Params");
871 WMA_LOGD("Request ID : %d", cmd->request_id);
872 WMA_LOGD("Stats Type : %d", cmd->stats_type);
873 WMA_LOGD("Vdev ID : %d", cmd->vdev_id);
874 WMA_LOGD("Peer MAC Addr : %pM", wma->interfaces[getReq->staId].addr);
875
876 ret = wmi_unified_cmd_send(wma->wmi_handle, buf, len,
877 WMI_REQUEST_LINK_STATS_CMDID);
878 if (ret) {
879 WMA_LOGE("%s: Failed to send get link stats request", __func__);
880 wmi_buf_free(buf);
881 return CDF_STATUS_E_FAILURE;
882 }
883
884 WMA_LOGD("Get Link Layer Stats request sent successfully");
885 return CDF_STATUS_SUCCESS;
886}
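
/*
 * Illustrative sketch (hypothetical caller, not driver code): how a get
 * request reaching WMA might be filled in before invoking the helper above.
 * The mask is assumed to take the same WMI_LINK_STATS_* values that the
 * event handlers in this file report back as paramId.
 */
static __attribute__((unused)) CDF_STATUS wma_ll_stats_get_example(
                                        tp_wma_handle wma, uint8_t sta_id)
{
        tSirLLStatsGetReq get_req;

        cdf_mem_zero(&get_req, sizeof(get_req));
        get_req.reqId = 1;                          /* echoed back as rspId */
        get_req.staId = sta_id;                     /* vdev of interest */
        get_req.paramIdMask = WMI_LINK_STATS_IFACE; /* assumed bitmask */

        return wma_process_ll_stats_get_req(wma, &get_req);
}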
887
888/**
889 * wma_unified_link_iface_stats_event_handler() - link iface stats event handler
 * @handle: wma handle
891 * @cmd_param_info: data from event
892 * @len: length
893 *
894 * Return: 0 for success or error code
895 */
896int wma_unified_link_iface_stats_event_handler(void *handle,
897 uint8_t *cmd_param_info,
898 uint32_t len)
899{
900 WMI_IFACE_LINK_STATS_EVENTID_param_tlvs *param_tlvs;
901 wmi_iface_link_stats_event_fixed_param *fixed_param;
902 wmi_iface_link_stats *link_stats;
903 wmi_wmm_ac_stats *ac_stats;
904 tSirLLStatsResults *link_stats_results;
905 uint8_t *results, *t_link_stats, *t_ac_stats;
906 uint32_t next_res_offset, next_ac_offset, count;
907 uint32_t roaming_offset, roaming_size;
908 size_t link_stats_size, ac_stats_size, iface_info_size;
909 size_t link_stats_results_size;
910
911 tpAniSirGlobal pMac = cds_get_context(CDF_MODULE_ID_PE);
912
913 if (!pMac) {
914 WMA_LOGD("%s: NULL pMac ptr. Exiting", __func__);
915 return -EINVAL;
916 }
917
918 if (!pMac->sme.pLinkLayerStatsIndCallback) {
919 WMA_LOGD("%s: HDD callback is null", __func__);
920 return -EINVAL;
921 }
922
923 WMA_LOGD("%s: Posting Iface Stats event to HDD", __func__);
924 param_tlvs = (WMI_IFACE_LINK_STATS_EVENTID_param_tlvs *) cmd_param_info;
925 if (!param_tlvs) {
926 WMA_LOGA("%s: Invalid stats event", __func__);
927 return -EINVAL;
928 }
929
930 /*
931 * cmd_param_info contains
932 * wmi_iface_link_stats_event_fixed_param fixed_param;
933 * wmi_iface_link_stats iface_link_stats;
934 * iface_link_stats->num_ac * size of(struct wmi_wmm_ac_stats)
935 */
936 fixed_param = param_tlvs->fixed_param;
937 link_stats = param_tlvs->iface_link_stats;
938 ac_stats = param_tlvs->ac;
939
940 if (!fixed_param || !link_stats || (link_stats->num_ac && !ac_stats)) {
941 WMA_LOGA("%s: Invalid param_tlvs for Iface Stats", __func__);
942 return -EINVAL;
943 }
944
945 link_stats_size = sizeof(tSirWifiIfaceStat);
946 iface_info_size = sizeof(tSirWifiInterfaceInfo);
947 ac_stats_size = sizeof(tSirWifiWmmAcStat);
948 link_stats_results_size = sizeof(*link_stats_results) + link_stats_size;
949
950 link_stats_results = cdf_mem_malloc(link_stats_results_size);
951 if (!link_stats_results) {
952 WMA_LOGD("%s: could not allocate mem for stats results-len %zu",
953 __func__, link_stats_results_size);
954 return -ENOMEM;
955 }
956
957 WMA_LOGD("Interface stats from FW event buf");
958 WMA_LOGD("Fixed Param:");
959 WMA_LOGD("request_id %u vdev_id %u",
960 fixed_param->request_id, fixed_param->vdev_id);
961
962 WMA_LOGD("Iface Stats:");
963 WMA_LOGD("beacon_rx %u mgmt_rx %u mgmt_action_rx %u mgmt_action_tx %u "
964 "rssi_mgmt %u rssi_data %u rssi_ack %u num_peers %u "
965 "num_peer_events %u num_ac %u roam_state %u"
966 " avg_bcn_spread_offset_high %u"
967 " avg_bcn_spread_offset_low %u"
968 " is leaky_ap %u"
969 " avg_rx_frames_leaked %u"
970 " rx_leak_window %u",
971 link_stats->beacon_rx, link_stats->mgmt_rx,
972 link_stats->mgmt_action_rx, link_stats->mgmt_action_tx,
973 link_stats->rssi_mgmt, link_stats->rssi_data,
974 link_stats->rssi_ack, link_stats->num_peers,
975 link_stats->num_peer_events, link_stats->num_ac,
976 link_stats->roam_state,
977 link_stats->avg_bcn_spread_offset_high,
978 link_stats->avg_bcn_spread_offset_low,
979 link_stats->is_leaky_ap,
980 link_stats->avg_rx_frms_leaked,
981 link_stats->rx_leak_window);
982
983 cdf_mem_zero(link_stats_results, link_stats_results_size);
984
985 link_stats_results->paramId = WMI_LINK_STATS_IFACE;
986 link_stats_results->rspId = fixed_param->request_id;
987 link_stats_results->ifaceId = fixed_param->vdev_id;
988 link_stats_results->num_peers = link_stats->num_peers;
989 link_stats_results->peer_event_number = 0;
990 link_stats_results->moreResultToFollow = 0;
991
992 results = (uint8_t *) link_stats_results->results;
993 t_link_stats = (uint8_t *) link_stats;
994 t_ac_stats = (uint8_t *) ac_stats;
995
996 /* Copy roaming state */
997 roaming_offset = offsetof(tSirWifiInterfaceInfo, roaming);
998 roaming_size = member_size(tSirWifiInterfaceInfo, roaming);
999
1000 cdf_mem_copy(results + roaming_offset, &link_stats->roam_state,
1001 roaming_size);
1002
1003 cdf_mem_copy(results + iface_info_size,
1004 t_link_stats + WMI_TLV_HDR_SIZE,
1005 link_stats_size - iface_info_size -
1006 WIFI_AC_MAX * ac_stats_size);
1007
1008 next_res_offset = link_stats_size - WIFI_AC_MAX * ac_stats_size;
1009 next_ac_offset = WMI_TLV_HDR_SIZE;
1010
1011 WMA_LOGD("AC Stats:");
1012 for (count = 0; count < link_stats->num_ac; count++) {
1013 WMA_LOGD("ac_type %u tx_mpdu %u rx_mpdu %u tx_mcast %u "
1014 "rx_mcast %u rx_ampdu %u tx_ampdu %u mpdu_lost %u "
1015 "retries %u retries_short %u retries_long %u "
1016 "contention_time_min %u contention_time_max %u "
1017 "contention_time_avg %u contention_num_samples %u",
1018 ac_stats->ac_type, ac_stats->tx_mpdu,
1019 ac_stats->rx_mpdu, ac_stats->tx_mcast,
1020 ac_stats->rx_mcast, ac_stats->rx_ampdu,
1021 ac_stats->tx_ampdu, ac_stats->mpdu_lost,
1022 ac_stats->retries, ac_stats->retries_short,
1023 ac_stats->retries_long, ac_stats->contention_time_min,
1024 ac_stats->contention_time_max,
1025 ac_stats->contention_time_avg,
1026 ac_stats->contention_num_samples);
1027 ac_stats++;
1028
1029 cdf_mem_copy(results + next_res_offset,
1030 t_ac_stats + next_ac_offset, ac_stats_size);
1031 next_res_offset += ac_stats_size;
1032 next_ac_offset += sizeof(*ac_stats);
1033 }
1034
1035 /* call hdd callback with Link Layer Statistics
         * vdev_id/ifaceId in link_stats_results will be
1037 * used to retrieve the correct HDD context
1038 */
1039 pMac->sme.pLinkLayerStatsIndCallback(pMac->hHdd,
1040 WMA_LINK_LAYER_STATS_RESULTS_RSP,
1041 link_stats_results);
1042 WMA_LOGD("%s: Iface Stats event posted to HDD", __func__);
1043 cdf_mem_free(link_stats_results);
1044
1045 return 0;
1046}
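
/*
 * Note on the copy logic above: tSirWifiIfaceStat is assumed to begin with a
 * tSirWifiInterfaceInfo, so only the roaming state is patched into that
 * region here (roaming_offset bytes in, member_size() wide - presumably the
 * usual sizeof(((type *)0)->member) helper); the rest of the interface info
 * is left for the upper layers to fill. The firmware TLV payload is then
 * copied in after that interface-info header, sized to leave out the
 * trailing WIFI_AC_MAX per-AC block, which the loop above appends one AC
 * at a time.
 */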
1047
1048#endif /* WLAN_FEATURE_LINK_LAYER_STATS */
1049
1050/**
1051 * wma_update_pdev_stats() - update pdev stats
1052 * @wma: wma handle
1053 * @pdev_stats: pdev stats
1054 *
1055 * Return: none
1056 */
1057static void wma_update_pdev_stats(tp_wma_handle wma,
1058 wmi_pdev_stats *pdev_stats)
1059{
1060 tAniGetPEStatsRsp *stats_rsp_params;
1061 uint32_t temp_mask;
1062 uint8_t *stats_buf;
1063 tCsrGlobalClassAStatsInfo *classa_stats = NULL;
1064 struct wma_txrx_node *node;
1065 uint8_t i;
1066
1067 for (i = 0; i < wma->max_bssid; i++) {
1068 node = &wma->interfaces[i];
1069 stats_rsp_params = node->stats_rsp;
1070 if (stats_rsp_params) {
1071 node->fw_stats_set |= FW_PDEV_STATS_SET;
1072 WMA_LOGD("<---FW PDEV STATS received for vdevId:%d", i);
1073 stats_buf = (uint8_t *) (stats_rsp_params + 1);
1074 temp_mask = stats_rsp_params->statsMask;
1075 if (temp_mask & (1 << eCsrSummaryStats))
1076 stats_buf += sizeof(tCsrSummaryStatsInfo);
1077
1078 if (temp_mask & (1 << eCsrGlobalClassAStats)) {
1079 classa_stats =
1080 (tCsrGlobalClassAStatsInfo *) stats_buf;
1081 classa_stats->max_pwr = pdev_stats->chan_tx_pwr;
1082 }
1083 }
1084 }
1085}
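
/*
 * The stats_rsp buffer walked above is laid out by wma_get_stats_rsp_buf()
 * near the end of this file: a tAniGetPEStatsRsp header followed by one
 * block per bit set in statsMask, in eCsrSummaryStats, eCsrGlobalClassAStats,
 * ... order. That is why the code skips sizeof(tCsrSummaryStatsInfo) before
 * treating the buffer as tCsrGlobalClassAStatsInfo whenever the summary-stats
 * bit is set.
 */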
1086
1087/**
1088 * wma_update_vdev_stats() - update vdev stats
1089 * @wma: wma handle
1090 * @vdev_stats: vdev stats
1091 *
1092 * Return: none
1093 */
1094static void wma_update_vdev_stats(tp_wma_handle wma,
1095 wmi_vdev_stats *vdev_stats)
1096{
1097 tAniGetPEStatsRsp *stats_rsp_params;
1098 tCsrSummaryStatsInfo *summary_stats = NULL;
1099 uint8_t *stats_buf;
1100 struct wma_txrx_node *node;
1101 uint8_t i;
1102 int8_t rssi = 0;
1103 CDF_STATUS cdf_status;
1104 tAniGetRssiReq *pGetRssiReq = (tAniGetRssiReq *) wma->pGetRssiReq;
1105 cds_msg_t sme_msg = { 0 };
1106
1107 node = &wma->interfaces[vdev_stats->vdev_id];
1108 stats_rsp_params = node->stats_rsp;
1109 if (stats_rsp_params) {
1110 stats_buf = (uint8_t *) (stats_rsp_params + 1);
1111 node->fw_stats_set |= FW_VDEV_STATS_SET;
1112 WMA_LOGD("<---FW VDEV STATS received for vdevId:%d",
1113 vdev_stats->vdev_id);
1114 if (stats_rsp_params->statsMask & (1 << eCsrSummaryStats)) {
1115 summary_stats = (tCsrSummaryStatsInfo *) stats_buf;
1116 for (i = 0; i < 4; i++) {
1117 summary_stats->tx_frm_cnt[i] =
1118 vdev_stats->tx_frm_cnt[i];
1119 summary_stats->fail_cnt[i] =
1120 vdev_stats->fail_cnt[i];
1121 summary_stats->multiple_retry_cnt[i] =
1122 vdev_stats->multiple_retry_cnt[i];
1123 }
1124
1125 summary_stats->rx_frm_cnt = vdev_stats->rx_frm_cnt;
1126 summary_stats->rx_error_cnt = vdev_stats->rx_err_cnt;
1127 summary_stats->rx_discard_cnt =
1128 vdev_stats->rx_discard_cnt;
1129 summary_stats->ack_fail_cnt = vdev_stats->ack_fail_cnt;
1130 summary_stats->rts_succ_cnt = vdev_stats->rts_succ_cnt;
1131 summary_stats->rts_fail_cnt = vdev_stats->rts_fail_cnt;
1132 }
1133 }
1134
        WMA_LOGD("vdev id %d beacon snr %d data snr %d",
1136 vdev_stats->vdev_id,
1137 vdev_stats->vdev_snr.bcn_snr, vdev_stats->vdev_snr.dat_snr);
1138
1139 if (pGetRssiReq && pGetRssiReq->sessionId == vdev_stats->vdev_id) {
1140 if ((vdev_stats->vdev_snr.bcn_snr == WMA_TGT_INVALID_SNR) &&
1141 (vdev_stats->vdev_snr.dat_snr == WMA_TGT_INVALID_SNR)) {
                        /*
                         * Firmware reports an invalid SNR until it has seen
                         * a beacon or data frame after connection, because
                         * the SNR is reset to invalid on vdev up. During
                         * that window the host returns the last known RSSI
                         * from the connection.
                         */
1149 WMA_LOGE("Invalid SNR from firmware");
1150
1151 } else {
1152 if (vdev_stats->vdev_snr.bcn_snr != WMA_TGT_INVALID_SNR) {
1153 rssi = vdev_stats->vdev_snr.bcn_snr;
1154 } else if (vdev_stats->vdev_snr.dat_snr !=
1155 WMA_TGT_INVALID_SNR) {
1156 rssi = vdev_stats->vdev_snr.dat_snr;
1157 }
1158
                        /*
                         * Convert to an absolute RSSI by adding the noise
                         * floor; the SINR value is hardcoded to 0 in the
                         * core stack.
                         */
1163 rssi = rssi + WMA_TGT_NOISE_FLOOR_DBM;
1164 }
1165
1166 WMA_LOGD("Average Rssi = %d, vdev id= %d", rssi,
1167 pGetRssiReq->sessionId);
1168
1169 /* update the average rssi value to UMAC layer */
1170 if (NULL != pGetRssiReq->rssiCallback) {
1171 ((tCsrRssiCallback) (pGetRssiReq->rssiCallback))(rssi,
1172 pGetRssiReq->staId,
1173 pGetRssiReq->pDevContext);
1174 }
1175
1176 cdf_mem_free(pGetRssiReq);
1177 wma->pGetRssiReq = NULL;
1178 }
1179
1180 if (node->psnr_req) {
1181 tAniGetSnrReq *p_snr_req = node->psnr_req;
1182
1183 if (vdev_stats->vdev_snr.bcn_snr != WMA_TGT_INVALID_SNR)
1184 p_snr_req->snr = vdev_stats->vdev_snr.bcn_snr;
1185 else if (vdev_stats->vdev_snr.dat_snr != WMA_TGT_INVALID_SNR)
1186 p_snr_req->snr = vdev_stats->vdev_snr.dat_snr;
1187 else
1188 p_snr_req->snr = WMA_TGT_INVALID_SNR;
1189
1190 sme_msg.type = eWNI_SME_SNR_IND;
1191 sme_msg.bodyptr = p_snr_req;
1192 sme_msg.bodyval = 0;
1193
1194 cdf_status = cds_mq_post_message(CDF_MODULE_ID_SME, &sme_msg);
1195 if (!CDF_IS_STATUS_SUCCESS(cdf_status)) {
1196 WMA_LOGE("%s: Fail to post snr ind msg", __func__);
1197 cdf_mem_free(p_snr_req);
1198 }
1199
1200 node->psnr_req = NULL;
1201 }
1202}
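
/*
 * Worked example for the RSSI conversion above: firmware reports SNR
 * relative to its noise floor, so with a beacon SNR of 30 dB and
 * WMA_TGT_NOISE_FLOOR_DBM at its usual -96 dBm (assumption; see its
 * definition in the wma headers), the value handed to rssiCallback is
 * 30 + (-96) = -66 dBm.
 */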
1203
1204/**
1205 * wma_post_stats() - update stats to PE
1206 * @wma: wma handle
1207 * @node: txrx node
1208 *
1209 * Return: none
1210 */
1211static void wma_post_stats(tp_wma_handle wma, struct wma_txrx_node *node)
1212{
1213 tAniGetPEStatsRsp *stats_rsp_params;
1214
1215 stats_rsp_params = node->stats_rsp;
1216 /* send response to UMAC */
1217 wma_send_msg(wma, WMA_GET_STATISTICS_RSP, (void *)stats_rsp_params, 0);
1218 node->stats_rsp = NULL;
1219 node->fw_stats_set = 0;
1220}
1221
1222/**
1223 * wma_update_peer_stats() - update peer stats
1224 * @wma: wma handle
1225 * @peer_stats: peer stats
1226 *
1227 * Return: none
1228 */
1229static void wma_update_peer_stats(tp_wma_handle wma,
1230 wmi_peer_stats *peer_stats)
1231{
1232 tAniGetPEStatsRsp *stats_rsp_params;
1233 tCsrGlobalClassAStatsInfo *classa_stats = NULL;
1234 struct wma_txrx_node *node;
1235 uint8_t *stats_buf, vdev_id, macaddr[IEEE80211_ADDR_LEN], mcsRateFlags;
1236 uint32_t temp_mask;
1237
1238 WMI_MAC_ADDR_TO_CHAR_ARRAY(&peer_stats->peer_macaddr, &macaddr[0]);
1239 if (!wma_find_vdev_by_bssid(wma, macaddr, &vdev_id))
1240 return;
1241
1242 node = &wma->interfaces[vdev_id];
1243 if (node->stats_rsp) {
1244 node->fw_stats_set |= FW_PEER_STATS_SET;
1245 WMA_LOGD("<-- FW PEER STATS received for vdevId:%d", vdev_id);
1246 stats_rsp_params = (tAniGetPEStatsRsp *) node->stats_rsp;
1247 stats_buf = (uint8_t *) (stats_rsp_params + 1);
1248 temp_mask = stats_rsp_params->statsMask;
1249 if (temp_mask & (1 << eCsrSummaryStats))
1250 stats_buf += sizeof(tCsrSummaryStatsInfo);
1251
1252 if (temp_mask & (1 << eCsrGlobalClassAStats)) {
1253 classa_stats = (tCsrGlobalClassAStatsInfo *) stats_buf;
1254 WMA_LOGD("peer tx rate:%d", peer_stats->peer_tx_rate);
                        /* The linkspeed returned by fw is in kbps, so convert
                         * it into units of 500 kbps as expected by UMAC */
1257 if (peer_stats->peer_tx_rate) {
1258 classa_stats->tx_rate =
1259 peer_stats->peer_tx_rate / 500;
1260 }
1261
1262 classa_stats->tx_rate_flags = node->rate_flags;
1263 if (!(node->rate_flags & eHAL_TX_RATE_LEGACY)) {
1264 classa_stats->mcs_index =
1265 wma_get_mcs_idx((peer_stats->peer_tx_rate /
1266 100), node->rate_flags,
1267 node->nss, &mcsRateFlags);
                                /* rx_frag_cnt and promiscuous_rx_frag_cnt
                                 * are currently unused, so reuse them to
                                 * carry the nss value and the mcs rate
                                 * flags */
1272 classa_stats->rx_frag_cnt = node->nss;
1273 classa_stats->promiscuous_rx_frag_cnt =
1274 mcsRateFlags;
1275 WMA_LOGD("Computed mcs_idx:%d mcs_rate_flags:%d",
1276 classa_stats->mcs_index, mcsRateFlags);
1277 }
                        /* FW returns tx power in units of 0.5 dBm;
                         * convert it back to units of 1 dBm */
1280 classa_stats->max_pwr =
1281 roundup(classa_stats->max_pwr, 2) >> 1;
1282 WMA_LOGD("peer tx rate flags:%d nss:%d max_txpwr:%d",
1283 node->rate_flags, node->nss,
1284 classa_stats->max_pwr);
1285 }
1286
1287 if (node->fw_stats_set & FW_STATS_SET) {
1288 WMA_LOGD("<--STATS RSP VDEV_ID:%d", vdev_id);
1289 wma_post_stats(wma, node);
1290 }
1291 }
1292}
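
/*
 * Worked example for the conversions above: a peer_tx_rate of 65000 kbps is
 * reported to UMAC as tx_rate = 65000 / 500 = 130 (units of 500 kbps), while
 * the MCS lookup is fed 65000 / 100 = 650, which the Nss = 1 HT table lists
 * as MCS 7 at 20 MHz with long GI.
 */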
1293
1294/**
1295 * wma_post_link_status() - post link status to SME
1296 * @pGetLinkStatus: SME Link status
1297 * @link_status: Link status
1298 *
1299 * Return: none
1300 */
1301void wma_post_link_status(tAniGetLinkStatus *pGetLinkStatus,
1302 uint8_t link_status)
1303{
1304 CDF_STATUS cdf_status = CDF_STATUS_SUCCESS;
1305 cds_msg_t sme_msg = { 0 };
1306
1307 pGetLinkStatus->linkStatus = link_status;
1308 sme_msg.type = eWNI_SME_LINK_STATUS_IND;
1309 sme_msg.bodyptr = pGetLinkStatus;
1310 sme_msg.bodyval = 0;
1311
1312 cdf_status = cds_mq_post_message(CDF_MODULE_ID_SME, &sme_msg);
1313 if (!CDF_IS_STATUS_SUCCESS(cdf_status)) {
1314 WMA_LOGE("%s: Fail to post link status ind msg", __func__);
1315 cdf_mem_free(pGetLinkStatus);
1316 }
1317}
1318
1319/**
1320 * wma_link_status_event_handler() - link status event handler
1321 * @handle: wma handle
1322 * @cmd_param_info: data from event
1323 * @len: length
1324 *
1325 * Return: 0 for success or error code
1326 */
1327int wma_link_status_event_handler(void *handle, uint8_t *cmd_param_info,
1328 uint32_t len)
1329{
1330 tp_wma_handle wma = (tp_wma_handle) handle;
1331 WMI_UPDATE_VDEV_RATE_STATS_EVENTID_param_tlvs *param_buf;
1332 wmi_vdev_rate_stats_event_fixed_param *event;
1333 wmi_vdev_rate_ht_info *ht_info;
1334 struct wma_txrx_node *intr = wma->interfaces;
1335 uint8_t link_status = LINK_STATUS_LEGACY;
1336 int i;
1337
1338 param_buf =
1339 (WMI_UPDATE_VDEV_RATE_STATS_EVENTID_param_tlvs *) cmd_param_info;
1340 if (!param_buf) {
1341 WMA_LOGA("%s: Invalid stats event", __func__);
1342 return -EINVAL;
1343 }
1344
1345 event = (wmi_vdev_rate_stats_event_fixed_param *) param_buf->fixed_param;
1346 ht_info = (wmi_vdev_rate_ht_info *) param_buf->ht_info;
1347
1348 WMA_LOGD("num_vdev_stats: %d", event->num_vdev_stats);
1349 for (i = 0; (i < event->num_vdev_stats) && ht_info; i++) {
1350 WMA_LOGD("%s vdevId:%d tx_nss:%d rx_nss:%d tx_preamble:%d rx_preamble:%d",
1351 __func__, ht_info->vdevid, ht_info->tx_nss,
1352 ht_info->rx_nss, ht_info->tx_preamble,
1353 ht_info->rx_preamble);
1354 if (ht_info->vdevid < wma->max_bssid
1355 && intr[ht_info->vdevid].plink_status_req) {
1356 if (ht_info->tx_nss || ht_info->rx_nss)
1357 link_status = LINK_STATUS_MIMO;
1358
1359 if ((ht_info->tx_preamble == LINK_RATE_VHT) ||
1360 (ht_info->rx_preamble == LINK_RATE_VHT))
1361 link_status |= LINK_STATUS_VHT;
1362
1363 if (intr[ht_info->vdevid].nss == 2)
1364 link_status |= LINK_SUPPORT_MIMO;
1365
1366 if (intr[ht_info->vdevid].rate_flags &
1367 (eHAL_TX_RATE_VHT20 | eHAL_TX_RATE_VHT40 |
1368 eHAL_TX_RATE_VHT80))
1369 link_status |= LINK_SUPPORT_VHT;
1370
1371 wma_post_link_status(intr[ht_info->vdevid].plink_status_req,
1372 link_status);
1373 intr[ht_info->vdevid].plink_status_req = NULL;
1374 link_status = LINK_STATUS_LEGACY;
1375 }
1376
1377 ht_info++;
1378 }
1379
1380 return 0;
1381}
1382
1383/**
1384 * wma_stats_event_handler() - stats event handler
1385 * @handle: wma handle
1386 * @cmd_param_info: data from event
1387 * @len: length
1388 *
1389 * Return: 0 for success or error code
1390 */
1391int wma_stats_event_handler(void *handle, uint8_t *cmd_param_info,
1392 uint32_t len)
1393{
1394 tp_wma_handle wma = (tp_wma_handle) handle;
1395 WMI_UPDATE_STATS_EVENTID_param_tlvs *param_buf;
1396 wmi_stats_event_fixed_param *event;
1397 wmi_pdev_stats *pdev_stats;
1398 wmi_vdev_stats *vdev_stats;
1399 wmi_peer_stats *peer_stats;
1400 uint8_t i, *temp;
1401
1402
1403 param_buf = (WMI_UPDATE_STATS_EVENTID_param_tlvs *) cmd_param_info;
1404 if (!param_buf) {
1405 WMA_LOGA("%s: Invalid stats event", __func__);
1406 return -EINVAL;
1407 }
1408 event = param_buf->fixed_param;
1409 temp = (uint8_t *) param_buf->data;
1410
1411 WMA_LOGD("%s: num_stats: pdev: %u vdev: %u peer %u",
1412 __func__, event->num_pdev_stats, event->num_vdev_stats,
1413 event->num_peer_stats);
1414 if (event->num_pdev_stats > 0) {
1415 for (i = 0; i < event->num_pdev_stats; i++) {
1416 pdev_stats = (wmi_pdev_stats *) temp;
1417 wma_update_pdev_stats(wma, pdev_stats);
1418 temp += sizeof(wmi_pdev_stats);
1419 }
1420 }
1421
1422 if (event->num_vdev_stats > 0) {
1423 for (i = 0; i < event->num_vdev_stats; i++) {
1424 vdev_stats = (wmi_vdev_stats *) temp;
1425 wma_update_vdev_stats(wma, vdev_stats);
1426 temp += sizeof(wmi_vdev_stats);
1427 }
1428 }
1429
1430 if (event->num_peer_stats > 0) {
1431 for (i = 0; i < event->num_peer_stats; i++) {
1432 peer_stats = (wmi_peer_stats *) temp;
1433 wma_update_peer_stats(wma, peer_stats);
1434 temp += sizeof(wmi_peer_stats);
1435 }
1436 }
1437
1438 WMA_LOGI("%s: Exit", __func__);
1439 return 0;
1440}
1441
1442/**
1443 * wma_send_link_speed() - send link speed to SME
1444 * @link_speed: link speed
1445 *
1446 * Return: CDF_STATUS_SUCCESS for success or error code
1447 */
1448CDF_STATUS wma_send_link_speed(uint32_t link_speed)
1449{
1450 CDF_STATUS cdf_status = CDF_STATUS_SUCCESS;
1451 cds_msg_t sme_msg = { 0 };
1452 tSirLinkSpeedInfo *ls_ind =
1453 (tSirLinkSpeedInfo *) cdf_mem_malloc(sizeof(tSirLinkSpeedInfo));
1454 if (!ls_ind) {
1455 WMA_LOGE("%s: Memory allocation failed.", __func__);
1456 cdf_status = CDF_STATUS_E_NOMEM;
1457 } else {
1458 ls_ind->estLinkSpeed = link_speed;
1459 sme_msg.type = eWNI_SME_LINK_SPEED_IND;
1460 sme_msg.bodyptr = ls_ind;
1461 sme_msg.bodyval = 0;
1462
1463 cdf_status = cds_mq_post_message(CDF_MODULE_ID_SME, &sme_msg);
1464 if (!CDF_IS_STATUS_SUCCESS(cdf_status)) {
1465 WMA_LOGE("%s: Fail to post linkspeed ind msg",
1466 __func__);
1467 cdf_mem_free(ls_ind);
1468 }
1469 }
1470 return cdf_status;
1471}
1472
1473/**
1474 * wma_link_speed_event_handler() - link speed event handler
1475 * @handle: wma handle
1476 * @cmd_param_info: event data
1477 * @len: length
1478 *
1479 * Return: 0 for success or error code
1480 */
1481int wma_link_speed_event_handler(void *handle, uint8_t *cmd_param_info,
1482 uint32_t len)
1483{
1484 WMI_PEER_ESTIMATED_LINKSPEED_EVENTID_param_tlvs *param_buf;
1485 wmi_peer_estimated_linkspeed_event_fixed_param *event;
1486 CDF_STATUS cdf_status;
1487
1488 param_buf =
1489 (WMI_PEER_ESTIMATED_LINKSPEED_EVENTID_param_tlvs *) cmd_param_info;
1490 if (!param_buf) {
1491 WMA_LOGE("%s: Invalid linkspeed event", __func__);
1492 return -EINVAL;
1493 }
1494 event = param_buf->fixed_param;
1495 cdf_status = wma_send_link_speed(event->est_linkspeed_kbps);
1496 if (!CDF_IS_STATUS_SUCCESS(cdf_status)) {
1497 return -EINVAL;
1498 }
1499 return 0;
1500}
1501
1502/**
1503 * wma_wni_cfg_dnld() - cfg download request
 * @wma_handle: wma handle
1505 *
1506 * Return: CDF_STATUS_SUCCESS for success or error code
1507 */
1508CDF_STATUS wma_wni_cfg_dnld(tp_wma_handle wma_handle)
1509{
1510 CDF_STATUS cdf_status = CDF_STATUS_SUCCESS;
1511 void *mac = cds_get_context(CDF_MODULE_ID_PE);
1512
1513 WMA_LOGD("%s: Enter", __func__);
1514
1515 if (NULL == mac) {
1516 WMA_LOGP("%s: Invalid context", __func__);
1517 CDF_ASSERT(0);
1518 return CDF_STATUS_E_FAILURE;
1519 }
1520
1521 process_cfg_download_req(mac);
1522
1523 WMA_LOGD("%s: Exit", __func__);
1524 return cdf_status;
1525}
1526
1527/**
1528 * wma_unified_debug_print_event_handler() - debug print event handler
1529 * @handle: wma handle
1530 * @datap: data pointer
1531 * @len: length
1532 *
1533 * Return: 0 for success or error code
1534 */
1535int wma_unified_debug_print_event_handler(void *handle, uint8_t *datap,
1536 uint32_t len)
1537{
1538 WMI_DEBUG_PRINT_EVENTID_param_tlvs *param_buf;
1539 uint8_t *data;
1540 uint32_t datalen;
1541
1542 param_buf = (WMI_DEBUG_PRINT_EVENTID_param_tlvs *) datap;
1543 if (!param_buf) {
1544 WMA_LOGE("Get NULL point message from FW");
1545 return -ENOMEM;
1546 }
1547 data = param_buf->data;
1548 datalen = param_buf->num_data;
1549
1550#ifdef BIG_ENDIAN_HOST
1551 {
1552 char dbgbuf[500] = { 0 };
1553 memcpy(dbgbuf, data, datalen);
1554 SWAPME(dbgbuf, datalen);
1555 WMA_LOGD("FIRMWARE:%s", dbgbuf);
1556 return 0;
1557 }
1558#else
1559 WMA_LOGD("FIRMWARE:%s", data);
1560 return 0;
1561#endif /* BIG_ENDIAN_HOST */
1562}
1563
1564/**
1565 * wma_check_scan_in_progress() - check scan is progress or not
1566 * @handle: wma handle
1567 *
1568 * Return: true/false
1569 */
1570bool wma_check_scan_in_progress(WMA_HANDLE handle)
1571{
1572 tp_wma_handle wma_handle = handle;
1573 int i;
1574
1575 for (i = 0; i < wma_handle->max_bssid; i++) {
1576 if (wma_handle->interfaces[i].scan_info.scan_id) {
1577
1578 WMA_LOGE("%s: scan in progress on interface[%d],scanid = %d",
1579 __func__, i,
1580 wma_handle->interfaces[i].scan_info.scan_id);
1581 return true;
1582 }
1583 }
1584 return false;
1585}
1586
1587/**
1588 * wma_is_sap_active() - check sap is active or not
1589 * @handle: wma handle
1590 *
1591 * Return: true/false
1592 */
1593bool wma_is_sap_active(tp_wma_handle wma_handle)
1594{
1595 int i;
1596
1597 for (i = 0; i < wma_handle->max_bssid; i++) {
1598 if (!wma_handle->interfaces[i].vdev_up)
1599 continue;
1600 if (wma_handle->interfaces[i].type == WMI_VDEV_TYPE_AP &&
1601 wma_handle->interfaces[i].sub_type == 0)
1602 return true;
1603 }
1604 return false;
1605}
1606
1607/**
1608 * wma_is_p2p_go_active() - check p2p go is active or not
1609 * @handle: wma handle
1610 *
1611 * Return: true/false
1612 */
1613bool wma_is_p2p_go_active(tp_wma_handle wma_handle)
1614{
1615 int i;
1616
1617 for (i = 0; i < wma_handle->max_bssid; i++) {
1618 if (!wma_handle->interfaces[i].vdev_up)
1619 continue;
1620 if (wma_handle->interfaces[i].type == WMI_VDEV_TYPE_AP &&
1621 wma_handle->interfaces[i].sub_type ==
1622 WMI_UNIFIED_VDEV_SUBTYPE_P2P_GO)
1623 return true;
1624 }
1625 return false;
1626}
1627
1628/**
1629 * wma_is_p2p_cli_active() - check p2p cli is active or not
1630 * @handle: wma handle
1631 *
1632 * Return: true/false
1633 */
1634bool wma_is_p2p_cli_active(tp_wma_handle wma_handle)
1635{
1636 int i;
1637
1638 for (i = 0; i < wma_handle->max_bssid; i++) {
1639 if (!wma_handle->interfaces[i].vdev_up)
1640 continue;
1641 if (wma_handle->interfaces[i].type == WMI_VDEV_TYPE_STA &&
1642 wma_handle->interfaces[i].sub_type ==
1643 WMI_UNIFIED_VDEV_SUBTYPE_P2P_CLIENT)
1644 return true;
1645 }
1646 return false;
1647}
1648
1649/**
1650 * wma_is_sta_active() - check sta is active or not
1651 * @handle: wma handle
1652 *
1653 * Return: true/false
1654 */
1655bool wma_is_sta_active(tp_wma_handle wma_handle)
1656{
1657 int i;
1658
1659 for (i = 0; i < wma_handle->max_bssid; i++) {
1660 if (!wma_handle->interfaces[i].vdev_up)
1661 continue;
1662 if (wma_handle->interfaces[i].type == WMI_VDEV_TYPE_STA &&
1663 wma_handle->interfaces[i].sub_type == 0)
1664 return true;
1665 if (wma_handle->interfaces[i].type == WMI_VDEV_TYPE_IBSS)
1666 return true;
1667 }
1668 return false;
1669}
1670
1671/**
1672 * wma_peer_phymode() - get phymode
1673 * @nw_type: nw type
1674 * @sta_type: sta type
1675 * @is_ht: is ht supported
 * @ch_width: channel width
 * @is_vht: is vht supported
1679 *
1680 * Return: WLAN_PHY_MODE
1681 */
1682WLAN_PHY_MODE wma_peer_phymode(tSirNwType nw_type, uint8_t sta_type,
1683 uint8_t is_ht, uint8_t ch_width,
1684 uint8_t is_vht)
1685{
1686 WLAN_PHY_MODE phymode = MODE_UNKNOWN;
1687
1688 switch (nw_type) {
1689 case eSIR_11B_NW_TYPE:
1690 phymode = MODE_11B;
1691 if (is_ht || is_vht)
1692 WMA_LOGE("HT/VHT is enabled with 11B NW type");
1693 break;
1694 case eSIR_11G_NW_TYPE:
1695 if (!(is_ht || is_vht)) {
1696 phymode = MODE_11G;
1697 break;
1698 }
1699 if (CH_WIDTH_40MHZ < ch_width)
1700 WMA_LOGE("80/160 MHz BW sent in 11G, configured 40MHz");
1701 if (ch_width)
1702 phymode = (is_vht) ?
1703 MODE_11AC_VHT40 : MODE_11NG_HT40;
1704 else
1705 phymode = (is_vht) ?
1706 MODE_11AC_VHT20 : MODE_11NG_HT20;
1707 break;
1708 case eSIR_11A_NW_TYPE:
1709 if (!(is_ht || is_vht)) {
1710 phymode = MODE_11A;
1711 break;
1712 }
1713 if (is_vht) {
1714#if CONFIG_160MHZ_SUPPORT != 0
1715 if (ch_width == CH_WIDTH_160MHZ)
1716 phymode = MODE_11AC_VHT160;
1717 else if (ch_width == CH_WIDTH_80P80MHZ)
1718 phymode = MODE_11AC_VHT80_80;
1719 else
1720#endif
1721 if (ch_width == CH_WIDTH_80MHZ)
1722 phymode = MODE_11AC_VHT80;
1723 else
1724 phymode = (ch_width) ?
1725 MODE_11AC_VHT40 : MODE_11AC_VHT20;
1726 } else
1727 phymode = (ch_width) ? MODE_11NA_HT40 : MODE_11NA_HT20;
1728 break;
1729 default:
1730 WMA_LOGP("%s: Invalid nw type %d", __func__, nw_type);
1731 break;
1732 }
1733 WMA_LOGD("%s: nw_type %d is_ht %d ch_width %d is_vht %d phymode %d",
1734 __func__, nw_type, is_ht, ch_width, is_vht, phymode);
1735
1736 return phymode;
1737}
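
/*
 * Example mappings for the helper above, read straight from the switch:
 *   eSIR_11A_NW_TYPE, is_ht = 1, is_vht = 1, ch_width = CH_WIDTH_80MHZ
 *       -> MODE_11AC_VHT80
 *   eSIR_11G_NW_TYPE, is_ht = 1, is_vht = 0, ch_width = CH_WIDTH_40MHZ
 *       -> MODE_11NG_HT40
 *   eSIR_11B_NW_TYPE with any HT/VHT flag -> MODE_11B, plus a warning log
 */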
1738
1739/**
1740 * wma_txrx_fw_stats_reset() - reset txrx fw statistics
1741 * @wma_handle: wma handle
1742 * @vdev_id: vdev id
1743 * @value: value
1744 *
1745 * Return: 0 for success or return error
1746 */
1747int32_t wma_txrx_fw_stats_reset(tp_wma_handle wma_handle,
1748 uint8_t vdev_id, uint32_t value)
1749{
1750 struct ol_txrx_stats_req req;
1751 ol_txrx_vdev_handle vdev;
1752
1753 vdev = wma_find_vdev_by_id(wma_handle, vdev_id);
1754 if (!vdev) {
1755 WMA_LOGE("%s:Invalid vdev handle", __func__);
1756 return -EINVAL;
1757 }
1758 cdf_mem_zero(&req, sizeof(req));
1759 req.stats_type_reset_mask = value;
1760 ol_txrx_fw_stats_get(vdev, &req);
1761
1762 return 0;
1763}
1764
1765#ifdef HELIUMPLUS
1766#define SET_UPLOAD_MASK(_mask, _rate_info) \
1767 ((_mask) = 1 << (_rate_info ## _V2))
1768#else /* !HELIUMPLUS */
1769#define SET_UPLOAD_MASK(_mask, _rate_info) \
1770 ((_mask) = 1 << (_rate_info))
1771#endif
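
/*
 * Example of the macro above: SET_UPLOAD_MASK(mask, HTT_DBG_STATS_RX_RATE_INFO)
 * expands to mask = 1 << HTT_DBG_STATS_RX_RATE_INFO_V2 on HELIUMPLUS builds
 * and to mask = 1 << HTT_DBG_STATS_RX_RATE_INFO otherwise, so the same
 * WMA_FW_RX_RC_STATS / WMA_FW_TX_RC_STATS requests map onto the _V2 HTT
 * stats IDs on HELIUMPLUS targets.
 */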
1772
1773/**
1774 * wma_set_txrx_fw_stats_level() - set txrx fw stats level
1775 * @wma_handle: wma handle
1776 * @vdev_id: vdev id
1777 * @value: value
1778 *
1779 * Return: 0 for success or return error
1780 */
1781int32_t wma_set_txrx_fw_stats_level(tp_wma_handle wma_handle,
1782 uint8_t vdev_id, uint32_t value)
1783{
1784 struct ol_txrx_stats_req req;
1785 ol_txrx_vdev_handle vdev;
1786 uint32_t l_up_mask;
1787
1788 vdev = wma_find_vdev_by_id(wma_handle, vdev_id);
1789 if (!vdev) {
1790 WMA_LOGE("%s:Invalid vdev handle", __func__);
1791 return -EINVAL;
1792 }
1793 cdf_mem_zero(&req, sizeof(req));
1794 req.print.verbose = 1;
1795
1796 switch (value) {
1797 /* txrx_fw_stats 1 */
1798 case WMA_FW_PHY_STATS:
1799 l_up_mask = 1 << HTT_DBG_STATS_WAL_PDEV_TXRX;
1800 break;
1801
1802 /* txrx_fw_stats 2 */
1803 case WMA_FW_RX_REORDER_STATS:
1804 l_up_mask = 1 << HTT_DBG_STATS_RX_REORDER;
1805 break;
1806
1807 /* txrx_fw_stats 3 */
1808 case WMA_FW_RX_RC_STATS:
1809 SET_UPLOAD_MASK(l_up_mask, HTT_DBG_STATS_RX_RATE_INFO);
1810 break;
1811
1812 /* txrx_fw_stats 5 */
1813 case WMA_FW_TX_CONCISE_STATS:
1814 req.print.concise = 1;
1815 /* No break here, since l_up_mask is same for
1816 * both WMA_FW_TX_CONCISE_STATS & WMA_FW_TX_PPDU_STATS */
1817
1818 /* txrx_fw_stats 4 */
1819 case WMA_FW_TX_PPDU_STATS:
1820 l_up_mask = 1 << HTT_DBG_STATS_TX_PPDU_LOG;
1821 break;
1822
1823 /* txrx_fw_stats 6 */
1824 case WMA_FW_TX_RC_STATS:
1825 SET_UPLOAD_MASK(l_up_mask, HTT_DBG_STATS_TX_RATE_INFO);
1826 break;
1827
1828 /* txrx_fw_stats 12 */
1829 /*
 1830 * There is a 1:1 correspondence between the WMA-defined
 1831 * value and the f/w bitmask.
1832 */
1833 case WMA_FW_RX_REM_RING_BUF:
1834 l_up_mask = 1 << HTT_DBG_STATS_RX_REMOTE_RING_BUFFER_INFO;
1835 break;
1836
1837 /* txrx_fw_stats 7 */
1838 case WMA_FW_TXBF_INFO_STATS:
1839 l_up_mask = 1 << HTT_DBG_STATS_TXBF_INFO;
1840 break;
1841
1842 /* txrx_fw_stats 8 */
1843 case WMA_FW_SND_INFO_STATS:
1844 l_up_mask = 1 << HTT_DBG_STATS_SND_INFO;
1845 break;
1846
1847 /* txrx_fw_stats 9 */
1848 case WMA_FW_ERROR_INFO_STATS:
1849 l_up_mask = 1 << HTT_DBG_STATS_ERROR_INFO;
1850 break;
1851
1852 /* txrx_fw_stats 10 */
1853 case WMA_FW_TX_SELFGEN_INFO_STATS:
1854 l_up_mask = 1 << HTT_DBG_STATS_TX_SELFGEN_INFO;
1855 break;
1856
1857 /* txrx_fw_stats 15 */
1858 /*
 1859 * There is a 1:1 correspondence between the WMA-defined
 1860 * value and the f/w bitmask.
1861 */
1862 case WMA_FW_RX_TXBF_MUSU_NDPA:
1863 l_up_mask = 1 << HTT_DBG_STATS_TXBF_MUSU_NDPA_PKT;
1864 break;
1865
1866 default:
1867 cdf_print("%s %d Invalid value %d\n",
1868 __func__, __LINE__, value);
1869 return 0;
1870 }
1871 req.stats_type_upload_mask = l_up_mask;
1872
1873 ol_txrx_fw_stats_get(vdev, &req);
1874
1875 return 0;
1876}
1877
1878/**
1879 * wmi_crash_inject() - inject fw crash
 1880 * @wmi_handle: wmi handle
 1881 * @type: type of forced firmware hang to inject
 1882 * @delay_time_ms: delay time in ms
 1883 *
 1884 * Return: 0 on success, negative value on failure
1885 */
1886int wmi_crash_inject(wmi_unified_t wmi_handle, uint32_t type,
1887 uint32_t delay_time_ms)
1888{
1889 int ret = 0;
1890 WMI_FORCE_FW_HANG_CMD_fixed_param *cmd;
1891 uint16_t len = sizeof(*cmd);
1892 wmi_buf_t buf;
1893
1894 buf = wmi_buf_alloc(wmi_handle, len);
1895 if (!buf) {
1896 WMA_LOGE("%s: wmi_buf_alloc failed!", __func__);
1897 return -ENOMEM;
1898 }
1899
1900 cmd = (WMI_FORCE_FW_HANG_CMD_fixed_param *) wmi_buf_data(buf);
1901 WMITLV_SET_HDR(&cmd->tlv_header,
1902 WMITLV_TAG_STRUC_WMI_FORCE_FW_HANG_CMD_fixed_param,
1903 WMITLV_GET_STRUCT_TLVLEN
1904 (WMI_FORCE_FW_HANG_CMD_fixed_param));
1905 cmd->type = type;
1906 cmd->delay_time_ms = delay_time_ms;
1907
1908 ret = wmi_unified_cmd_send(wmi_handle, buf, len, WMI_FORCE_FW_HANG_CMDID);
1909 if (ret < 0) {
1910 WMA_LOGE("%s: Failed to send set param command, ret = %d",
1911 __func__, ret);
1912 wmi_buf_free(buf);
1913 }
1914
1915 return ret;
1916}
1917
1918/**
1919 * wma_get_stats_rsp_buf() - fill get stats response buffer
1920 * @get_stats_param: get stats parameters
1921 *
1922 * Return: stats response buffer
1923 */
1924static tAniGetPEStatsRsp *wma_get_stats_rsp_buf
1925 (tAniGetPEStatsReq *get_stats_param)
1926{
1927 tAniGetPEStatsRsp *stats_rsp_params;
1928 uint32_t len, temp_mask, counter = 0;
1929
1930 len = sizeof(tAniGetPEStatsRsp);
1931 temp_mask = get_stats_param->statsMask;
1932
1933 while (temp_mask) {
1934 if (temp_mask & 1) {
1935 switch (counter) {
1936 case eCsrSummaryStats:
1937 len += sizeof(tCsrSummaryStatsInfo);
1938 break;
1939 case eCsrGlobalClassAStats:
1940 len += sizeof(tCsrGlobalClassAStatsInfo);
1941 break;
1942 case eCsrGlobalClassBStats:
1943 len += sizeof(tCsrGlobalClassBStatsInfo);
1944 break;
1945 case eCsrGlobalClassCStats:
1946 len += sizeof(tCsrGlobalClassCStatsInfo);
1947 break;
1948 case eCsrGlobalClassDStats:
1949 len += sizeof(tCsrGlobalClassDStatsInfo);
1950 break;
1951 case eCsrPerStaStats:
1952 len += sizeof(tCsrPerStaStatsInfo);
1953 break;
1954 }
1955 }
1956
1957 counter++;
1958 temp_mask >>= 1;
1959 }
1960
1961 stats_rsp_params = (tAniGetPEStatsRsp *) cdf_mem_malloc(len);
1962 if (!stats_rsp_params) {
1963 WMA_LOGE("memory allocation failed for tAniGetPEStatsRsp");
1964 CDF_ASSERT(0);
1965 return NULL;
1966 }
1967
1968 cdf_mem_zero(stats_rsp_params, len);
1969 stats_rsp_params->staId = get_stats_param->staId;
1970 stats_rsp_params->statsMask = get_stats_param->statsMask;
1971 stats_rsp_params->msgType = WMA_GET_STATISTICS_RSP;
1972 stats_rsp_params->msgLen = len - sizeof(tAniGetPEStatsRsp);
1973 stats_rsp_params->rc = CDF_STATUS_SUCCESS;
1974 return stats_rsp_params;
1975}
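
/*
 * Illustrative sketch (kept out of the build with #if 0): mirrors the sizing
 * loop in wma_get_stats_rsp_buf() to show how the stats mask translates into
 * the extra payload appended after the tAniGetPEStatsRsp header.  For
 * instance, a mask with only eCsrSummaryStats and eCsrGlobalClassAStats set
 * adds sizeof(tCsrSummaryStatsInfo) + sizeof(tCsrGlobalClassAStatsInfo).
 * The helper name is hypothetical.
 */
#if 0
static uint32_t example_stats_payload_len(uint32_t stats_mask)
{
	uint32_t len = 0, counter = 0;

	while (stats_mask) {
		if (stats_mask & 1) {
			switch (counter) {
			case eCsrSummaryStats:
				len += sizeof(tCsrSummaryStatsInfo);
				break;
			case eCsrGlobalClassAStats:
				len += sizeof(tCsrGlobalClassAStatsInfo);
				break;
			case eCsrPerStaStats:
				len += sizeof(tCsrPerStaStatsInfo);
				break;
			}
		}
		counter++;
		stats_mask >>= 1;
	}
	return len;
}
#endif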
1976
1977/**
1978 * wma_get_stats_req() - get stats request
1979 * @handle: wma handle
1980 * @get_stats_param: stats params
1981 *
1982 * Return: none
1983 */
1984void wma_get_stats_req(WMA_HANDLE handle,
1985 tAniGetPEStatsReq *get_stats_param)
1986{
1987 tp_wma_handle wma_handle = (tp_wma_handle) handle;
1988 struct wma_txrx_node *node;
1989 wmi_buf_t buf;
1990 wmi_request_stats_cmd_fixed_param *cmd;
1991 tAniGetPEStatsRsp *pGetPEStatsRspParams;
1992 uint8_t len = sizeof(wmi_request_stats_cmd_fixed_param);
1993
1994 WMA_LOGD("%s: Enter", __func__);
1995 node = &wma_handle->interfaces[get_stats_param->sessionId];
1996 if (node->stats_rsp) {
1997 pGetPEStatsRspParams = node->stats_rsp;
1998 if (pGetPEStatsRspParams->staId == get_stats_param->staId &&
1999 pGetPEStatsRspParams->statsMask ==
2000 get_stats_param->statsMask) {
2001 WMA_LOGI("Stats for staId %d with stats mask %d "
2002 "is pending.... ignore new request",
2003 get_stats_param->staId,
2004 get_stats_param->statsMask);
2005 goto end;
2006 } else {
2007 cdf_mem_free(node->stats_rsp);
2008 node->stats_rsp = NULL;
2009 node->fw_stats_set = 0;
2010 }
2011 }
2012
2013 pGetPEStatsRspParams = wma_get_stats_rsp_buf(get_stats_param);
2014 if (!pGetPEStatsRspParams)
2015 goto end;
2016
2017 buf = wmi_buf_alloc(wma_handle->wmi_handle, len);
2018 if (!buf) {
2019 WMA_LOGE("%s: Failed to allocate wmi buffer", __func__);
2020 goto failed;
2021 }
2022
2023 node->fw_stats_set = 0;
2024 node->stats_rsp = pGetPEStatsRspParams;
2025 cmd = (wmi_request_stats_cmd_fixed_param *) wmi_buf_data(buf);
2026 WMITLV_SET_HDR(&cmd->tlv_header,
2027 WMITLV_TAG_STRUC_wmi_request_stats_cmd_fixed_param,
2028 WMITLV_GET_STRUCT_TLVLEN
2029 (wmi_request_stats_cmd_fixed_param));
2030 cmd->stats_id =
2031 WMI_REQUEST_PEER_STAT | WMI_REQUEST_PDEV_STAT |
2032 WMI_REQUEST_VDEV_STAT;
2033 cmd->vdev_id = get_stats_param->sessionId;
2034 WMI_CHAR_ARRAY_TO_MAC_ADDR(node->bssid, &cmd->peer_macaddr);
2035 WMA_LOGD("STATS REQ VDEV_ID:%d-->", cmd->vdev_id);
2036 if (wmi_unified_cmd_send(wma_handle->wmi_handle, buf, len,
2037 WMI_REQUEST_STATS_CMDID)) {
2038
2039 WMA_LOGE("%s: Failed to send WMI_REQUEST_STATS_CMDID",
2040 __func__);
2041 wmi_buf_free(buf);
2042 goto failed;
2043 }
2044
2045 goto end;
2046failed:
2047
2048 pGetPEStatsRspParams->rc = CDF_STATUS_E_FAILURE;
2049 node->stats_rsp = NULL;
2050 /* send response to UMAC */
2051 wma_send_msg(wma_handle, WMA_GET_STATISTICS_RSP, pGetPEStatsRspParams,
2052 0);
2053end:
2054 cdf_mem_free(get_stats_param);
2055 WMA_LOGD("%s: Exit", __func__);
2056 return;
2057}
2058
2059/**
2060 * wma_get_beacon_buffer_by_vdev_id() - get the beacon buffer from vdev ID
2061 * @vdev_id: vdev id
 2062 * @buffer_size: filled with the beacon length on success (may be NULL)
 2063 *
 2064 * Return: allocated copy of the beacon (caller must free), or NULL on failure
2065 */
2066void *wma_get_beacon_buffer_by_vdev_id(uint8_t vdev_id, uint32_t *buffer_size)
2067{
2068 tp_wma_handle wma;
2069 struct beacon_info *beacon;
2070 uint8_t *buf;
2071 uint32_t buf_size;
2072
2073 wma = cds_get_context(CDF_MODULE_ID_WMA);
2074 if (!wma) {
2075 WMA_LOGE("%s: Invalid WMA handle", __func__);
2076 return NULL;
2077 }
2078
2079 if (vdev_id >= wma->max_bssid) {
2080 WMA_LOGE("%s: Invalid vdev_id %u", __func__, vdev_id);
2081 return NULL;
2082 }
2083
2084 if (!wma_is_vdev_in_ap_mode(wma, vdev_id)) {
2085 WMA_LOGE("%s: vdevid %d is not in AP mode", __func__, vdev_id);
2086 return NULL;
2087 }
2088
2089 beacon = wma->interfaces[vdev_id].beacon;
2090
2091 if (!beacon) {
2092 WMA_LOGE("%s: beacon invalid", __func__);
2093 return NULL;
2094 }
2095
2096 cdf_spin_lock_bh(&beacon->lock);
2097
2098 buf_size = cdf_nbuf_len(beacon->buf);
2099 buf = cdf_mem_malloc(buf_size);
2100
2101 if (!buf) {
2102 cdf_spin_unlock_bh(&beacon->lock);
2103 WMA_LOGE("%s: alloc failed for beacon buf", __func__);
2104 return NULL;
2105 }
2106
2107 cdf_mem_copy(buf, cdf_nbuf_data(beacon->buf), buf_size);
2108
2109 cdf_spin_unlock_bh(&beacon->lock);
2110
2111 if (buffer_size)
2112 *buffer_size = buf_size;
2113
2114 return buf;
2115}
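
/*
 * Illustrative sketch (kept out of the build with #if 0): the buffer returned
 * by wma_get_beacon_buffer_by_vdev_id() is a heap copy, so the caller owns it
 * and must release it with cdf_mem_free().  The function name is
 * hypothetical.
 */
#if 0
static void example_log_beacon_len(uint8_t vdev_id)
{
	uint32_t len = 0;
	uint8_t *bcn = wma_get_beacon_buffer_by_vdev_id(vdev_id, &len);

	if (!bcn)
		return;

	WMA_LOGD("vdev %u beacon length %u", vdev_id, len);
	cdf_mem_free(bcn);
}
#endif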
2116
2117/**
2118 * wma_get_vdev_address_by_vdev_id() - lookup MAC address from vdev ID
2119 * @vdev_id: vdev id
2120 *
2121 * Return: mac address
2122 */
2123uint8_t *wma_get_vdev_address_by_vdev_id(uint8_t vdev_id)
2124{
2125 tp_wma_handle wma;
2126
2127 wma = cds_get_context(CDF_MODULE_ID_WMA);
2128 if (!wma) {
2129 WMA_LOGE("%s: Invalid WMA handle", __func__);
2130 return NULL;
2131 }
2132
2133 if (vdev_id >= wma->max_bssid) {
2134 WMA_LOGE("%s: Invalid vdev_id %u", __func__, vdev_id);
2135 return NULL;
2136 }
2137
2138 return wma->interfaces[vdev_id].addr;
2139}
2140
2141/**
2142 * wma_get_interface_by_vdev_id() - lookup interface entry using vdev ID
2143 * @vdev_id: vdev id
2144 *
2145 * Return: entry from vdev table
2146 */
2147struct wma_txrx_node *wma_get_interface_by_vdev_id(uint8_t vdev_id)
2148{
2149 tp_wma_handle wma;
2150
2151 wma = cds_get_context(CDF_MODULE_ID_WMA);
2152 if (!wma) {
2153 WMA_LOGE("%s: Invalid WMA handle", __func__);
2154 return NULL;
2155 }
2156
2157 if (vdev_id >= wma->max_bssid) {
2158 WMA_LOGE("%s: Invalid vdev_id %u", __func__, vdev_id);
2159 return NULL;
2160 }
2161
2162 return &wma->interfaces[vdev_id];
2163}
2164
2165/**
2166 * wma_is_vdev_up() - return whether a vdev is up
2167 * @vdev_id: vdev id
2168 *
2169 * Return: true if the vdev is up, false otherwise
2170 */
2171bool wma_is_vdev_up(uint8_t vdev_id)
2172{
2173 struct wma_txrx_node *vdev = wma_get_interface_by_vdev_id(vdev_id);
2174 if (vdev)
2175 return vdev->vdev_up;
2176 else
2177 return false;
2178}
2179
2180#if defined(QCA_WIFI_FTM)
2181/**
2182 * wma_utf_rsp() - utf response
2183 * @wma_handle: wma handle
2184 * @payload: payload
2185 * @len: length of payload
2186 *
2187 * Return: 0 for success or error code
2188 */
2189int wma_utf_rsp(tp_wma_handle wma_handle, uint8_t **payload, uint32_t *len)
2190{
2191 int ret = -1;
2192 uint32_t payload_len;
2193
2194 payload_len = wma_handle->utf_event_info.length;
2195 if (payload_len) {
2196 ret = 0;
2197
2198 /*
 2199 * The first 4 bytes hold the payload size
 2200 * and the actual payload follows immediately after
2201 */
 2202 *payload = (uint8_t *) cdf_mem_malloc((uint32_t) payload_len
 2203 + sizeof(A_UINT32));
 if (!*payload)
 return -ENOMEM;
 2204 *(A_UINT32 *) (*payload) =
 2205 wma_handle->utf_event_info.length;
2206 memcpy(*payload + sizeof(A_UINT32),
2207 wma_handle->utf_event_info.data, payload_len);
2208 wma_handle->utf_event_info.length = 0;
2209 *len = payload_len;
2210 }
2211
2212 return ret;
2213}
2214
2215/**
2216 * wma_post_ftm_response() - post ftm response to upper layer
2217 * @wma_handle: wma handle
2218 *
2219 * Return: none
2220 */
2221static void wma_post_ftm_response(tp_wma_handle wma_handle)
2222{
2223 int ret;
2224 uint8_t *payload;
2225 uint32_t data_len;
2226 cds_msg_t msg = { 0 };
2227 CDF_STATUS status;
2228
2229 ret = wma_utf_rsp(wma_handle, &payload, &data_len);
2230
2231 if (ret) {
2232 return;
2233 }
2234
2235 sys_build_message_header(SYS_MSG_ID_FTM_RSP, &msg);
2236 msg.bodyptr = payload;
2237 msg.bodyval = 0;
2238
2239 status = cds_mq_post_message(CDS_MQ_ID_SYS, &msg);
2240
2241 if (status != CDF_STATUS_SUCCESS) {
2242 WMA_LOGE("failed to post ftm response to SYS");
2243 cdf_mem_free(payload);
2244 }
2245}
2246
2247/**
2248 * wma_process_utf_event() - process utf event
2249 * @handle: wma handle
2250 * @datap: data buffer
2251 * @dataplen: data length
2252 *
2253 * Return: 0 for success or error code
2254 */
2255static int
2256wma_process_utf_event(WMA_HANDLE handle, uint8_t *datap, uint32_t dataplen)
2257{
2258 tp_wma_handle wma_handle = (tp_wma_handle) handle;
2259 SEG_HDR_INFO_STRUCT segHdrInfo;
2260 uint8_t totalNumOfSegments, currentSeq;
2261 WMI_PDEV_UTF_EVENTID_param_tlvs *param_buf;
2262 uint8_t *data;
2263 uint32_t datalen;
2264
2265 param_buf = (WMI_PDEV_UTF_EVENTID_param_tlvs *) datap;
2266 if (!param_buf) {
2267 WMA_LOGE("Get NULL point message from FW");
2268 return -EINVAL;
2269 }
2270 data = param_buf->data;
2271 datalen = param_buf->num_data;
2272
2273 segHdrInfo = *(SEG_HDR_INFO_STRUCT *) &(data[0]);
2274
2275 wma_handle->utf_event_info.currentSeq = (segHdrInfo.segmentInfo & 0xF);
2276
2277 currentSeq = (segHdrInfo.segmentInfo & 0xF);
2278 totalNumOfSegments = (segHdrInfo.segmentInfo >> 4) & 0xF;
2279
2280 datalen = datalen - sizeof(segHdrInfo);
2281
2282 if (currentSeq == 0) {
2283 wma_handle->utf_event_info.expectedSeq = 0;
2284 wma_handle->utf_event_info.offset = 0;
2285 } else {
2286 if (wma_handle->utf_event_info.expectedSeq != currentSeq)
2287 WMA_LOGE("Mismatch in expecting seq expected"
2288 " Seq %d got seq %d",
2289 wma_handle->utf_event_info.expectedSeq,
2290 currentSeq);
2291 }
2292
2293 memcpy(&wma_handle->utf_event_info.
2294 data[wma_handle->utf_event_info.offset],
2295 &data[sizeof(segHdrInfo)], datalen);
2296 wma_handle->utf_event_info.offset =
2297 wma_handle->utf_event_info.offset + datalen;
2298 wma_handle->utf_event_info.expectedSeq++;
2299
2300 if (wma_handle->utf_event_info.expectedSeq == totalNumOfSegments) {
2301 if (wma_handle->utf_event_info.offset != segHdrInfo.len)
2302 WMA_LOGE("All segs received total len mismatch.."
2303 " len %zu total len %d",
2304 wma_handle->utf_event_info.offset,
2305 segHdrInfo.len);
2306
2307 wma_handle->utf_event_info.length =
2308 wma_handle->utf_event_info.offset;
2309 }
2310
2311 wma_post_ftm_response(wma_handle);
2312
2313 return 0;
2314}
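
/*
 * Illustrative sketch (kept out of the build with #if 0): the UTF segment
 * header packs the current segment number into bits [3:0] of segmentInfo and
 * the total number of segments into bits [7:4], which is exactly what
 * wma_process_utf_event() unpacks above.  The helper name is hypothetical.
 */
#if 0
static void example_decode_seg_info(uint32_t segment_info,
				    uint8_t *current_seq,
				    uint8_t *total_segments)
{
	*current_seq = segment_info & 0xF;
	*total_segments = (segment_info >> 4) & 0xF;
}
#endif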
2315
2316/**
2317 * wma_utf_detach() - utf detach
2318 * @wma_handle: wma handle
2319 *
2320 * Return: none
2321 */
2322void wma_utf_detach(tp_wma_handle wma_handle)
2323{
2324 if (wma_handle->utf_event_info.data) {
2325 cdf_mem_free(wma_handle->utf_event_info.data);
2326 wma_handle->utf_event_info.data = NULL;
2327 wma_handle->utf_event_info.length = 0;
2328 wmi_unified_unregister_event_handler(wma_handle->wmi_handle,
2329 WMI_PDEV_UTF_EVENTID);
2330 }
2331}
2332
2333/**
2334 * wma_utf_attach() - utf attach
2335 * @wma_handle: wma handle
2336 *
2337 * Return: none
2338 */
2339void wma_utf_attach(tp_wma_handle wma_handle)
2340{
2341 int ret;
2342
2343 wma_handle->utf_event_info.data = (unsigned char *)
2344 cdf_mem_malloc(MAX_UTF_EVENT_LENGTH);
2345 wma_handle->utf_event_info.length = 0;
2346
2347 ret = wmi_unified_register_event_handler(wma_handle->wmi_handle,
2348 WMI_PDEV_UTF_EVENTID,
2349 wma_process_utf_event);
2350
2351 if (ret)
2352 WMA_LOGP("%s: Failed to register UTF event callback", __func__);
2353}
2354
2355/**
2356 * wmi_unified_pdev_utf_cmd() - send utf command to fw
2357 * @wmi_handle: wmi handle
2358 * @utf_payload: utf payload
2359 * @len: length
2360 *
2361 * Return: 0 for success or error code
2362 */
2363static int
2364wmi_unified_pdev_utf_cmd(wmi_unified_t wmi_handle, uint8_t *utf_payload,
2365 uint16_t len)
2366{
2367 wmi_buf_t buf;
2368 uint8_t *cmd;
2369 int ret = 0;
2370 static uint8_t msgref = 1;
2371 uint8_t segNumber = 0, segInfo, numSegments;
2372 uint16_t chunk_len, total_bytes;
2373 uint8_t *bufpos;
2374 SEG_HDR_INFO_STRUCT segHdrInfo;
2375
2376 bufpos = utf_payload;
2377 total_bytes = len;
2378 ASSERT(total_bytes / MAX_WMI_UTF_LEN ==
2379 (uint8_t) (total_bytes / MAX_WMI_UTF_LEN));
2380 numSegments = (uint8_t) (total_bytes / MAX_WMI_UTF_LEN);
2381
2382 if (len - (numSegments * MAX_WMI_UTF_LEN))
2383 numSegments++;
2384
2385 while (len) {
2386 if (len > MAX_WMI_UTF_LEN)
 2387 chunk_len = MAX_WMI_UTF_LEN; /* MAX message */
2388 else
2389 chunk_len = len;
2390
2391 buf = wmi_buf_alloc(wmi_handle,
2392 (chunk_len + sizeof(segHdrInfo) +
2393 WMI_TLV_HDR_SIZE));
2394 if (!buf) {
2395 WMA_LOGE("%s:wmi_buf_alloc failed", __func__);
2396 return -ENOMEM;
2397 }
2398
2399 cmd = (uint8_t *) wmi_buf_data(buf);
2400
2401 segHdrInfo.len = total_bytes;
2402 segHdrInfo.msgref = msgref;
2403 segInfo = ((numSegments << 4) & 0xF0) | (segNumber & 0xF);
2404 segHdrInfo.segmentInfo = segInfo;
2405 segHdrInfo.pad = 0;
2406
2407 WMA_LOGD("%s:segHdrInfo.len = %d, segHdrInfo.msgref = %d,"
2408 " segHdrInfo.segmentInfo = %d",
2409 __func__, segHdrInfo.len, segHdrInfo.msgref,
2410 segHdrInfo.segmentInfo);
2411
2412 WMA_LOGD("%s:total_bytes %d segNumber %d totalSegments %d"
2413 "chunk len %d", __func__, total_bytes, segNumber,
2414 numSegments, chunk_len);
2415
2416 segNumber++;
2417
2418 WMITLV_SET_HDR(cmd, WMITLV_TAG_ARRAY_BYTE,
2419 (chunk_len + sizeof(segHdrInfo)));
2420 cmd += WMI_TLV_HDR_SIZE;
2421 memcpy(cmd, &segHdrInfo, sizeof(segHdrInfo)); /* 4 bytes */
2422 memcpy(&cmd[sizeof(segHdrInfo)], bufpos, chunk_len);
2423
2424 ret = wmi_unified_cmd_send(wmi_handle, buf,
2425 (chunk_len + sizeof(segHdrInfo) +
2426 WMI_TLV_HDR_SIZE),
2427 WMI_PDEV_UTF_CMDID);
2428
2429 if (ret != EOK) {
2430 WMA_LOGE("Failed to send WMI_PDEV_UTF_CMDID command");
2431 wmi_buf_free(buf);
2432 break;
2433 }
2434
2435 len -= chunk_len;
2436 bufpos += chunk_len;
2437 }
2438
2439 msgref++;
2440
2441 return ret;
2442}
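
/*
 * Illustrative sketch (kept out of the build with #if 0): shows the segment
 * count that wmi_unified_pdev_utf_cmd() derives from the payload length, i.e.
 * one segment per full MAX_WMI_UTF_LEN chunk plus one more for any trailing
 * partial chunk.  The helper name is hypothetical.
 */
#if 0
static uint8_t example_utf_num_segments(uint16_t payload_len)
{
	uint8_t num = payload_len / MAX_WMI_UTF_LEN;

	if (payload_len % MAX_WMI_UTF_LEN)
		num++;
	/* e.g. a payload slightly longer than 2 * MAX_WMI_UTF_LEN -> 3 */
	return num;
}
#endif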
2443
2444/**
2445 * wma_utf_cmd() - utf command
2446 * @wma_handle: wma handle
2447 * @data: data
2448 * @len: length
2449 *
2450 * Return: 0 for success or error code
2451 */
2452int wma_utf_cmd(tp_wma_handle wma_handle, uint8_t *data, uint16_t len)
2453{
2454 wma_handle->utf_event_info.length = 0;
2455 return wmi_unified_pdev_utf_cmd(wma_handle->wmi_handle, data, len);
2456}
2457
2458/**
2459 * wma_process_ftm_command() - process ftm command
2460 * @wma_handle: wma handle
2461 * @msg_buffer: message buffer
2462 *
2463 * Return: CDF_STATUS_SUCCESS for success or error code
2464 */
2465CDF_STATUS
2466wma_process_ftm_command(tp_wma_handle wma_handle,
2467 struct ar6k_testmode_cmd_data *msg_buffer)
2468{
2469 uint8_t *data = NULL;
2470 uint16_t len = 0;
2471 int ret;
2472
2473 if (!msg_buffer)
2474 return CDF_STATUS_E_INVAL;
2475
2476 if (cds_get_conparam() != CDF_FTM_MODE) {
2477 WMA_LOGE("FTM command issued in non-FTM mode");
2478 cdf_mem_free(msg_buffer->data);
2479 cdf_mem_free(msg_buffer);
2480 return CDF_STATUS_E_NOSUPPORT;
2481 }
2482
2483 data = msg_buffer->data;
2484 len = msg_buffer->len;
2485
2486 ret = wma_utf_cmd(wma_handle, data, len);
2487
2488 cdf_mem_free(msg_buffer->data);
2489 cdf_mem_free(msg_buffer);
2490
2491 if (ret)
2492 return CDF_STATUS_E_FAILURE;
2493
2494 return CDF_STATUS_SUCCESS;
2495}
2496#endif /* QCA_WIFI_FTM */
2497
2498/**
2499 * wma_get_wcnss_software_version() - get wcnss software version
2500 * @p_cds_gctx: cds context
2501 * @pVersion: version pointer
2502 * @versionBufferSize: buffer size
2503 *
2504 * Return: CDF_STATUS_SUCCESS for success or error code
2505 */
2506CDF_STATUS wma_get_wcnss_software_version(void *p_cds_gctx,
2507 uint8_t *pVersion,
2508 uint32_t versionBufferSize)
2509{
2510 tp_wma_handle wma_handle;
2511 wma_handle = cds_get_context(CDF_MODULE_ID_WMA);
2512
2513 if (NULL == wma_handle) {
2514 WMA_LOGE("%s: Failed to get wma", __func__);
2515 return CDF_STATUS_E_FAULT;
2516 }
2517
2518 snprintf(pVersion, versionBufferSize, "%x",
2519 (unsigned int)wma_handle->target_fw_version);
2520 return CDF_STATUS_SUCCESS;
2521}
2522
2523/**
2524 * wma_get_tx_rx_ss_from_config() - Get Tx/Rx spatial stream from HW mode config
2525 * @mac_ss: Config which indicates the HW mode as per 'hw_mode_ss_config'
2526 * @tx_ss: Contains the Tx spatial stream
2527 * @rx_ss: Contains the Rx spatial stream
2528 *
 2529 * Fills in the number of Tx and Rx spatial streams for the given config
2530 *
2531 * Return: None
2532 */
2533void wma_get_tx_rx_ss_from_config(enum hw_mode_ss_config mac_ss,
2534 uint32_t *tx_ss,
2535 uint32_t *rx_ss)
2536{
2537 switch (mac_ss) {
2538 case HW_MODE_SS_0x0:
2539 *tx_ss = 0;
2540 *rx_ss = 0;
2541 break;
2542 case HW_MODE_SS_1x1:
2543 *tx_ss = 1;
2544 *rx_ss = 1;
2545 break;
2546 case HW_MODE_SS_2x2:
2547 *tx_ss = 2;
2548 *rx_ss = 2;
2549 break;
2550 case HW_MODE_SS_3x3:
2551 *tx_ss = 3;
2552 *rx_ss = 3;
2553 break;
2554 case HW_MODE_SS_4x4:
2555 *tx_ss = 4;
2556 *rx_ss = 4;
2557 break;
2558 default:
2559 *tx_ss = 0;
2560 *rx_ss = 0;
2561 }
2562}
2563
2564/**
2565 * wma_get_matching_hw_mode_index() - Get matching HW mode index
2566 * @wma: WMA handle
2567 * @mac0_tx_ss: Number of tx spatial streams of MAC0
2568 * @mac0_rx_ss: Number of rx spatial streams of MAC0
2569 * @mac0_bw: Bandwidth of MAC0 of type 'hw_mode_bandwidth'
2570 * @mac1_tx_ss: Number of tx spatial streams of MAC1
2571 * @mac1_rx_ss: Number of rx spatial streams of MAC1
2572 * @mac1_bw: Bandwidth of MAC1 of type 'hw_mode_bandwidth'
2573 * @dbs: DBS capability of type 'hw_mode_dbs_capab'
2574 * @dfs: Agile DFS capability of type 'hw_mode_agile_dfs_capab'
2575 *
2576 * Fetches the HW mode index corresponding to the HW mode provided
2577 *
 2578 * Return: Non-negative hw mode index if a match is found, or a negative
 2579 * value otherwise
2580 */
2581static int8_t wma_get_matching_hw_mode_index(tp_wma_handle wma,
2582 uint32_t mac0_tx_ss, uint32_t mac0_rx_ss,
2583 enum hw_mode_bandwidth mac0_bw,
2584 uint32_t mac1_tx_ss, uint32_t mac1_rx_ss,
2585 enum hw_mode_bandwidth mac1_bw,
2586 enum hw_mode_dbs_capab dbs,
2587 enum hw_mode_agile_dfs_capab dfs)
2588{
2589 uint32_t i;
2590 uint32_t t_mac0_tx_ss, t_mac0_rx_ss, t_mac0_bw;
2591 uint32_t t_mac1_tx_ss, t_mac1_rx_ss, t_mac1_bw;
2592 uint32_t dbs_mode, agile_dfs_mode;
2593 int8_t found = -EINVAL;
2594
2595 if (!wma) {
2596 WMA_LOGE("%s: Invalid WMA handle", __func__);
2597 return found;
2598 }
2599
2600 for (i = 0; i < wma->num_dbs_hw_modes; i++) {
2601 t_mac0_tx_ss = WMI_DBS_HW_MODE_MAC0_TX_STREAMS_GET(
2602 wma->hw_mode.hw_mode_list[i]);
2603 if (t_mac0_tx_ss != mac0_tx_ss)
2604 continue;
2605
2606 t_mac0_rx_ss = WMI_DBS_HW_MODE_MAC0_RX_STREAMS_GET(
2607 wma->hw_mode.hw_mode_list[i]);
2608 if (t_mac0_rx_ss != mac0_rx_ss)
2609 continue;
2610
2611 t_mac0_bw = WMI_DBS_HW_MODE_MAC0_BANDWIDTH_GET(
2612 wma->hw_mode.hw_mode_list[i]);
2613 if (t_mac0_bw != mac0_bw)
2614 continue;
2615
2616 t_mac1_tx_ss = WMI_DBS_HW_MODE_MAC1_TX_STREAMS_GET(
2617 wma->hw_mode.hw_mode_list[i]);
2618 if (t_mac1_tx_ss != mac1_tx_ss)
2619 continue;
2620
2621 t_mac1_rx_ss = WMI_DBS_HW_MODE_MAC1_RX_STREAMS_GET(
2622 wma->hw_mode.hw_mode_list[i]);
2623 if (t_mac1_rx_ss != mac1_rx_ss)
2624 continue;
2625
2626 t_mac1_bw = WMI_DBS_HW_MODE_MAC1_BANDWIDTH_GET(
2627 wma->hw_mode.hw_mode_list[i]);
2628 if (t_mac1_bw != mac1_bw)
2629 continue;
2630
2631 dbs_mode = WMI_DBS_HW_MODE_DBS_MODE_GET(
2632 wma->hw_mode.hw_mode_list[i]);
2633 if (dbs_mode != dbs)
2634 continue;
2635
2636 agile_dfs_mode = WMI_DBS_HW_MODE_AGILE_DFS_GET(
2637 wma->hw_mode.hw_mode_list[i]);
2638 if (agile_dfs_mode != dfs)
2639 continue;
2640
2641 found = i;
2642 WMA_LOGI("%s: hw_mode index %d found",
2643 __func__, i);
2644 break;
2645 }
2646 return found;
2647}
2648
2649/**
2650 * wma_get_hw_mode_from_dbs_hw_list() - Get hw_mode index
2651 * @mac0_ss: MAC0 spatial stream configuration
2652 * @mac0_bw: MAC0 bandwidth configuration
2653 * @mac1_ss: MAC1 spatial stream configuration
2654 * @mac1_bw: MAC1 bandwidth configuration
2655 * @dbs: HW DBS capability
2656 * @dfs: HW Agile DFS capability
2657 *
2658 * Get the HW mode index corresponding to the HW modes spatial stream,
2659 * bandwidth, DBS and Agile DFS capability
2660 *
 2661 * Return: Index number if a match is found, or a negative value if not found
2662 */
2663int8_t wma_get_hw_mode_idx_from_dbs_hw_list(enum hw_mode_ss_config mac0_ss,
2664 enum hw_mode_bandwidth mac0_bw,
2665 enum hw_mode_ss_config mac1_ss,
2666 enum hw_mode_bandwidth mac1_bw,
2667 enum hw_mode_dbs_capab dbs,
2668 enum hw_mode_agile_dfs_capab dfs)
2669{
2670 tp_wma_handle wma;
2671 uint32_t mac0_tx_ss, mac0_rx_ss;
2672 uint32_t mac1_tx_ss, mac1_rx_ss;
2673
2674 wma = cds_get_context(CDF_MODULE_ID_WMA);
2675 if (!wma) {
2676 WMA_LOGE("%s: Invalid WMA handle", __func__);
2677 return -EINVAL;
2678 }
2679
2680 wma_get_tx_rx_ss_from_config(mac0_ss, &mac0_tx_ss, &mac0_rx_ss);
2681 wma_get_tx_rx_ss_from_config(mac1_ss, &mac1_tx_ss, &mac1_rx_ss);
2682
2683 WMA_LOGI("%s: MAC0: TxSS=%d, RxSS=%d, BW=%d",
2684 __func__, mac0_tx_ss, mac0_rx_ss, mac0_bw);
2685 WMA_LOGI("%s: MAC1: TxSS=%d, RxSS=%d, BW=%d",
2686 __func__, mac1_tx_ss, mac1_rx_ss, mac1_bw);
2687 WMA_LOGI("%s: DBS capab=%d, Agile DFS capab=%d",
2688 __func__, dbs, dfs);
2689
2690 return wma_get_matching_hw_mode_index(wma, mac0_tx_ss, mac0_rx_ss,
2691 mac0_bw,
2692 mac1_tx_ss, mac1_rx_ss,
2693 mac1_bw,
2694 dbs, dfs);
2695}
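
/*
 * Illustrative sketch (kept out of the build with #if 0): a caller checking
 * whether the target advertises a 2x2 + 2x2 DBS hardware mode.  The
 * bandwidth and agile-DFS enumerator names (HW_MODE_80_MHZ, HW_MODE_40_MHZ,
 * HW_MODE_AGILE_DFS_NONE) are assumed from the hw_mode_* enums and may
 * differ; the function name is hypothetical.
 */
#if 0
static bool example_has_2x2_dbs_mode(void)
{
	int8_t idx;

	idx = wma_get_hw_mode_idx_from_dbs_hw_list(HW_MODE_SS_2x2,
						   HW_MODE_80_MHZ,
						   HW_MODE_SS_2x2,
						   HW_MODE_40_MHZ,
						   HW_MODE_DBS,
						   HW_MODE_AGILE_DFS_NONE);
	return idx >= 0;
}
#endif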
2696
2697/**
2698 * wma_get_hw_mode_from_idx() - Get HW mode based on index
2699 * @idx: HW mode index
2700 * @hw_mode: HW mode params
2701 *
2702 * Fetches the HW mode parameters
2703 *
 2704 * Return: CDF_STATUS_SUCCESS if the hw mode params were populated, error otherwise
2705 */
2706CDF_STATUS wma_get_hw_mode_from_idx(uint32_t idx,
2707 struct sir_hw_mode_params *hw_mode)
2708{
2709 tp_wma_handle wma;
2710 uint32_t param;
2711
2712 wma = cds_get_context(CDF_MODULE_ID_WMA);
2713 if (!wma) {
2714 WMA_LOGE("%s: Invalid WMA handle", __func__);
2715 return CDF_STATUS_E_FAILURE;
2716 }
2717
 2718 if (idx >= wma->num_dbs_hw_modes) {
2719 WMA_LOGE("%s: Invalid index", __func__);
2720 return CDF_STATUS_E_FAILURE;
2721 }
2722
2723 param = wma->hw_mode.hw_mode_list[idx];
2724
2725 hw_mode->mac0_tx_ss = WMI_DBS_HW_MODE_MAC0_TX_STREAMS_GET(param);
2726 hw_mode->mac0_rx_ss = WMI_DBS_HW_MODE_MAC0_RX_STREAMS_GET(param);
2727 hw_mode->mac0_bw = WMI_DBS_HW_MODE_MAC0_BANDWIDTH_GET(param);
2728 hw_mode->mac1_tx_ss = WMI_DBS_HW_MODE_MAC1_TX_STREAMS_GET(param);
2729 hw_mode->mac1_rx_ss = WMI_DBS_HW_MODE_MAC1_RX_STREAMS_GET(param);
2730 hw_mode->mac1_bw = WMI_DBS_HW_MODE_MAC1_BANDWIDTH_GET(param);
2731 hw_mode->dbs_cap = WMI_DBS_HW_MODE_DBS_MODE_GET(param);
2732 hw_mode->agile_dfs_cap = WMI_DBS_HW_MODE_AGILE_DFS_GET(param);
2733
2734 return CDF_STATUS_SUCCESS;
2735}
2736
2737/**
2738 * wma_get_num_dbs_hw_modes() - Get number of HW mode
2739 *
2740 * Fetches the number of DBS HW modes returned by the FW
2741 *
 2742 * Return: Number of DBS HW modes, or a negative value on error
2743 */
2744int8_t wma_get_num_dbs_hw_modes(void)
2745{
2746 tp_wma_handle wma;
2747
2748 wma = cds_get_context(CDF_MODULE_ID_WMA);
2749 if (!wma) {
2750 WMA_LOGE("%s: Invalid WMA handle", __func__);
2751 return -EINVAL;
2752 }
2753 return wma->num_dbs_hw_modes;
2754}
2755
2756/**
2757 * wma_is_hw_dbs_capable() - Check if HW is DBS capable
2758 *
2759 * Checks if the HW is DBS capable
2760 *
2761 * Return: true if the HW is DBS capable
2762 */
2763bool wma_is_hw_dbs_capable(void)
2764{
2765 tp_wma_handle wma;
2766 uint32_t param, i, found = 0;
2767
2768 wma = cds_get_context(CDF_MODULE_ID_WMA);
2769 if (!wma) {
2770 WMA_LOGE("%s: Invalid WMA handle", __func__);
2771 return false;
2772 }
2773
2774 if (!wma_is_dbs_enable()) {
2775 WMA_LOGI("%s: DBS is disabled", __func__);
2776 return false;
2777 }
2778
2779 WMA_LOGI("%s: DBS service bit map: %d", __func__,
2780 WMI_SERVICE_IS_ENABLED(wma->wmi_service_bitmap,
2781 WMI_SERVICE_DUAL_BAND_SIMULTANEOUS_SUPPORT));
2782
2783 /* The agreement with FW is that: To know if the target is DBS
2784 * capable, DBS needs to be supported both in the HW mode list
2785 * and in the service ready event
2786 */
2787 if (!(WMI_SERVICE_IS_ENABLED(wma->wmi_service_bitmap,
2788 WMI_SERVICE_DUAL_BAND_SIMULTANEOUS_SUPPORT)))
2789 return false;
2790
2791 for (i = 0; i < wma->num_dbs_hw_modes; i++) {
2792 param = wma->hw_mode.hw_mode_list[i];
2793 WMA_LOGI("%s: HW param: %x", __func__, param);
2794 if (WMI_DBS_HW_MODE_DBS_MODE_GET(param)) {
2795 WMA_LOGI("%s: HW (%d) is DBS capable", __func__, i);
2796 found = 1;
2797 break;
2798 }
2799 }
2800
2801 if (found)
2802 return true;
2803
2804 return false;
2805}
2806
2807/**
2808 * wma_is_hw_agile_dfs_capable() - Check if HW is agile DFS capable
2809 *
2810 * Checks if the HW is agile DFS capable
2811 *
2812 * Return: true if the HW is agile DFS capable
2813 */
2814bool wma_is_hw_agile_dfs_capable(void)
2815{
2816 tp_wma_handle wma;
2817 uint32_t param, i, found = 0;
2818
2819 wma = cds_get_context(CDF_MODULE_ID_WMA);
2820 if (!wma) {
2821 WMA_LOGE("%s: Invalid WMA handle", __func__);
2822 return false;
2823 }
2824
2825 if (!wma_is_agile_dfs_enable()) {
2826 WMA_LOGI("%s: Agile DFS is disabled", __func__);
2827 return false;
2828 }
2829
2830 WMA_LOGI("%s: DBS service bit map: %d", __func__,
2831 WMI_SERVICE_IS_ENABLED(wma->wmi_service_bitmap,
2832 WMI_SERVICE_DUAL_BAND_SIMULTANEOUS_SUPPORT));
2833
2834 /* The agreement with FW is that to know if the target is Agile DFS
2835 * capable, DBS needs to be supported in the service bit map and
2836 * Agile DFS needs to be supported in the HW mode list
2837 */
2838 if (!(WMI_SERVICE_IS_ENABLED(wma->wmi_service_bitmap,
2839 WMI_SERVICE_DUAL_BAND_SIMULTANEOUS_SUPPORT)))
2840 return false;
2841
2842 for (i = 0; i < wma->num_dbs_hw_modes; i++) {
2843 param = wma->hw_mode.hw_mode_list[i];
2844 WMA_LOGI("%s: HW param: %x", __func__, param);
2845 if (WMI_DBS_HW_MODE_AGILE_DFS_GET(param)) {
2846 WMA_LOGI("%s: HW %d is agile DFS capable",
2847 __func__, i);
2848 found = 1;
2849 break;
2850 }
2851 }
2852
2853 if (found)
2854 return true;
2855
2856 return false;
2857}
2858
2859/**
2860 * wma_get_mac_id_of_vdev() - Get MAC id corresponding to a vdev
2861 * @vdev_id: VDEV whose MAC ID is required
2862 *
2863 * Get MAC id corresponding to a vdev id from the WMA structure
2864 *
2865 * Return: Negative value on failure and MAC id on success
2866 */
2867int8_t wma_get_mac_id_of_vdev(uint32_t vdev_id)
2868{
2869 tp_wma_handle wma;
2870
2871 wma = cds_get_context(CDF_MODULE_ID_WMA);
2872 if (!wma) {
2873 WMA_LOGE("%s: Invalid WMA handle", __func__);
2874 return -EINVAL;
2875 }
2876
2877 if (wma->interfaces)
2878 return wma->interfaces[vdev_id].mac_id;
2879
2880 return -EINVAL;
2881}
2882
2883/**
2884 * wma_get_old_and_new_hw_index() - Get the old and new HW index
2885 * @old_hw_mode_index: Value at this pointer contains the old HW mode index
2886 * Default value when not configured is WMA_DEFAULT_HW_MODE_INDEX
2887 * @new_hw_mode_index: Value at this pointer contains the new HW mode index
2888 * Default value when not configured is WMA_DEFAULT_HW_MODE_INDEX
2889 *
2890 * Get the old and new HW index configured in the driver
2891 *
2892 * Return: Failure in case the HW mode indices cannot be fetched and Success
2893 * otherwise. When no HW mode transition has happened the values of
2894 * old_hw_mode_index and new_hw_mode_index will be the same.
2895 */
2896CDF_STATUS wma_get_old_and_new_hw_index(uint32_t *old_hw_mode_index,
2897 uint32_t *new_hw_mode_index)
2898{
2899 tp_wma_handle wma;
2900
2901 wma = cds_get_context(CDF_MODULE_ID_WMA);
2902 if (!wma) {
2903 WMA_LOGE("%s: Invalid WMA handle", __func__);
2904 return CDF_STATUS_E_INVAL;
2905 }
2906
2907 *old_hw_mode_index = wma->old_hw_mode_index;
2908 *new_hw_mode_index = wma->new_hw_mode_index;
2909
2910 return CDF_STATUS_SUCCESS;
2911}
2912
2913/**
2914 * wma_update_intf_hw_mode_params() - Update WMA params
2915 * @vdev_id: VDEV id whose params needs to be updated
2916 * @mac_id: MAC id to be updated
2917 * @cfgd_hw_mode_index: HW mode index from which Tx and Rx SS will be updated
2918 *
2919 * Updates the MAC id, tx spatial stream, rx spatial stream in WMA
2920 *
2921 * Return: None
2922 */
2923void wma_update_intf_hw_mode_params(uint32_t vdev_id, uint32_t mac_id,
2924 uint32_t cfgd_hw_mode_index)
2925{
2926 tp_wma_handle wma;
2927
2928 wma = cds_get_context(CDF_MODULE_ID_WMA);
2929 if (!wma) {
2930 WMA_LOGE("%s: Invalid WMA handle", __func__);
2931 return;
2932 }
2933
2934 if (!wma->interfaces) {
2935 WMA_LOGE("%s: Interface is NULL", __func__);
2936 return;
2937 }
2938
2939 wma->interfaces[vdev_id].mac_id = mac_id;
2940 if (mac_id == 0) {
2941 wma->interfaces[vdev_id].tx_streams =
2942 WMI_DBS_HW_MODE_MAC0_TX_STREAMS_GET(cfgd_hw_mode_index);
2943 wma->interfaces[vdev_id].rx_streams =
2944 WMI_DBS_HW_MODE_MAC0_RX_STREAMS_GET(cfgd_hw_mode_index);
2945 } else {
2946 wma->interfaces[vdev_id].tx_streams =
2947 WMI_DBS_HW_MODE_MAC1_TX_STREAMS_GET(cfgd_hw_mode_index);
2948 wma->interfaces[vdev_id].rx_streams =
2949 WMI_DBS_HW_MODE_MAC1_RX_STREAMS_GET(cfgd_hw_mode_index);
2950 }
2951}
2952
2953/**
2954 * wma_get_dbs_hw_modes() - Get the DBS HW modes for userspace
2955 * @one_by_one_dbs: 1x1 DBS capability of HW
2956 * @two_by_two_dbs: 2x2 DBS capability of HW
2957 *
2958 * Provides the DBS HW mode capability such as whether
2959 * 1x1 DBS, 2x2 DBS is supported by the HW or not.
2960 *
 2961 * Return: Failure in case of error and 0 on success.
 2962 * one_by_one_dbs/two_by_two_dbs will be false
 2963 * if they are not supported.
 2964 * one_by_one_dbs/two_by_two_dbs will be true
 2965 * if they are supported.
 2966 * Both being false indicates that
 2967 * DBS is disabled.
2968 */
2969CDF_STATUS wma_get_dbs_hw_modes(bool *one_by_one_dbs, bool *two_by_two_dbs)
2970{
2971 tp_wma_handle wma;
2972 uint32_t i;
2973 int8_t found_one_by_one = -EINVAL, found_two_by_two = -EINVAL;
2974 uint32_t conf1_tx_ss, conf1_rx_ss;
2975 uint32_t conf2_tx_ss, conf2_rx_ss;
2976
2977 *one_by_one_dbs = false;
2978 *two_by_two_dbs = false;
2979
2980 if (wma_is_hw_dbs_capable() == false) {
2981 WMA_LOGE("%s: HW is not DBS capable", __func__);
2982 /* Caller will understand that DBS is disabled */
2983 return CDF_STATUS_SUCCESS;
2984
2985 }
2986
2987 wma = cds_get_context(CDF_MODULE_ID_WMA);
2988 if (!wma) {
2989 WMA_LOGE("%s: Invalid WMA handle", __func__);
2990 return CDF_STATUS_E_FAILURE;
2991 }
2992
2993 /* To check 1x1 capability */
2994 wma_get_tx_rx_ss_from_config(HW_MODE_SS_1x1,
2995 &conf1_tx_ss, &conf1_rx_ss);
2996 /* To check 2x2 capability */
2997 wma_get_tx_rx_ss_from_config(HW_MODE_SS_2x2,
2998 &conf2_tx_ss, &conf2_rx_ss);
2999
3000 for (i = 0; i < wma->num_dbs_hw_modes; i++) {
3001 uint32_t t_conf0_tx_ss, t_conf0_rx_ss;
3002 uint32_t t_conf1_tx_ss, t_conf1_rx_ss;
3003 uint32_t dbs_mode;
3004
3005 t_conf0_tx_ss = WMI_DBS_HW_MODE_MAC0_TX_STREAMS_GET(
3006 wma->hw_mode.hw_mode_list[i]);
3007 t_conf0_rx_ss = WMI_DBS_HW_MODE_MAC0_RX_STREAMS_GET(
3008 wma->hw_mode.hw_mode_list[i]);
3009 t_conf1_tx_ss = WMI_DBS_HW_MODE_MAC1_TX_STREAMS_GET(
3010 wma->hw_mode.hw_mode_list[i]);
3011 t_conf1_rx_ss = WMI_DBS_HW_MODE_MAC1_RX_STREAMS_GET(
3012 wma->hw_mode.hw_mode_list[i]);
3013 dbs_mode = WMI_DBS_HW_MODE_DBS_MODE_GET(
3014 wma->hw_mode.hw_mode_list[i]);
3015
3016 if (((((t_conf0_tx_ss == conf1_tx_ss) &&
3017 (t_conf0_rx_ss == conf1_rx_ss)) ||
3018 ((t_conf1_tx_ss == conf1_tx_ss) &&
3019 (t_conf1_rx_ss == conf1_rx_ss))) &&
3020 (dbs_mode == HW_MODE_DBS)) &&
3021 (found_one_by_one < 0)) {
3022 found_one_by_one = i;
3023 WMA_LOGI("%s: 1x1 hw_mode index %d found",
3024 __func__, i);
3025 /* Once an entry is found, need not check for 1x1
3026 * again
3027 */
3028 continue;
3029 }
3030
3031 if (((((t_conf0_tx_ss == conf2_tx_ss) &&
3032 (t_conf0_rx_ss == conf2_rx_ss)) ||
3033 ((t_conf1_tx_ss == conf2_tx_ss) &&
3034 (t_conf1_rx_ss == conf2_rx_ss))) &&
3035 (dbs_mode == HW_MODE_DBS)) &&
3036 (found_two_by_two < 0)) {
3037 found_two_by_two = i;
3038 WMA_LOGI("%s: 2x2 hw_mode index %d found",
3039 __func__, i);
3040 /* Once an entry is found, need not check for 2x2
3041 * again
3042 */
3043 continue;
3044 }
3045 }
3046
3047 if (found_one_by_one >= 0)
3048 *one_by_one_dbs = true;
3049 if (found_two_by_two >= 0)
3050 *two_by_two_dbs = true;
3051
3052 return CDF_STATUS_SUCCESS;
3053}
3054
3055/**
3056 * wma_get_current_hw_mode() - Get current HW mode params
3057 * @hw_mode: HW mode parameters
3058 *
3059 * Provides the current HW mode parameters if the HW mode is initialized
3060 * in the driver
3061 *
3062 * Return: Success if the current HW mode params are successfully populated
3063 */
3064CDF_STATUS wma_get_current_hw_mode(struct sir_hw_mode_params *hw_mode)
3065{
3066 CDF_STATUS status;
3067 uint32_t old_hw_index = 0, new_hw_index = 0;
3068
3069 WMA_LOGI("%s: Get the current hw mode", __func__);
3070
3071 status = wma_get_old_and_new_hw_index(&old_hw_index,
3072 &new_hw_index);
3073 if (CDF_STATUS_SUCCESS != status) {
3074 WMA_LOGE("%s: Failed to get HW mode index", __func__);
3075 return CDF_STATUS_E_FAILURE;
3076 }
3077
3078 if (new_hw_index == WMA_DEFAULT_HW_MODE_INDEX) {
3079 WMA_LOGE("%s: HW mode is not yet initialized", __func__);
3080 return CDF_STATUS_E_FAILURE;
3081 }
3082
3083 status = wma_get_hw_mode_from_idx(new_hw_index, hw_mode);
3084 if (CDF_STATUS_SUCCESS != status) {
3085 WMA_LOGE("%s: Failed to get HW mode index", __func__);
3086 return CDF_STATUS_E_FAILURE;
3087 }
3088 return CDF_STATUS_SUCCESS;
3089}
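
/*
 * Illustrative sketch (kept out of the build with #if 0): a caller querying
 * whether the currently configured hardware mode has DBS enabled.  The
 * function name is hypothetical.
 */
#if 0
static bool example_current_mode_is_dbs(void)
{
	struct sir_hw_mode_params hw_mode;

	if (wma_get_current_hw_mode(&hw_mode) != CDF_STATUS_SUCCESS)
		return false;

	return hw_mode.dbs_cap;
}
#endif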
3090
3091/**
3092 * wma_is_dbs_enable() - Check if master DBS control is enabled
3093 *
3094 * Checks if the master DBS control is enabled. This will be used
3095 * to override any other DBS capability
3096 *
3097 * Return: True if master DBS control is enabled
3098 */
3099bool wma_is_dbs_enable(void)
3100{
3101 tp_wma_handle wma;
3102
3103 if (wma_is_dual_mac_disabled_in_ini())
3104 return false;
3105
3106 wma = cds_get_context(CDF_MODULE_ID_WMA);
3107 if (!wma) {
3108 WMA_LOGE("%s: Invalid WMA handle", __func__);
3109 return false;
3110 }
3111
3112 WMA_LOGD("%s: DBS=%d", __func__,
3113 WMI_DBS_FW_MODE_CFG_DBS_GET(wma->dual_mac_cfg.cur_fw_mode_config));
3114
3115 if (WMI_DBS_FW_MODE_CFG_DBS_GET(wma->dual_mac_cfg.cur_fw_mode_config))
3116 return true;
3117
3118 return false;
3119}
3120
3121/**
3122 * wma_is_agile_dfs_enable() - Check if master Agile DFS control is enabled
3123 *
3124 * Checks if the master Agile DFS control is enabled. This will be used
3125 * to override any other Agile DFS capability
3126 *
3127 * Return: True if master Agile DFS control is enabled
3128 */
3129bool wma_is_agile_dfs_enable(void)
3130{
3131 tp_wma_handle wma;
3132
3133 if (wma_is_dual_mac_disabled_in_ini())
3134 return false;
3135
3136 wma = cds_get_context(CDF_MODULE_ID_WMA);
3137 if (!wma) {
3138 WMA_LOGE("%s: Invalid WMA handle", __func__);
3139 return false;
3140 }
3141
3142 WMA_LOGD("%s: DFS=%d Single mac with DFS=%d", __func__,
3143 WMI_DBS_FW_MODE_CFG_AGILE_DFS_GET(
3144 wma->dual_mac_cfg.cur_fw_mode_config),
3145 WMI_DBS_CONC_SCAN_CFG_AGILE_DFS_SCAN_GET(
3146 wma->dual_mac_cfg.cur_scan_config));
3147
3148 if ((WMI_DBS_FW_MODE_CFG_AGILE_DFS_GET(
3149 wma->dual_mac_cfg.cur_fw_mode_config)) &&
3150 (WMI_DBS_CONC_SCAN_CFG_AGILE_DFS_SCAN_GET(
3151 wma->dual_mac_cfg.cur_scan_config)))
3152 return true;
3153
3154 return false;
3155}
3156
3157/**
3158 * wma_get_updated_scan_config() - Get the updated scan configuration
3159 * @scan_config: Pointer containing the updated scan config
3160 * @dbs_scan: 0 or 1 indicating if DBS scan needs to be enabled/disabled
3161 * @dbs_plus_agile_scan: 0 or 1 indicating if DBS plus agile scan needs to be
3162 * enabled/disabled
3163 * @single_mac_scan_with_dfs: 0 or 1 indicating if single MAC scan with DFS
3164 * needs to be enabled/disabled
3165 *
3166 * Takes the current scan configuration and set the necessary scan config
3167 * bits to either 0/1 and provides the updated value to the caller who
3168 * can use this to pass it on to the FW
3169 *
3170 * Return: 0 on success
3171 */
3172CDF_STATUS wma_get_updated_scan_config(uint32_t *scan_config,
3173 bool dbs_scan,
3174 bool dbs_plus_agile_scan,
3175 bool single_mac_scan_with_dfs)
3176{
3177 tp_wma_handle wma;
3178
3179 wma = cds_get_context(CDF_MODULE_ID_WMA);
3180 if (!wma) {
3181 WMA_LOGE("%s: Invalid WMA handle", __func__);
3182 return CDF_STATUS_E_FAILURE;
3183 }
3184 *scan_config = wma->dual_mac_cfg.cur_scan_config;
3185
3186 WMI_DBS_CONC_SCAN_CFG_DBS_SCAN_SET(*scan_config, dbs_scan);
3187 WMI_DBS_CONC_SCAN_CFG_AGILE_SCAN_SET(*scan_config,
3188 dbs_plus_agile_scan);
3189 WMI_DBS_CONC_SCAN_CFG_AGILE_DFS_SCAN_SET(*scan_config,
3190 single_mac_scan_with_dfs);
3191
3192 WMA_LOGD("%s: *scan_config:%x ", __func__, *scan_config);
3193 return CDF_STATUS_SUCCESS;
3194}
3195
3196/**
3197 * wma_get_updated_fw_mode_config() - Get the updated fw mode configuration
3198 * @fw_mode_config: Pointer containing the updated fw mode config
3199 * @dbs: 0 or 1 indicating if DBS needs to be enabled/disabled
3200 * @agile_dfs: 0 or 1 indicating if agile DFS needs to be enabled/disabled
3201 *
3202 * Takes the current fw mode configuration and set the necessary fw mode config
3203 * bits to either 0/1 and provides the updated value to the caller who
3204 * can use this to pass it on to the FW
3205 *
3206 * Return: 0 on success
3207 */
3208CDF_STATUS wma_get_updated_fw_mode_config(uint32_t *fw_mode_config,
3209 bool dbs,
3210 bool agile_dfs)
3211{
3212 tp_wma_handle wma;
3213
3214 wma = cds_get_context(CDF_MODULE_ID_WMA);
3215 if (!wma) {
3216 WMA_LOGE("%s: Invalid WMA handle", __func__);
3217 return CDF_STATUS_E_FAILURE;
3218 }
3219 *fw_mode_config = wma->dual_mac_cfg.cur_fw_mode_config;
3220
3221 WMI_DBS_FW_MODE_CFG_DBS_SET(*fw_mode_config, dbs);
3222 WMI_DBS_FW_MODE_CFG_AGILE_DFS_SET(*fw_mode_config, agile_dfs);
3223
3224 WMA_LOGD("%s: *fw_mode_config:%x ", __func__, *fw_mode_config);
3225 return CDF_STATUS_SUCCESS;
3226}
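
/*
 * Illustrative sketch (kept out of the build with #if 0): the typical pattern
 * is to let WMA merge the requested bits into the current configuration and
 * then hand the resulting words to the component that sends the dual MAC
 * configuration to the firmware (that send step is omitted here).  The
 * function name is hypothetical.
 */
#if 0
static CDF_STATUS example_build_dbs_configs(uint32_t *scan_config,
					    uint32_t *fw_mode_config)
{
	CDF_STATUS status;

	/* Enable DBS scan; leave DBS+agile scan and single-MAC DFS scan off */
	status = wma_get_updated_scan_config(scan_config, true, false, false);
	if (status != CDF_STATUS_SUCCESS)
		return status;

	/* Enable DBS; leave agile DFS off */
	return wma_get_updated_fw_mode_config(fw_mode_config, true, false);
}
#endif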
3227
3228/**
3229 * wma_get_dbs_config() - Get DBS bit
3230 *
3231 * Gets the DBS bit of fw_mode_config_bits
3232 *
3233 * Return: 0 or 1 to indicate the DBS bit
3234 */
3235bool wma_get_dbs_config(void)
3236{
3237 tp_wma_handle wma;
3238 uint32_t fw_mode_config;
3239
3240 if (wma_is_dual_mac_disabled_in_ini())
3241 return false;
3242
3243 wma = cds_get_context(CDF_MODULE_ID_WMA);
3244 if (!wma) {
3245 WMA_LOGE("%s: Invalid WMA handle", __func__);
3246 /* We take that it is disabled and proceed */
3247 return false;
3248 }
3249 fw_mode_config = wma->dual_mac_cfg.cur_fw_mode_config;
3250
3251 return WMI_DBS_FW_MODE_CFG_DBS_GET(fw_mode_config);
3252}
3253
3254/**
3255 * wma_get_agile_dfs_config() - Get Agile DFS bit
3256 *
3257 * Gets the Agile DFS bit of fw_mode_config_bits
3258 *
3259 * Return: 0 or 1 to indicate the Agile DFS bit
3260 */
3261bool wma_get_agile_dfs_config(void)
3262{
3263 tp_wma_handle wma;
3264 uint32_t fw_mode_config;
3265
3266 if (wma_is_dual_mac_disabled_in_ini())
3267 return false;
3268
3269 wma = cds_get_context(CDF_MODULE_ID_WMA);
3270 if (!wma) {
3271 WMA_LOGE("%s: Invalid WMA handle", __func__);
3272 /* We take that it is disabled and proceed */
3273 return false;
3274 }
3275 fw_mode_config = wma->dual_mac_cfg.cur_fw_mode_config;
3276
3277 return WMI_DBS_FW_MODE_CFG_AGILE_DFS_GET(fw_mode_config);
3278}
3279
3280/**
3281 * wma_get_dbs_scan_config() - Get DBS scan bit
3282 *
3283 * Gets the DBS scan bit of concurrent_scan_config_bits
3284 *
3285 * Return: 0 or 1 to indicate the DBS scan bit
3286 */
3287bool wma_get_dbs_scan_config(void)
3288{
3289 tp_wma_handle wma;
3290 uint32_t scan_config;
3291
3292 if (wma_is_dual_mac_disabled_in_ini())
3293 return false;
3294
3295 wma = cds_get_context(CDF_MODULE_ID_WMA);
3296 if (!wma) {
3297 WMA_LOGE("%s: Invalid WMA handle", __func__);
3298 /* We take that it is disabled and proceed */
3299 return false;
3300 }
3301 scan_config = wma->dual_mac_cfg.cur_scan_config;
3302
3303 return WMI_DBS_CONC_SCAN_CFG_DBS_SCAN_GET(scan_config);
3304}
3305
3306/**
3307 * wma_get_dbs_plus_agile_scan_config() - Get DBS plus agile scan bit
3308 *
3309 * Gets the DBS plus agile scan bit of concurrent_scan_config_bits
3310 *
3311 * Return: 0 or 1 to indicate the DBS plus agile scan bit
3312 */
3313bool wma_get_dbs_plus_agile_scan_config(void)
3314{
3315 tp_wma_handle wma;
3316 uint32_t scan_config;
3317
3318 if (wma_is_dual_mac_disabled_in_ini())
3319 return false;
3320
3321 wma = cds_get_context(CDF_MODULE_ID_WMA);
3322 if (!wma) {
3323 WMA_LOGE("%s: Invalid WMA handle", __func__);
3324 /* We take that it is disabled and proceed */
3325 return false;
3326 }
3327 scan_config = wma->dual_mac_cfg.cur_scan_config;
3328
3329 return WMI_DBS_CONC_SCAN_CFG_AGILE_SCAN_GET(scan_config);
3330}
3331
3332/**
3333 * wma_get_single_mac_scan_with_dfs_config() - Get Single MAC scan with DFS bit
3334 *
3335 * Gets the Single MAC scan with DFS bit of concurrent_scan_config_bits
3336 *
3337 * Return: 0 or 1 to indicate the Single MAC scan with DFS bit
3338 */
3339bool wma_get_single_mac_scan_with_dfs_config(void)
3340{
3341 tp_wma_handle wma;
3342 uint32_t scan_config;
3343
3344 if (wma_is_dual_mac_disabled_in_ini())
3345 return false;
3346
3347 wma = cds_get_context(CDF_MODULE_ID_WMA);
3348 if (!wma) {
3349 WMA_LOGE("%s: Invalid WMA handle", __func__);
3350 /* We take that it is disabled and proceed */
3351 return false;
3352 }
3353 scan_config = wma->dual_mac_cfg.cur_scan_config;
3354
3355 return WMI_DBS_CONC_SCAN_CFG_AGILE_DFS_SCAN_GET(scan_config);
3356}
3357
3358/**
3359 * wma_is_dual_mac_disabled_in_ini() - Check if dual mac is disabled in INI
3360 *
3361 * Checks if the dual mac feature is disabled in INI
3362 *
3363 * Return: true if the dual mac feature is disabled from INI
3364 */
3365bool wma_is_dual_mac_disabled_in_ini(void)
3366{
3367 tpAniSirGlobal mac = cds_get_context(CDF_MODULE_ID_PE);
3368
3369 if (!mac) {
3370 WMA_LOGE("%s: Invalid mac pointer", __func__);
3371 return true;
3372 }
3373
3374 if (mac->dual_mac_feature_disable)
3375 return true;
3376
3377 return false;
3378}
3379
3380/**
3381 * wma_get_prev_dbs_config() - Get prev DBS bit
3382 *
3383 * Gets the previous DBS bit of fw_mode_config_bits
3384 *
3385 * Return: 0 or 1 to indicate the DBS bit
3386 */
3387bool wma_get_prev_dbs_config(void)
3388{
3389 tp_wma_handle wma;
3390 uint32_t fw_mode_config;
3391
3392 if (wma_is_dual_mac_disabled_in_ini())
3393 return false;
3394
3395 wma = cds_get_context(CDF_MODULE_ID_WMA);
3396 if (!wma) {
3397 WMA_LOGE("%s: Invalid WMA handle", __func__);
3398 /* We take that it is disabled and proceed */
3399 return false;
3400 }
3401 fw_mode_config = wma->dual_mac_cfg.prev_fw_mode_config;
3402
3403 return WMI_DBS_FW_MODE_CFG_DBS_GET(fw_mode_config);
3404}
3405
3406/**
3407 * wma_get_prev_agile_dfs_config() - Get prev Agile DFS bit
3408 *
3409 * Gets the previous Agile DFS bit of fw_mode_config_bits
3410 *
3411 * Return: 0 or 1 to indicate the Agile DFS bit
3412 */
3413bool wma_get_prev_agile_dfs_config(void)
3414{
3415 tp_wma_handle wma;
3416 uint32_t fw_mode_config;
3417
3418 if (wma_is_dual_mac_disabled_in_ini())
3419 return false;
3420
3421 wma = cds_get_context(CDF_MODULE_ID_WMA);
3422 if (!wma) {
3423 WMA_LOGE("%s: Invalid WMA handle", __func__);
3424 /* We take that it is disabled and proceed */
3425 return false;
3426 }
3427 fw_mode_config = wma->dual_mac_cfg.prev_fw_mode_config;
3428
3429 return WMI_DBS_FW_MODE_CFG_AGILE_DFS_GET(fw_mode_config);
3430}
3431
3432/**
3433 * wma_get_prev_dbs_scan_config() - Get prev DBS scan bit
3434 *
3435 * Gets the previous DBS scan bit of concurrent_scan_config_bits
3436 *
3437 * Return: 0 or 1 to indicate the DBS scan bit
3438 */
3439bool wma_get_prev_dbs_scan_config(void)
3440{
3441 tp_wma_handle wma;
3442 uint32_t scan_config;
3443
3444 if (wma_is_dual_mac_disabled_in_ini())
3445 return false;
3446
3447 wma = cds_get_context(CDF_MODULE_ID_WMA);
3448 if (!wma) {
3449 WMA_LOGE("%s: Invalid WMA handle", __func__);
3450 /* We take that it is disabled and proceed */
3451 return false;
3452 }
3453 scan_config = wma->dual_mac_cfg.prev_scan_config;
3454
3455 return WMI_DBS_CONC_SCAN_CFG_DBS_SCAN_GET(scan_config);
3456}
3457
3458/**
3459 * wma_get_prev_dbs_plus_agile_scan_config() - Get prev DBS plus agile scan bit
3460 *
3461 * Gets the previous DBS plus agile scan bit of concurrent_scan_config_bits
3462 *
3463 * Return: 0 or 1 to indicate the DBS plus agile scan bit
3464 */
3465bool wma_get_prev_dbs_plus_agile_scan_config(void)
3466{
3467 tp_wma_handle wma;
3468 uint32_t scan_config;
3469
3470 if (wma_is_dual_mac_disabled_in_ini())
3471 return false;
3472
3473 wma = cds_get_context(CDF_MODULE_ID_WMA);
3474 if (!wma) {
3475 WMA_LOGE("%s: Invalid WMA handle", __func__);
3476 /* We take that it is disabled and proceed */
3477 return false;
3478 }
3479 scan_config = wma->dual_mac_cfg.prev_scan_config;
3480
3481 return WMI_DBS_CONC_SCAN_CFG_AGILE_SCAN_GET(scan_config);
3482}
3483
3484/**
3485 * wma_get_prev_single_mac_scan_with_dfs_config() - Get prev Single MAC scan
3486 * with DFS bit
3487 *
3488 * Gets the previous Single MAC scan with DFS bit of concurrent_scan_config_bits
3489 *
3490 * Return: 0 or 1 to indicate the Single MAC scan with DFS bit
3491 */
3492bool wma_get_prev_single_mac_scan_with_dfs_config(void)
3493{
3494 tp_wma_handle wma;
3495 uint32_t scan_config;
3496
3497 if (wma_is_dual_mac_disabled_in_ini())
3498 return false;
3499
3500 wma = cds_get_context(CDF_MODULE_ID_WMA);
3501 if (!wma) {
3502 WMA_LOGE("%s: Invalid WMA handle", __func__);
3503 /* We take that it is disabled and proceed */
3504 return false;
3505 }
3506 scan_config = wma->dual_mac_cfg.prev_scan_config;
3507
3508 return WMI_DBS_CONC_SCAN_CFG_AGILE_DFS_SCAN_GET(scan_config);
3509}
3510
3511/**
3512 * wma_is_scan_simultaneous_capable() - Check if scan parallelization is
3513 * supported or not
3514 *
 3515 * Currently, scan parallelization support depends on DBS, but
 3516 * it may become independent of it in the future.
 3517 *
 3518 * Return: True if simultaneous scans are supported
3519 */
3520bool wma_is_scan_simultaneous_capable(void)
3521{
3522 if (wma_is_hw_dbs_capable())
3523 return true;
3524
3525 return false;
3526}