Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1 | /* |
Himanshu Agarwal | c733bd3 | 2017-11-18 18:35:42 +0530 | [diff] [blame] | 2 | * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved. |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 3 | * |
| 4 | * Previously licensed under the ISC license by Qualcomm Atheros, Inc. |
| 5 | * |
| 6 | * |
| 7 | * Permission to use, copy, modify, and/or distribute this software for |
| 8 | * any purpose with or without fee is hereby granted, provided that the |
| 9 | * above copyright notice and this permission notice appear in all |
| 10 | * copies. |
| 11 | * |
| 12 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL |
| 13 | * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED |
| 14 | * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE |
| 15 | * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL |
| 16 | * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR |
| 17 | * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER |
| 18 | * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR |
| 19 | * PERFORMANCE OF THIS SOFTWARE. |
| 20 | */ |
| 21 | |
| 22 | /* |
| 23 | * This file was originally distributed by Qualcomm Atheros, Inc. |
| 24 | * under proprietary terms before Copyright ownership was assigned |
| 25 | * to the Linux Foundation. |
| 26 | */ |
| 27 | |
| 28 | /** |
| 29 | * DOC: wma_data.c |
| 30 | * This file contains tx/rx and data path related functions. |
| 31 | */ |
| 32 | |
| 33 | /* Header files */ |
| 34 | |
| 35 | #include "wma.h" |
| 36 | #include "wma_api.h" |
| 37 | #include "cds_api.h" |
| 38 | #include "wmi_unified_api.h" |
| 39 | #include "wlan_qct_sys.h" |
| 40 | #include "wni_api.h" |
| 41 | #include "ani_global.h" |
| 42 | #include "wmi_unified.h" |
| 43 | #include "wni_cfg.h" |
| 44 | #include "cfg_api.h" |
Manjunathappa Prakash | 3454fd6 | 2016-04-01 08:52:06 -0700 | [diff] [blame] | 45 | #include <cdp_txrx_tx_throttle.h> |
Poddar, Siddarth | 5a91f5b | 2016-04-28 12:24:10 +0530 | [diff] [blame] | 46 | #if defined(CONFIG_HL_SUPPORT) |
| 47 | #include "wlan_tgt_def_config_hl.h" |
| 48 | #else |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 49 | #include "wlan_tgt_def_config.h" |
Poddar, Siddarth | 5a91f5b | 2016-04-28 12:24:10 +0530 | [diff] [blame] | 50 | #endif |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 51 | #include "qdf_nbuf.h" |
Anurag Chouhan | 6d76066 | 2016-02-20 16:05:43 +0530 | [diff] [blame] | 52 | #include "qdf_types.h" |
Anurag Chouhan | 600c3a0 | 2016-03-01 10:33:54 +0530 | [diff] [blame] | 53 | #include "qdf_mem.h" |
Nirav Shah | eb017be | 2018-02-15 11:20:58 +0530 | [diff] [blame^] | 54 | #include "qdf_util.h" |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 55 | |
| 56 | #include "wma_types.h" |
| 57 | #include "lim_api.h" |
| 58 | #include "lim_session_utils.h" |
| 59 | |
| 60 | #include "cds_utils.h" |
| 61 | |
| 62 | #if !defined(REMOVE_PKT_LOG) |
| 63 | #include "pktlog_ac.h" |
| 64 | #endif /* REMOVE_PKT_LOG */ |
| 65 | |
| 66 | #include "dbglog_host.h" |
| 67 | #include "csr_api.h" |
| 68 | #include "ol_fw.h" |
| 69 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 70 | #include "wma_internal.h" |
Dhanashri Atre | b08959a | 2016-03-01 17:28:03 -0800 | [diff] [blame] | 71 | #include "cdp_txrx_flow_ctrl_legacy.h" |
| 72 | #include "cdp_txrx_cmn.h" |
| 73 | #include "cdp_txrx_misc.h" |
Manjunathappa Prakash | 10d357a | 2016-03-31 19:20:49 -0700 | [diff] [blame] | 74 | #include <cdp_txrx_peer_ops.h> |
| 75 | #include <cdp_txrx_cfg.h> |
Poddar, Siddarth | 5a91f5b | 2016-04-28 12:24:10 +0530 | [diff] [blame] | 76 | #include "cdp_txrx_stats.h" |
Leo Chang | 9646490 | 2016-10-28 11:10:54 -0700 | [diff] [blame] | 77 | #include <cdp_txrx_misc.h> |
| 78 | #include "enet.h" |
Himanshu Agarwal | 2fdf77a | 2016-12-29 11:41:00 +0530 | [diff] [blame] | 79 | #include "wlan_mgmt_txrx_utils_api.h" |
| 80 | #include "wlan_objmgr_psoc_obj.h" |
| 81 | #include "wlan_objmgr_pdev_obj.h" |
| 82 | #include "wlan_objmgr_vdev_obj.h" |
| 83 | #include "wlan_objmgr_peer_obj.h" |
Venkata Sharath Chandra Manchala | 0d44d45 | 2016-11-23 17:48:15 -0800 | [diff] [blame] | 84 | #include <cdp_txrx_handle.h> |
Tushnim Bhattacharyya | 45ed04f | 2017-03-15 10:15:05 -0700 | [diff] [blame] | 85 | #include <wlan_pmo_ucfg_api.h> |
Sravan Kumar Kairam | 905b4c5 | 2017-10-17 19:38:14 +0530 | [diff] [blame] | 86 | #include "wlan_lmac_if_api.h" |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 87 | |
/**
 * struct wma_search_rate - one entry of a rate search table
 * @rate: rate in Mbps x 10 (e.g. 540 == 54 Mbps)
 * @flag: encoding for the rate; meaning is table-specific (MCS index for
 *	HT/VHT tables, OFDM/CCK index with bit 7 marking CCK in the
 *	ofdm_cck table)
 */
struct wma_search_rate {
	int32_t rate;
	uint8_t flag;
};
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 92 | |
#define WMA_MAX_OFDM_CCK_RATE_TBL_SIZE 12
/* In ofdm_cck_rate_tbl->flag, if bit 7 is 1 it's CCK, otherwise it is OFDM.
 * Lower bits carry the ofdm/cck index for encoding the rate.
 * Rates are in Mbps x 10 and sorted descending for wma_bin_search_rate().
 */
static struct wma_search_rate ofdm_cck_rate_tbl[WMA_MAX_OFDM_CCK_RATE_TBL_SIZE] = {
	{540, 4},		/* 4: OFDM 54 Mbps */
	{480, 0},		/* 0: OFDM 48 Mbps */
	{360, 5},		/* 5: OFDM 36 Mbps */
	{240, 1},		/* 1: OFDM 24 Mbps */
	{180, 6},		/* 6: OFDM 18 Mbps */
	{120, 2},		/* 2: OFDM 12 Mbps */
	{110, (1 << 7)},	/* 0: CCK 11 Mbps Long */
	{90, 7},		/* 7: OFDM 9 Mbps */
	{60, 3},		/* 3: OFDM 6 Mbps */
	{55, ((1 << 7) | 1)},	/* 1: CCK 5.5 Mbps Long */
	{20, ((1 << 7) | 2)},	/* 2: CCK 2 Mbps Long */
	{10, ((1 << 7) | 3)}	/* 3: CCK 1 Mbps Long */
};
| 111 | |
#define WMA_MAX_VHT20_RATE_TBL_SIZE 9
/* In vht20_400ns_rate_tbl flag carries the mcs index for encoding the rate.
 * Rates are in Mbps x 10, sorted descending for wma_bin_search_rate().
 */
static struct wma_search_rate vht20_400ns_rate_tbl[WMA_MAX_VHT20_RATE_TBL_SIZE] = {
	{867, 8},		/* MCS8 1SS short GI */
	{722, 7},		/* MCS7 1SS short GI */
	{650, 6},		/* MCS6 1SS short GI */
	{578, 5},		/* MCS5 1SS short GI */
	{433, 4},		/* MCS4 1SS short GI */
	{289, 3},		/* MCS3 1SS short GI */
	{217, 2},		/* MCS2 1SS short GI */
	{144, 1},		/* MCS1 1SS short GI */
	{72, 0}			/* MCS0 1SS short GI */
};

/* In vht20_800ns_rate_tbl flag carries the mcs index for encoding the rate */
static struct wma_search_rate vht20_800ns_rate_tbl[WMA_MAX_VHT20_RATE_TBL_SIZE] = {
	{780, 8},		/* MCS8 1SS long GI */
	{650, 7},		/* MCS7 1SS long GI */
	{585, 6},		/* MCS6 1SS long GI */
	{520, 5},		/* MCS5 1SS long GI */
	{390, 4},		/* MCS4 1SS long GI */
	{260, 3},		/* MCS3 1SS long GI */
	{195, 2},		/* MCS2 1SS long GI */
	{130, 1},		/* MCS1 1SS long GI */
	{65, 0}			/* MCS0 1SS long GI */
};
| 138 | |
#define WMA_MAX_VHT40_RATE_TBL_SIZE 10
/* In vht40_400ns_rate_tbl flag carries the mcs index for encoding the rate.
 * Rates are in Mbps x 10, sorted descending for wma_bin_search_rate().
 */
static struct wma_search_rate vht40_400ns_rate_tbl[WMA_MAX_VHT40_RATE_TBL_SIZE] = {
	{2000, 9},		/* MCS9 1SS short GI */
	{1800, 8},		/* MCS8 1SS short GI */
	{1500, 7},		/* MCS7 1SS short GI */
	{1350, 6},		/* MCS6 1SS short GI */
	{1200, 5},		/* MCS5 1SS short GI */
	{900, 4},		/* MCS4 1SS short GI */
	{600, 3},		/* MCS3 1SS short GI */
	{450, 2},		/* MCS2 1SS short GI */
	{300, 1},		/* MCS1 1SS short GI */
	{150, 0},		/* MCS0 1SS short GI */
};

/* In vht40_800ns_rate_tbl flag carries the mcs index for encoding the rate */
static struct wma_search_rate vht40_800ns_rate_tbl[WMA_MAX_VHT40_RATE_TBL_SIZE] = {
	{1800, 9},		/* MCS9 1SS long GI */
	{1620, 8},		/* MCS8 1SS long GI */
	{1350, 7},		/* MCS7 1SS long GI */
	{1215, 6},		/* MCS6 1SS long GI */
	{1080, 5},		/* MCS5 1SS long GI */
	{810, 4},		/* MCS4 1SS long GI */
	{540, 3},		/* MCS3 1SS long GI */
	{405, 2},		/* MCS2 1SS long GI */
	{270, 1},		/* MCS1 1SS long GI */
	{135, 0}		/* MCS0 1SS long GI */
};
| 166 | |
#define WMA_MAX_VHT80_RATE_TBL_SIZE 10
/* In vht80_*_rate_tbl flag carries the mcs index for encoding the rate.
 * Rates are in Mbps x 10, sorted descending for wma_bin_search_rate().
 */
static struct wma_search_rate vht80_400ns_rate_tbl[WMA_MAX_VHT80_RATE_TBL_SIZE] = {
	{4333, 9},		/* MCS9 1SS short GI */
	{3900, 8},		/* MCS8 1SS short GI */
	{3250, 7},		/* MCS7 1SS short GI */
	{2925, 6},		/* MCS6 1SS short GI */
	{2600, 5},		/* MCS5 1SS short GI */
	{1950, 4},		/* MCS4 1SS short GI */
	{1300, 3},		/* MCS3 1SS short GI */
	{975, 2},		/* MCS2 1SS short GI */
	{650, 1},		/* MCS1 1SS short GI */
	{325, 0}		/* MCS0 1SS short GI */
};

/* In vht80_800ns_rate_tbl flag carries the mcs index for encoding the rate */
static struct wma_search_rate vht80_800ns_rate_tbl[WMA_MAX_VHT80_RATE_TBL_SIZE] = {
	{3900, 9},		/* MCS9 1SS long GI */
	{3510, 8},		/* MCS8 1SS long GI */
	{2925, 7},		/* MCS7 1SS long GI */
	{2633, 6},		/* MCS6 1SS long GI */
	{2340, 5},		/* MCS5 1SS long GI */
	{1755, 4},		/* MCS4 1SS long GI */
	{1170, 3},		/* MCS3 1SS long GI */
	{878, 2},		/* MCS2 1SS long GI */
	{585, 1},		/* MCS1 1SS long GI */
	{293, 0}		/* MCS0 1SS long GI */
};
| 193 | |
#define WMA_MAX_HT20_RATE_TBL_SIZE 8
/* In ht20_*_rate_tbl flag carries the mcs index for encoding the rate.
 * Rates are in Mbps x 10, sorted descending for wma_bin_search_rate().
 */
static struct wma_search_rate ht20_400ns_rate_tbl[WMA_MAX_HT20_RATE_TBL_SIZE] = {
	{722, 7},		/* MCS7 1SS short GI */
	{650, 6},		/* MCS6 1SS short GI */
	{578, 5},		/* MCS5 1SS short GI */
	{433, 4},		/* MCS4 1SS short GI */
	{289, 3},		/* MCS3 1SS short GI */
	{217, 2},		/* MCS2 1SS short GI */
	{144, 1},		/* MCS1 1SS short GI */
	{72, 0}			/* MCS0 1SS short GI */
};

/* In ht20_800ns_rate_tbl flag carries the mcs index for encoding the rate */
static struct wma_search_rate ht20_800ns_rate_tbl[WMA_MAX_HT20_RATE_TBL_SIZE] = {
	{650, 7},		/* MCS7 1SS long GI */
	{585, 6},		/* MCS6 1SS long GI */
	{520, 5},		/* MCS5 1SS long GI */
	{390, 4},		/* MCS4 1SS long GI */
	{260, 3},		/* MCS3 1SS long GI */
	{195, 2},		/* MCS2 1SS long GI */
	{130, 1},		/* MCS1 1SS long GI */
	{65, 0}			/* MCS0 1SS long GI */
};
| 216 | |
#define WMA_MAX_HT40_RATE_TBL_SIZE 8
/* In ht40_*_rate_tbl flag carries the mcs index for encoding the rate.
 * Rates are in Mbps x 10, sorted descending for wma_bin_search_rate().
 */
static struct wma_search_rate ht40_400ns_rate_tbl[WMA_MAX_HT40_RATE_TBL_SIZE] = {
	{1500, 7},		/* MCS7 1SS short GI */
	{1350, 6},		/* MCS6 1SS short GI */
	{1200, 5},		/* MCS5 1SS short GI */
	{900, 4},		/* MCS4 1SS short GI */
	{600, 3},		/* MCS3 1SS short GI */
	{450, 2},		/* MCS2 1SS short GI */
	{300, 1},		/* MCS1 1SS short GI */
	{150, 0}		/* MCS0 1SS short GI */
};

/* In ht40_800ns_rate_tbl flag carries the mcs index for encoding the rate */
static struct wma_search_rate ht40_800ns_rate_tbl[WMA_MAX_HT40_RATE_TBL_SIZE] = {
	{1350, 7},		/* MCS7 1SS long GI */
	{1215, 6},		/* MCS6 1SS long GI */
	{1080, 5},		/* MCS5 1SS long GI */
	{810, 4},		/* MCS4 1SS long GI */
	{540, 3},		/* MCS3 1SS long GI */
	{405, 2},		/* MCS2 1SS long GI */
	{270, 1},		/* MCS1 1SS long GI */
	{135, 0}		/* MCS0 1SS long GI */
};
| 239 | |
| 240 | /** |
| 241 | * wma_bin_search_rate() - binary search function to find rate |
| 242 | * @tbl: rate table |
| 243 | * @tbl_size: table size |
| 244 | * @mbpsx10_rate: return mbps rate |
| 245 | * @ret_flag: return flag |
| 246 | * |
| 247 | * Return: none |
| 248 | */ |
Manikandan Mohan | 1dd8b5d | 2017-04-18 15:54:09 -0700 | [diff] [blame] | 249 | static void wma_bin_search_rate(struct wma_search_rate *tbl, int32_t tbl_size, |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 250 | int32_t *mbpsx10_rate, uint8_t *ret_flag) |
| 251 | { |
| 252 | int32_t upper, lower, mid; |
| 253 | |
| 254 | /* the table is descenting. index holds the largest value and the |
Manikandan Mohan | 1dd8b5d | 2017-04-18 15:54:09 -0700 | [diff] [blame] | 255 | * bottom index holds the smallest value |
| 256 | */ |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 257 | |
| 258 | upper = 0; /* index 0 */ |
| 259 | lower = tbl_size - 1; /* last index */ |
| 260 | |
| 261 | if (*mbpsx10_rate >= tbl[upper].rate) { |
| 262 | /* use the largest rate */ |
| 263 | *mbpsx10_rate = tbl[upper].rate; |
| 264 | *ret_flag = tbl[upper].flag; |
| 265 | return; |
| 266 | } else if (*mbpsx10_rate <= tbl[lower].rate) { |
| 267 | /* use the smallest rate */ |
| 268 | *mbpsx10_rate = tbl[lower].rate; |
| 269 | *ret_flag = tbl[lower].flag; |
| 270 | return; |
| 271 | } |
| 272 | /* now we do binery search to get the floor value */ |
| 273 | while (lower - upper > 1) { |
| 274 | mid = (upper + lower) >> 1; |
| 275 | if (*mbpsx10_rate == tbl[mid].rate) { |
| 276 | /* found the exact match */ |
| 277 | *mbpsx10_rate = tbl[mid].rate; |
| 278 | *ret_flag = tbl[mid].flag; |
| 279 | return; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 280 | } |
Manikandan Mohan | 1dd8b5d | 2017-04-18 15:54:09 -0700 | [diff] [blame] | 281 | /* not found. if mid's rate is larger than input move |
| 282 | * upper to mid. If mid's rate is larger than input |
| 283 | * move lower to mid. |
| 284 | */ |
| 285 | if (*mbpsx10_rate > tbl[mid].rate) |
| 286 | lower = mid; |
| 287 | else |
| 288 | upper = mid; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 289 | } |
| 290 | /* after the bin search the index is the ceiling of rate */ |
| 291 | *mbpsx10_rate = tbl[upper].rate; |
| 292 | *ret_flag = tbl[upper].flag; |
| 293 | return; |
| 294 | } |
| 295 | |
| 296 | /** |
| 297 | * wma_fill_ofdm_cck_mcast_rate() - fill ofdm cck mcast rate |
| 298 | * @mbpsx10_rate: mbps rates |
| 299 | * @nss: nss |
| 300 | * @rate: rate |
| 301 | * |
Anurag Chouhan | f04e84f | 2016-03-03 10:12:12 +0530 | [diff] [blame] | 302 | * Return: QDF status |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 303 | */ |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 304 | static QDF_STATUS wma_fill_ofdm_cck_mcast_rate(int32_t mbpsx10_rate, |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 305 | uint8_t nss, uint8_t *rate) |
| 306 | { |
| 307 | uint8_t idx = 0; |
Manikandan Mohan | 1dd8b5d | 2017-04-18 15:54:09 -0700 | [diff] [blame] | 308 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 309 | wma_bin_search_rate(ofdm_cck_rate_tbl, WMA_MAX_OFDM_CCK_RATE_TBL_SIZE, |
| 310 | &mbpsx10_rate, &idx); |
| 311 | |
| 312 | /* if bit 7 is set it uses CCK */ |
| 313 | if (idx & 0x80) |
| 314 | *rate |= (1 << 6) | (idx & 0xF); /* set bit 6 to 1 for CCK */ |
| 315 | else |
| 316 | *rate |= (idx & 0xF); |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 317 | return QDF_STATUS_SUCCESS; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 318 | } |
| 319 | |
/**
 * wma_set_ht_vht_mcast_rate() - encode the HT/VHT mcast rate for one GI mode
 * @shortgi: non-zero when short guard interval is in use
 * @mbpsx10_rate: requested rate in Mbps x 10 (not used in the encoding)
 * @sgi_idx: MCS index found in the short-GI table
 * @sgi_rate: rate found in the short-GI table
 * @lgi_idx: MCS index found in the long-GI table
 * @lgi_rate: rate found in the long-GI table
 * @premable: preamble value to encode into bits 6-7
 * @rate: out - rate field with preamble and MCS index OR'ed in
 * @streaming_rate: out - the selected table rate
 *
 * Return: none
 */
static void wma_set_ht_vht_mcast_rate(uint32_t shortgi, int32_t mbpsx10_rate,
				      uint8_t sgi_idx, int32_t sgi_rate,
				      uint8_t lgi_idx, int32_t lgi_rate,
				      uint8_t premable, uint8_t *rate,
				      int32_t *streaming_rate)
{
	/* pick the short-GI or long-GI lookup result */
	uint8_t mcs_idx = shortgi ? sgi_idx : lgi_idx;

	*rate |= (premable << 6) | (mcs_idx & 0xF);
	*streaming_rate = shortgi ? sgi_rate : lgi_rate;
}
| 348 | |
| 349 | /** |
| 350 | * wma_fill_ht20_mcast_rate() - fill ht20 mcast rate |
| 351 | * @shortgi: short gaurd interval |
| 352 | * @mbpsx10_rate: mbps rates |
| 353 | * @nss: nss |
| 354 | * @rate: rate |
| 355 | * @streaming_rate: streaming rate |
| 356 | * |
Anurag Chouhan | f04e84f | 2016-03-03 10:12:12 +0530 | [diff] [blame] | 357 | * Return: QDF status |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 358 | */ |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 359 | static QDF_STATUS wma_fill_ht20_mcast_rate(uint32_t shortgi, |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 360 | int32_t mbpsx10_rate, uint8_t nss, |
| 361 | uint8_t *rate, |
| 362 | int32_t *streaming_rate) |
| 363 | { |
| 364 | uint8_t sgi_idx = 0, lgi_idx = 0; |
| 365 | int32_t sgi_rate, lgi_rate; |
Manikandan Mohan | 1dd8b5d | 2017-04-18 15:54:09 -0700 | [diff] [blame] | 366 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 367 | if (nss == 1) |
| 368 | mbpsx10_rate = mbpsx10_rate >> 1; |
| 369 | |
| 370 | sgi_rate = mbpsx10_rate; |
| 371 | lgi_rate = mbpsx10_rate; |
| 372 | if (shortgi) |
| 373 | wma_bin_search_rate(ht20_400ns_rate_tbl, |
| 374 | WMA_MAX_HT20_RATE_TBL_SIZE, &sgi_rate, |
| 375 | &sgi_idx); |
| 376 | else |
| 377 | wma_bin_search_rate(ht20_800ns_rate_tbl, |
| 378 | WMA_MAX_HT20_RATE_TBL_SIZE, &lgi_rate, |
| 379 | &lgi_idx); |
| 380 | |
| 381 | wma_set_ht_vht_mcast_rate(shortgi, mbpsx10_rate, sgi_idx, sgi_rate, |
| 382 | lgi_idx, lgi_rate, 2, rate, streaming_rate); |
| 383 | if (nss == 1) |
| 384 | *streaming_rate = *streaming_rate << 1; |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 385 | return QDF_STATUS_SUCCESS; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 386 | } |
| 387 | |
| 388 | /** |
| 389 | * wma_fill_ht40_mcast_rate() - fill ht40 mcast rate |
| 390 | * @shortgi: short gaurd interval |
| 391 | * @mbpsx10_rate: mbps rates |
| 392 | * @nss: nss |
| 393 | * @rate: rate |
| 394 | * @streaming_rate: streaming rate |
| 395 | * |
Anurag Chouhan | f04e84f | 2016-03-03 10:12:12 +0530 | [diff] [blame] | 396 | * Return: QDF status |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 397 | */ |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 398 | static QDF_STATUS wma_fill_ht40_mcast_rate(uint32_t shortgi, |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 399 | int32_t mbpsx10_rate, uint8_t nss, |
| 400 | uint8_t *rate, |
| 401 | int32_t *streaming_rate) |
| 402 | { |
| 403 | uint8_t sgi_idx = 0, lgi_idx = 0; |
| 404 | int32_t sgi_rate, lgi_rate; |
| 405 | |
| 406 | /* for 2x2 divide the rate by 2 */ |
| 407 | if (nss == 1) |
| 408 | mbpsx10_rate = mbpsx10_rate >> 1; |
| 409 | |
| 410 | sgi_rate = mbpsx10_rate; |
| 411 | lgi_rate = mbpsx10_rate; |
| 412 | if (shortgi) |
| 413 | wma_bin_search_rate(ht40_400ns_rate_tbl, |
| 414 | WMA_MAX_HT40_RATE_TBL_SIZE, &sgi_rate, |
| 415 | &sgi_idx); |
| 416 | else |
| 417 | wma_bin_search_rate(ht40_800ns_rate_tbl, |
| 418 | WMA_MAX_HT40_RATE_TBL_SIZE, &lgi_rate, |
| 419 | &lgi_idx); |
| 420 | |
| 421 | wma_set_ht_vht_mcast_rate(shortgi, mbpsx10_rate, sgi_idx, sgi_rate, |
| 422 | lgi_idx, lgi_rate, 2, rate, streaming_rate); |
| 423 | |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 424 | return QDF_STATUS_SUCCESS; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 425 | } |
| 426 | |
| 427 | /** |
| 428 | * wma_fill_vht20_mcast_rate() - fill vht20 mcast rate |
| 429 | * @shortgi: short gaurd interval |
| 430 | * @mbpsx10_rate: mbps rates |
| 431 | * @nss: nss |
| 432 | * @rate: rate |
| 433 | * @streaming_rate: streaming rate |
| 434 | * |
Anurag Chouhan | f04e84f | 2016-03-03 10:12:12 +0530 | [diff] [blame] | 435 | * Return: QDF status |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 436 | */ |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 437 | static QDF_STATUS wma_fill_vht20_mcast_rate(uint32_t shortgi, |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 438 | int32_t mbpsx10_rate, uint8_t nss, |
| 439 | uint8_t *rate, |
| 440 | int32_t *streaming_rate) |
| 441 | { |
| 442 | uint8_t sgi_idx = 0, lgi_idx = 0; |
| 443 | int32_t sgi_rate, lgi_rate; |
| 444 | |
| 445 | /* for 2x2 divide the rate by 2 */ |
| 446 | if (nss == 1) |
| 447 | mbpsx10_rate = mbpsx10_rate >> 1; |
| 448 | |
| 449 | sgi_rate = mbpsx10_rate; |
| 450 | lgi_rate = mbpsx10_rate; |
| 451 | if (shortgi) |
| 452 | wma_bin_search_rate(vht20_400ns_rate_tbl, |
| 453 | WMA_MAX_VHT20_RATE_TBL_SIZE, &sgi_rate, |
| 454 | &sgi_idx); |
| 455 | else |
| 456 | wma_bin_search_rate(vht20_800ns_rate_tbl, |
| 457 | WMA_MAX_VHT20_RATE_TBL_SIZE, &lgi_rate, |
| 458 | &lgi_idx); |
| 459 | |
| 460 | wma_set_ht_vht_mcast_rate(shortgi, mbpsx10_rate, sgi_idx, sgi_rate, |
| 461 | lgi_idx, lgi_rate, 3, rate, streaming_rate); |
| 462 | if (nss == 1) |
| 463 | *streaming_rate = *streaming_rate << 1; |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 464 | return QDF_STATUS_SUCCESS; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 465 | } |
| 466 | |
| 467 | /** |
| 468 | * wma_fill_vht40_mcast_rate() - fill vht40 mcast rate |
| 469 | * @shortgi: short gaurd interval |
| 470 | * @mbpsx10_rate: mbps rates |
| 471 | * @nss: nss |
| 472 | * @rate: rate |
| 473 | * @streaming_rate: streaming rate |
| 474 | * |
Anurag Chouhan | f04e84f | 2016-03-03 10:12:12 +0530 | [diff] [blame] | 475 | * Return: QDF status |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 476 | */ |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 477 | static QDF_STATUS wma_fill_vht40_mcast_rate(uint32_t shortgi, |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 478 | int32_t mbpsx10_rate, uint8_t nss, |
| 479 | uint8_t *rate, |
| 480 | int32_t *streaming_rate) |
| 481 | { |
| 482 | uint8_t sgi_idx = 0, lgi_idx = 0; |
| 483 | int32_t sgi_rate, lgi_rate; |
| 484 | |
| 485 | /* for 2x2 divide the rate by 2 */ |
| 486 | if (nss == 1) |
| 487 | mbpsx10_rate = mbpsx10_rate >> 1; |
| 488 | |
| 489 | sgi_rate = mbpsx10_rate; |
| 490 | lgi_rate = mbpsx10_rate; |
| 491 | if (shortgi) |
| 492 | wma_bin_search_rate(vht40_400ns_rate_tbl, |
| 493 | WMA_MAX_VHT40_RATE_TBL_SIZE, &sgi_rate, |
| 494 | &sgi_idx); |
| 495 | else |
| 496 | wma_bin_search_rate(vht40_800ns_rate_tbl, |
| 497 | WMA_MAX_VHT40_RATE_TBL_SIZE, &lgi_rate, |
| 498 | &lgi_idx); |
| 499 | |
| 500 | wma_set_ht_vht_mcast_rate(shortgi, mbpsx10_rate, |
| 501 | sgi_idx, sgi_rate, lgi_idx, lgi_rate, |
| 502 | 3, rate, streaming_rate); |
| 503 | if (nss == 1) |
| 504 | *streaming_rate = *streaming_rate << 1; |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 505 | return QDF_STATUS_SUCCESS; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 506 | } |
| 507 | |
| 508 | /** |
| 509 | * wma_fill_vht80_mcast_rate() - fill vht80 mcast rate |
| 510 | * @shortgi: short gaurd interval |
| 511 | * @mbpsx10_rate: mbps rates |
| 512 | * @nss: nss |
| 513 | * @rate: rate |
| 514 | * @streaming_rate: streaming rate |
| 515 | * |
Anurag Chouhan | f04e84f | 2016-03-03 10:12:12 +0530 | [diff] [blame] | 516 | * Return: QDF status |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 517 | */ |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 518 | static QDF_STATUS wma_fill_vht80_mcast_rate(uint32_t shortgi, |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 519 | int32_t mbpsx10_rate, uint8_t nss, |
| 520 | uint8_t *rate, |
| 521 | int32_t *streaming_rate) |
| 522 | { |
| 523 | uint8_t sgi_idx = 0, lgi_idx = 0; |
| 524 | int32_t sgi_rate, lgi_rate; |
| 525 | |
| 526 | /* for 2x2 divide the rate by 2 */ |
| 527 | if (nss == 1) |
| 528 | mbpsx10_rate = mbpsx10_rate >> 1; |
| 529 | |
| 530 | sgi_rate = mbpsx10_rate; |
| 531 | lgi_rate = mbpsx10_rate; |
| 532 | if (shortgi) |
| 533 | wma_bin_search_rate(vht80_400ns_rate_tbl, |
| 534 | WMA_MAX_VHT80_RATE_TBL_SIZE, &sgi_rate, |
| 535 | &sgi_idx); |
| 536 | else |
| 537 | wma_bin_search_rate(vht80_800ns_rate_tbl, |
| 538 | WMA_MAX_VHT80_RATE_TBL_SIZE, &lgi_rate, |
| 539 | &lgi_idx); |
| 540 | |
| 541 | wma_set_ht_vht_mcast_rate(shortgi, mbpsx10_rate, sgi_idx, sgi_rate, |
| 542 | lgi_idx, lgi_rate, 3, rate, streaming_rate); |
| 543 | if (nss == 1) |
| 544 | *streaming_rate = *streaming_rate << 1; |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 545 | return QDF_STATUS_SUCCESS; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 546 | } |
| 547 | |
| 548 | /** |
| 549 | * wma_fill_ht_mcast_rate() - fill ht mcast rate |
| 550 | * @shortgi: short gaurd interval |
| 551 | * @chwidth: channel width |
| 552 | * @chanmode: channel mode |
| 553 | * @mhz: frequency |
| 554 | * @mbpsx10_rate: mbps rates |
| 555 | * @nss: nss |
| 556 | * @rate: rate |
| 557 | * @streaming_rate: streaming rate |
| 558 | * |
Anurag Chouhan | f04e84f | 2016-03-03 10:12:12 +0530 | [diff] [blame] | 559 | * Return: QDF status |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 560 | */ |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 561 | static QDF_STATUS wma_fill_ht_mcast_rate(uint32_t shortgi, |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 562 | uint32_t chwidth, int32_t mbpsx10_rate, |
| 563 | uint8_t nss, WLAN_PHY_MODE chanmode, |
| 564 | uint8_t *rate, |
| 565 | int32_t *streaming_rate) |
| 566 | { |
| 567 | int32_t ret = 0; |
| 568 | |
| 569 | *streaming_rate = 0; |
| 570 | if (chwidth == 0) |
| 571 | ret = wma_fill_ht20_mcast_rate(shortgi, mbpsx10_rate, |
| 572 | nss, rate, streaming_rate); |
| 573 | else if (chwidth == 1) |
| 574 | ret = wma_fill_ht40_mcast_rate(shortgi, mbpsx10_rate, |
| 575 | nss, rate, streaming_rate); |
| 576 | else |
| 577 | WMA_LOGE("%s: Error, Invalid chwidth enum %d", __func__, |
| 578 | chwidth); |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 579 | return (*streaming_rate != 0) ? QDF_STATUS_SUCCESS : QDF_STATUS_E_INVAL; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 580 | } |
| 581 | |
| 582 | /** |
| 583 | * wma_fill_vht_mcast_rate() - fill vht mcast rate |
| 584 | * @shortgi: short gaurd interval |
| 585 | * @chwidth: channel width |
| 586 | * @chanmode: channel mode |
| 587 | * @mhz: frequency |
| 588 | * @mbpsx10_rate: mbps rates |
| 589 | * @nss: nss |
| 590 | * @rate: rate |
| 591 | * @streaming_rate: streaming rate |
| 592 | * |
Anurag Chouhan | f04e84f | 2016-03-03 10:12:12 +0530 | [diff] [blame] | 593 | * Return: QDF status |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 594 | */ |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 595 | static QDF_STATUS wma_fill_vht_mcast_rate(uint32_t shortgi, |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 596 | uint32_t chwidth, |
| 597 | int32_t mbpsx10_rate, uint8_t nss, |
| 598 | WLAN_PHY_MODE chanmode, |
| 599 | uint8_t *rate, |
| 600 | int32_t *streaming_rate) |
| 601 | { |
| 602 | int32_t ret = 0; |
| 603 | |
| 604 | *streaming_rate = 0; |
| 605 | if (chwidth == 0) |
| 606 | ret = wma_fill_vht20_mcast_rate(shortgi, mbpsx10_rate, nss, |
| 607 | rate, streaming_rate); |
| 608 | else if (chwidth == 1) |
| 609 | ret = wma_fill_vht40_mcast_rate(shortgi, mbpsx10_rate, nss, |
| 610 | rate, streaming_rate); |
| 611 | else if (chwidth == 2) |
| 612 | ret = wma_fill_vht80_mcast_rate(shortgi, mbpsx10_rate, nss, |
| 613 | rate, streaming_rate); |
| 614 | else |
| 615 | WMA_LOGE("%s: chwidth enum %d not supported", |
| 616 | __func__, chwidth); |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 617 | return (*streaming_rate != 0) ? QDF_STATUS_SUCCESS : QDF_STATUS_E_INVAL; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 618 | } |
| 619 | |
| 620 | #define WMA_MCAST_1X1_CUT_OFF_RATE 2000 |
| 621 | /** |
| 622 | * wma_encode_mc_rate() - fill mc rates |
| 623 | * @shortgi: short gaurd interval |
| 624 | * @chwidth: channel width |
| 625 | * @chanmode: channel mode |
| 626 | * @mhz: frequency |
| 627 | * @mbpsx10_rate: mbps rates |
| 628 | * @nss: nss |
| 629 | * @rate: rate |
| 630 | * |
Anurag Chouhan | f04e84f | 2016-03-03 10:12:12 +0530 | [diff] [blame] | 631 | * Return: QDF status |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 632 | */ |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 633 | static QDF_STATUS wma_encode_mc_rate(uint32_t shortgi, uint32_t chwidth, |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 634 | WLAN_PHY_MODE chanmode, A_UINT32 mhz, |
| 635 | int32_t mbpsx10_rate, uint8_t nss, |
| 636 | uint8_t *rate) |
| 637 | { |
| 638 | int32_t ret = 0; |
| 639 | |
| 640 | /* nss input value: 0 - 1x1; 1 - 2x2; 2 - 3x3 |
| 641 | * the phymode selection is based on following assumption: |
| 642 | * (1) if the app specifically requested 1x1 or 2x2 we hornor it |
| 643 | * (2) if mbpsx10_rate <= 540: always use BG |
| 644 | * (3) 540 < mbpsx10_rate <= 2000: use 1x1 HT/VHT |
| 645 | * (4) 2000 < mbpsx10_rate: use 2x2 HT/VHT |
| 646 | */ |
Manikandan Mohan | 1dd8b5d | 2017-04-18 15:54:09 -0700 | [diff] [blame] | 647 | WMA_LOGE("%s: Input: nss = %d, chanmode = %d, mbpsx10 = 0x%x, chwidth = %d, shortgi = %d", |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 648 | __func__, nss, chanmode, mbpsx10_rate, chwidth, shortgi); |
| 649 | if ((mbpsx10_rate & 0x40000000) && nss > 0) { |
| 650 | /* bit 30 indicates user inputed nss, |
| 651 | * bit 28 and 29 used to encode nss |
| 652 | */ |
| 653 | uint8_t user_nss = (mbpsx10_rate & 0x30000000) >> 28; |
| 654 | |
| 655 | nss = (user_nss < nss) ? user_nss : nss; |
| 656 | /* zero out bits 19 - 21 to recover the actual rate */ |
| 657 | mbpsx10_rate &= ~0x70000000; |
| 658 | } else if (mbpsx10_rate <= WMA_MCAST_1X1_CUT_OFF_RATE) { |
| 659 | /* if the input rate is less or equal to the |
| 660 | * 1x1 cutoff rate we use 1x1 only |
| 661 | */ |
| 662 | nss = 0; |
| 663 | } |
| 664 | /* encode NSS bits (bit 4, bit 5) */ |
| 665 | *rate = (nss & 0x3) << 4; |
| 666 | /* if mcast input rate exceeds the ofdm/cck max rate 54mpbs |
| 667 | * we try to choose best ht/vht mcs rate |
| 668 | */ |
| 669 | if (540 < mbpsx10_rate) { |
| 670 | /* cannot use ofdm/cck, choose closest ht/vht mcs rate */ |
| 671 | uint8_t rate_ht = *rate; |
| 672 | uint8_t rate_vht = *rate; |
| 673 | int32_t stream_rate_ht = 0; |
| 674 | int32_t stream_rate_vht = 0; |
| 675 | int32_t stream_rate = 0; |
| 676 | |
| 677 | ret = wma_fill_ht_mcast_rate(shortgi, chwidth, mbpsx10_rate, |
| 678 | nss, chanmode, &rate_ht, |
| 679 | &stream_rate_ht); |
Manikandan Mohan | 1dd8b5d | 2017-04-18 15:54:09 -0700 | [diff] [blame] | 680 | if (ret != QDF_STATUS_SUCCESS) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 681 | stream_rate_ht = 0; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 682 | if (mhz < WMA_2_4_GHZ_MAX_FREQ) { |
| 683 | /* not in 5 GHZ frequency */ |
| 684 | *rate = rate_ht; |
| 685 | stream_rate = stream_rate_ht; |
| 686 | goto ht_vht_done; |
| 687 | } |
| 688 | /* capable doing 11AC mcast so that search vht tables */ |
| 689 | ret = wma_fill_vht_mcast_rate(shortgi, chwidth, mbpsx10_rate, |
| 690 | nss, chanmode, &rate_vht, |
| 691 | &stream_rate_vht); |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 692 | if (ret != QDF_STATUS_SUCCESS) { |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 693 | if (stream_rate_ht != 0) |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 694 | ret = QDF_STATUS_SUCCESS; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 695 | *rate = rate_ht; |
| 696 | stream_rate = stream_rate_ht; |
| 697 | goto ht_vht_done; |
| 698 | } |
| 699 | if (stream_rate_ht == 0) { |
| 700 | /* only vht rate available */ |
| 701 | *rate = rate_vht; |
| 702 | stream_rate = stream_rate_vht; |
| 703 | } else { |
| 704 | /* set ht as default first */ |
| 705 | *rate = rate_ht; |
| 706 | stream_rate = stream_rate_ht; |
| 707 | if (stream_rate < mbpsx10_rate) { |
| 708 | if (mbpsx10_rate <= stream_rate_vht || |
| 709 | stream_rate < stream_rate_vht) { |
| 710 | *rate = rate_vht; |
| 711 | stream_rate = stream_rate_vht; |
| 712 | } |
| 713 | } else { |
| 714 | if (stream_rate_vht >= mbpsx10_rate && |
| 715 | stream_rate_vht < stream_rate) { |
| 716 | *rate = rate_vht; |
| 717 | stream_rate = stream_rate_vht; |
| 718 | } |
| 719 | } |
| 720 | } |
| 721 | ht_vht_done: |
Manikandan Mohan | 1dd8b5d | 2017-04-18 15:54:09 -0700 | [diff] [blame] | 722 | WMA_LOGE("%s: NSS = %d, ucast_chanmode = %d, freq = %d", |
| 723 | __func__, nss, chanmode, mhz); |
| 724 | WMA_LOGD(" %s: input_rate = %d, chwidth = %d rate = 0x%x, streaming_rate = %d", |
| 725 | __func__, mbpsx10_rate, chwidth, *rate, stream_rate); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 726 | } else { |
| 727 | if (mbpsx10_rate > 0) |
| 728 | ret = wma_fill_ofdm_cck_mcast_rate(mbpsx10_rate, |
| 729 | nss, rate); |
| 730 | else |
| 731 | *rate = 0xFF; |
| 732 | |
Manikandan Mohan | 1dd8b5d | 2017-04-18 15:54:09 -0700 | [diff] [blame] | 733 | WMA_LOGE("%s: NSS = %d, ucast_chanmode = %d, input_rate = %d, rate = 0x%x", |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 734 | __func__, nss, chanmode, mbpsx10_rate, *rate); |
| 735 | } |
| 736 | return ret; |
| 737 | } |
| 738 | |
| 739 | /** |
| 740 | * wma_set_bss_rate_flags() - set rate flags based on BSS capability |
| 741 | * @iface: txrx_node ctx |
| 742 | * @add_bss: add_bss params |
| 743 | * |
| 744 | * Return: none |
| 745 | */ |
| 746 | void wma_set_bss_rate_flags(struct wma_txrx_node *iface, |
| 747 | tpAddBssParams add_bss) |
| 748 | { |
| 749 | iface->rate_flags = 0; |
| 750 | |
| 751 | if (add_bss->vhtCapable) { |
| 752 | if (add_bss->ch_width == CH_WIDTH_80P80MHZ) |
| 753 | iface->rate_flags |= eHAL_TX_RATE_VHT80; |
| 754 | if (add_bss->ch_width == CH_WIDTH_160MHZ) |
| 755 | iface->rate_flags |= eHAL_TX_RATE_VHT80; |
| 756 | if (add_bss->ch_width == CH_WIDTH_80MHZ) |
| 757 | iface->rate_flags |= eHAL_TX_RATE_VHT80; |
| 758 | else if (add_bss->ch_width) |
| 759 | iface->rate_flags |= eHAL_TX_RATE_VHT40; |
| 760 | else |
| 761 | iface->rate_flags |= eHAL_TX_RATE_VHT20; |
| 762 | } |
| 763 | /* avoid to conflict with htCapable flag */ |
| 764 | else if (add_bss->htCapable) { |
| 765 | if (add_bss->ch_width) |
| 766 | iface->rate_flags |= eHAL_TX_RATE_HT40; |
| 767 | else |
| 768 | iface->rate_flags |= eHAL_TX_RATE_HT20; |
| 769 | } |
| 770 | |
| 771 | if (add_bss->staContext.fShortGI20Mhz || |
| 772 | add_bss->staContext.fShortGI40Mhz) |
| 773 | iface->rate_flags |= eHAL_TX_RATE_SGI; |
| 774 | |
| 775 | if (!add_bss->htCapable && !add_bss->vhtCapable) |
| 776 | iface->rate_flags = eHAL_TX_RATE_LEGACY; |
| 777 | } |
| 778 | |
| 779 | /** |
| 780 | * wmi_unified_send_txbf() - set txbf parameter to fw |
| 781 | * @wma: wma handle |
| 782 | * @params: txbf parameters |
| 783 | * |
| 784 | * Return: 0 for success or error code |
| 785 | */ |
| 786 | int32_t wmi_unified_send_txbf(tp_wma_handle wma, tpAddStaParams params) |
| 787 | { |
Yingying Tang | adfc2ac | 2016-09-29 16:41:26 +0800 | [diff] [blame] | 788 | wmi_vdev_txbf_en txbf_en = {0}; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 789 | |
| 790 | /* This is set when Other partner is Bformer |
| 791 | * and we are capable bformee(enabled both in ini and fw) |
| 792 | */ |
| 793 | txbf_en.sutxbfee = params->vhtTxBFCapable; |
| 794 | txbf_en.mutxbfee = params->vhtTxMUBformeeCapable; |
| 795 | txbf_en.sutxbfer = params->enable_su_tx_bformer; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 796 | |
| 797 | /* When MU TxBfee is set, SU TxBfee must be set by default */ |
| 798 | if (txbf_en.mutxbfee) |
| 799 | txbf_en.sutxbfee = txbf_en.mutxbfee; |
| 800 | |
| 801 | WMA_LOGD("txbf_en.sutxbfee %d txbf_en.mutxbfee %d, sutxbfer %d", |
| 802 | txbf_en.sutxbfee, txbf_en.mutxbfee, txbf_en.sutxbfer); |
| 803 | |
Govind Singh | d76a5b0 | 2016-03-08 15:12:14 +0530 | [diff] [blame] | 804 | return wma_vdev_set_param(wma->wmi_handle, |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 805 | params->smesessionId, |
| 806 | WMI_VDEV_PARAM_TXBF, |
| 807 | *((A_UINT8 *) &txbf_en)); |
| 808 | } |
| 809 | |
| 810 | /** |
| 811 | * wma_data_tx_ack_work_handler() - process data tx ack |
| 812 | * @ack_work: work structure |
| 813 | * |
| 814 | * Return: none |
| 815 | */ |
Krishna Kumaar Natarajan | 9f42170 | 2015-11-10 14:56:16 -0800 | [diff] [blame] | 816 | static void wma_data_tx_ack_work_handler(void *ack_work) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 817 | { |
| 818 | struct wma_tx_ack_work_ctx *work; |
| 819 | tp_wma_handle wma_handle; |
Himanshu Agarwal | 2fdf77a | 2016-12-29 11:41:00 +0530 | [diff] [blame] | 820 | wma_tx_ota_comp_callback ack_cb; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 821 | |
Rajeev Kumar | fec3dbe | 2016-01-19 15:23:52 -0800 | [diff] [blame] | 822 | if (cds_is_load_or_unload_in_progress()) { |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 823 | WMA_LOGE("%s: Driver load/unload in progress", __func__); |
| 824 | return; |
| 825 | } |
| 826 | |
Krishna Kumaar Natarajan | 9f42170 | 2015-11-10 14:56:16 -0800 | [diff] [blame] | 827 | work = (struct wma_tx_ack_work_ctx *)ack_work; |
| 828 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 829 | wma_handle = work->wma_handle; |
| 830 | ack_cb = wma_handle->umac_data_ota_ack_cb; |
| 831 | |
| 832 | if (work->status) |
| 833 | WMA_LOGE("Data Tx Ack Cb Status %d", work->status); |
| 834 | else |
| 835 | WMA_LOGD("Data Tx Ack Cb Status %d", work->status); |
| 836 | |
| 837 | /* Call the Ack Cb registered by UMAC */ |
| 838 | if (ack_cb) |
Himanshu Agarwal | 2fdf77a | 2016-12-29 11:41:00 +0530 | [diff] [blame] | 839 | ack_cb((tpAniSirGlobal) (wma_handle->mac_context), NULL, |
| 840 | work->status ? 0 : 1, NULL); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 841 | else |
| 842 | WMA_LOGE("Data Tx Ack Cb is NULL"); |
| 843 | |
| 844 | wma_handle->umac_data_ota_ack_cb = NULL; |
| 845 | wma_handle->last_umac_data_nbuf = NULL; |
Anurag Chouhan | 600c3a0 | 2016-03-01 10:33:54 +0530 | [diff] [blame] | 846 | qdf_mem_free(work); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 847 | wma_handle->ack_work_ctx = NULL; |
| 848 | } |
| 849 | |
| 850 | /** |
| 851 | * wma_data_tx_ack_comp_hdlr() - handles tx data ack completion |
| 852 | * @context: context with which the handler is registered |
| 853 | * @netbuf: tx data nbuf |
| 854 | * @err: status of tx completion |
| 855 | * |
| 856 | * This is the cb registered with TxRx for |
| 857 | * Ack Complete |
| 858 | * |
| 859 | * Return: none |
| 860 | */ |
| 861 | void |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 862 | wma_data_tx_ack_comp_hdlr(void *wma_context, qdf_nbuf_t netbuf, int32_t status) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 863 | { |
Leo Chang | 9646490 | 2016-10-28 11:10:54 -0700 | [diff] [blame] | 864 | void *pdev; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 865 | tp_wma_handle wma_handle = (tp_wma_handle) wma_context; |
| 866 | |
| 867 | if (NULL == wma_handle) { |
| 868 | WMA_LOGE("%s: Invalid WMA Handle", __func__); |
| 869 | return; |
| 870 | } |
| 871 | |
Anurag Chouhan | 6d76066 | 2016-02-20 16:05:43 +0530 | [diff] [blame] | 872 | pdev = cds_get_context(QDF_MODULE_ID_TXRX); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 873 | |
| 874 | if (NULL == pdev) { |
| 875 | WMA_LOGE("%s: Failed to get pdev", __func__); |
| 876 | return; |
| 877 | } |
| 878 | |
| 879 | /* |
| 880 | * if netBuf does not match with pending nbuf then just free the |
| 881 | * netbuf and do not call ack cb |
| 882 | */ |
| 883 | if (wma_handle->last_umac_data_nbuf != netbuf) { |
| 884 | if (wma_handle->umac_data_ota_ack_cb) { |
| 885 | WMA_LOGE("%s: nbuf does not match but umac_data_ota_ack_cb is not null", |
| 886 | __func__); |
| 887 | } else { |
| 888 | WMA_LOGE("%s: nbuf does not match and umac_data_ota_ack_cb is also null", |
| 889 | __func__); |
| 890 | } |
| 891 | goto free_nbuf; |
| 892 | } |
| 893 | |
| 894 | if (wma_handle && wma_handle->umac_data_ota_ack_cb) { |
| 895 | struct wma_tx_ack_work_ctx *ack_work; |
| 896 | |
Anurag Chouhan | 600c3a0 | 2016-03-01 10:33:54 +0530 | [diff] [blame] | 897 | ack_work = qdf_mem_malloc(sizeof(struct wma_tx_ack_work_ctx)); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 898 | wma_handle->ack_work_ctx = ack_work; |
| 899 | if (ack_work) { |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 900 | ack_work->wma_handle = wma_handle; |
| 901 | ack_work->sub_type = 0; |
| 902 | ack_work->status = status; |
| 903 | |
Anurag Chouhan | 42958bb | 2016-02-19 15:43:11 +0530 | [diff] [blame] | 904 | qdf_create_work(0, &ack_work->ack_cmp_work, |
Krishna Kumaar Natarajan | 9f42170 | 2015-11-10 14:56:16 -0800 | [diff] [blame] | 905 | wma_data_tx_ack_work_handler, |
| 906 | ack_work); |
Anurag Chouhan | 42958bb | 2016-02-19 15:43:11 +0530 | [diff] [blame] | 907 | qdf_sched_work(0, &ack_work->ack_cmp_work); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 908 | } |
| 909 | } |
| 910 | |
| 911 | free_nbuf: |
| 912 | /* unmap and freeing the tx buf as txrx is not taking care */ |
Leo Chang | 9646490 | 2016-10-28 11:10:54 -0700 | [diff] [blame] | 913 | qdf_nbuf_unmap_single(wma_handle->qdf_dev, netbuf, QDF_DMA_TO_DEVICE); |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 914 | qdf_nbuf_free(netbuf); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 915 | } |
| 916 | |
| 917 | /** |
Jiachao Wu | 08719b0 | 2017-07-05 13:05:34 +0800 | [diff] [blame] | 918 | * wma_check_txrx_chainmask() - check txrx chainmask |
| 919 | * @num_rf_chains: number of rf chains |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 920 | * @cmd_value: command value |
| 921 | * |
Jiachao Wu | 08719b0 | 2017-07-05 13:05:34 +0800 | [diff] [blame] | 922 | * Return: QDF_STATUS_SUCCESS for success or error code |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 923 | */ |
Jiachao Wu | 08719b0 | 2017-07-05 13:05:34 +0800 | [diff] [blame] | 924 | QDF_STATUS wma_check_txrx_chainmask(int num_rf_chains, int cmd_value) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 925 | { |
Jiachao Wu | 08719b0 | 2017-07-05 13:05:34 +0800 | [diff] [blame] | 926 | if ((cmd_value > WMA_MAX_RF_CHAINS(num_rf_chains)) || |
| 927 | (cmd_value < WMA_MIN_RF_CHAINS)) { |
| 928 | WMA_LOGE("%s: Requested value %d over the range", |
| 929 | __func__, cmd_value); |
| 930 | return QDF_STATUS_E_INVAL; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 931 | } |
Jiachao Wu | 08719b0 | 2017-07-05 13:05:34 +0800 | [diff] [blame] | 932 | return QDF_STATUS_SUCCESS; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 933 | } |
| 934 | |
| 935 | /** |
| 936 | * wma_peer_state_change_event_handler() - peer state change event handler |
| 937 | * @handle: wma handle |
| 938 | * @event_buff: event buffer |
| 939 | * @len: length of buffer |
| 940 | * |
| 941 | * This event handler unpauses vdev if peer state change to AUTHORIZED STATE |
| 942 | * |
| 943 | * Return: 0 for success or error code |
| 944 | */ |
| 945 | int wma_peer_state_change_event_handler(void *handle, |
| 946 | uint8_t *event_buff, |
| 947 | uint32_t len) |
| 948 | { |
| 949 | WMI_PEER_STATE_EVENTID_param_tlvs *param_buf; |
| 950 | wmi_peer_state_event_fixed_param *event; |
Venkata Sharath Chandra Manchala | 0d44d45 | 2016-11-23 17:48:15 -0800 | [diff] [blame] | 951 | struct cdp_vdev *vdev; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 952 | tp_wma_handle wma_handle = (tp_wma_handle) handle; |
| 953 | |
| 954 | if (!event_buff) { |
| 955 | WMA_LOGE("%s: Received NULL event ptr from FW", __func__); |
| 956 | return -EINVAL; |
| 957 | } |
| 958 | param_buf = (WMI_PEER_STATE_EVENTID_param_tlvs *) event_buff; |
| 959 | if (!param_buf) { |
| 960 | WMA_LOGE("%s: Received NULL buf ptr from FW", __func__); |
| 961 | return -ENOMEM; |
| 962 | } |
| 963 | |
| 964 | event = param_buf->fixed_param; |
| 965 | vdev = wma_find_vdev_by_id(wma_handle, event->vdev_id); |
| 966 | if (NULL == vdev) { |
Srinivas Girigowda | fc8b4ff | 2017-07-13 22:33:14 -0700 | [diff] [blame] | 967 | WMA_LOGD("%s: Couldn't find vdev for vdev_id: %d", |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 968 | __func__, event->vdev_id); |
| 969 | return -EINVAL; |
| 970 | } |
| 971 | |
Venkata Sharath Chandra Manchala | 0d44d45 | 2016-11-23 17:48:15 -0800 | [diff] [blame] | 972 | if ((cdp_get_opmode(cds_get_context(QDF_MODULE_ID_SOC), |
| 973 | vdev) == |
Leo Chang | 9646490 | 2016-10-28 11:10:54 -0700 | [diff] [blame] | 974 | wlan_op_mode_sta) && |
| 975 | event->state == WMI_PEER_STATE_AUTHORIZED) { |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 976 | /* |
| 977 | * set event so that hdd |
| 978 | * can procced and unpause tx queue |
| 979 | */ |
| 980 | #ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL |
| 981 | if (!wma_handle->peer_authorized_cb) { |
| 982 | WMA_LOGE("%s: peer authorized cb not registered", |
| 983 | __func__); |
| 984 | return -EINVAL; |
| 985 | } |
Manjunathappa Prakash | 10d357a | 2016-03-31 19:20:49 -0700 | [diff] [blame] | 986 | wma_handle->peer_authorized_cb(event->vdev_id); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 987 | #endif |
| 988 | } |
| 989 | |
| 990 | return 0; |
| 991 | } |
| 992 | |
| 993 | /** |
| 994 | * wma_set_enable_disable_mcc_adaptive_scheduler() -enable/disable mcc scheduler |
| 995 | * @mcc_adaptive_scheduler: enable/disable |
| 996 | * |
| 997 | * This function enable/disable mcc adaptive scheduler in fw. |
| 998 | * |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 999 | * Return: QDF_STATUS_SUCCESS for sucess or error code |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1000 | */ |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 1001 | QDF_STATUS wma_set_enable_disable_mcc_adaptive_scheduler(uint32_t |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1002 | mcc_adaptive_scheduler) |
| 1003 | { |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1004 | tp_wma_handle wma = NULL; |
Govind Singh | efc5ccd | 2016-04-25 11:11:55 +0530 | [diff] [blame] | 1005 | uint32_t pdev_id; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1006 | |
Anurag Chouhan | 6d76066 | 2016-02-20 16:05:43 +0530 | [diff] [blame] | 1007 | wma = cds_get_context(QDF_MODULE_ID_WMA); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1008 | if (NULL == wma) { |
| 1009 | WMA_LOGE("%s : Failed to get wma", __func__); |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 1010 | return QDF_STATUS_E_FAULT; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1011 | } |
| 1012 | |
Manishekar Chandrasekaran | d3ee975 | 2016-08-09 18:52:50 +0530 | [diff] [blame] | 1013 | /* |
| 1014 | * Since there could be up to two instances of OCS in FW (one per MAC), |
| 1015 | * FW provides the option of enabling and disabling MAS on a per MAC |
| 1016 | * basis. But, Host does not have enable/disable option for individual |
| 1017 | * MACs. So, FW agreed for the Host to send down a 'pdev id' of 0. |
| 1018 | * When 'pdev id' of 0 is used, FW treats this as a SOC level command |
| 1019 | * and applies the same value to both MACs. Irrespective of the value |
| 1020 | * of 'WMI_SERVICE_DEPRECATED_REPLACE', the pdev id needs to be '0' |
| 1021 | * (SOC level) for WMI_RESMGR_ADAPTIVE_OCS_ENABLE_DISABLE_CMDID |
Govind Singh | efc5ccd | 2016-04-25 11:11:55 +0530 | [diff] [blame] | 1022 | */ |
Manishekar Chandrasekaran | d3ee975 | 2016-08-09 18:52:50 +0530 | [diff] [blame] | 1023 | pdev_id = WMI_PDEV_ID_SOC; |
Govind Singh | efc5ccd | 2016-04-25 11:11:55 +0530 | [diff] [blame] | 1024 | |
Himanshu Agarwal | 17dea6e | 2016-03-09 12:11:22 +0530 | [diff] [blame] | 1025 | return wmi_unified_set_enable_disable_mcc_adaptive_scheduler_cmd( |
Govind Singh | efc5ccd | 2016-04-25 11:11:55 +0530 | [diff] [blame] | 1026 | wma->wmi_handle, mcc_adaptive_scheduler, pdev_id); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1027 | } |
| 1028 | |
| 1029 | /** |
| 1030 | * wma_set_mcc_channel_time_latency() -set MCC channel time latency |
| 1031 | * @wma: wma handle |
| 1032 | * @mcc_channel: mcc channel |
| 1033 | * @mcc_channel_time_latency: MCC channel time latency. |
| 1034 | * |
| 1035 | * Currently used to set time latency for an MCC vdev/adapter using operating |
| 1036 | * channel of it and channel number. The info is provided run time using |
| 1037 | * iwpriv command: iwpriv <wlan0 | p2p0> setMccLatency <latency in ms>. |
| 1038 | * |
Anurag Chouhan | f04e84f | 2016-03-03 10:12:12 +0530 | [diff] [blame] | 1039 | * Return: QDF status |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1040 | */ |
Manikandan Mohan | 1dd8b5d | 2017-04-18 15:54:09 -0700 | [diff] [blame] | 1041 | QDF_STATUS wma_set_mcc_channel_time_latency(tp_wma_handle wma, |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1042 | uint32_t mcc_channel, uint32_t mcc_channel_time_latency) |
| 1043 | { |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1044 | uint32_t cfg_val = 0; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1045 | struct sAniSirGlobal *pMac = NULL; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1046 | uint32_t channel1 = mcc_channel; |
| 1047 | uint32_t chan1_freq = cds_chan_to_freq(channel1); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1048 | |
| 1049 | if (!wma) { |
| 1050 | WMA_LOGE("%s:NULL wma ptr. Exiting", __func__); |
Anurag Chouhan | b2dc16f | 2016-02-25 11:47:37 +0530 | [diff] [blame] | 1051 | QDF_ASSERT(0); |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 1052 | return QDF_STATUS_E_FAILURE; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1053 | } |
Anurag Chouhan | 6d76066 | 2016-02-20 16:05:43 +0530 | [diff] [blame] | 1054 | pMac = cds_get_context(QDF_MODULE_ID_PE); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1055 | if (!pMac) { |
| 1056 | WMA_LOGE("%s:NULL pMac ptr. Exiting", __func__); |
Anurag Chouhan | b2dc16f | 2016-02-25 11:47:37 +0530 | [diff] [blame] | 1057 | QDF_ASSERT(0); |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 1058 | return QDF_STATUS_E_FAILURE; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1059 | } |
| 1060 | |
| 1061 | /* First step is to confirm if MCC is active */ |
| 1062 | if (!lim_is_in_mcc(pMac)) { |
| 1063 | WMA_LOGE("%s: MCC is not active. Exiting", __func__); |
Anurag Chouhan | b2dc16f | 2016-02-25 11:47:37 +0530 | [diff] [blame] | 1064 | QDF_ASSERT(0); |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 1065 | return QDF_STATUS_E_FAILURE; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1066 | } |
| 1067 | /* Confirm MCC adaptive scheduler feature is disabled */ |
| 1068 | if (wlan_cfg_get_int(pMac, WNI_CFG_ENABLE_MCC_ADAPTIVE_SCHED, |
Himanshu Agarwal | 17dea6e | 2016-03-09 12:11:22 +0530 | [diff] [blame] | 1069 | &cfg_val) == eSIR_SUCCESS) { |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1070 | if (cfg_val == WNI_CFG_ENABLE_MCC_ADAPTIVE_SCHED_STAMAX) { |
Manikandan Mohan | 1dd8b5d | 2017-04-18 15:54:09 -0700 | [diff] [blame] | 1071 | WMA_LOGD("%s: Can't set channel latency while MCC ADAPTIVE SCHED is enabled. Exit", |
| 1072 | __func__); |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 1073 | return QDF_STATUS_SUCCESS; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1074 | } |
| 1075 | } else { |
| 1076 | WMA_LOGE("%s: Failed to get value for MCC_ADAPTIVE_SCHED, " |
| 1077 | "Exit w/o setting latency", __func__); |
Anurag Chouhan | b2dc16f | 2016-02-25 11:47:37 +0530 | [diff] [blame] | 1078 | QDF_ASSERT(0); |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 1079 | return QDF_STATUS_E_FAILURE; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1080 | } |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1081 | |
Himanshu Agarwal | 17dea6e | 2016-03-09 12:11:22 +0530 | [diff] [blame] | 1082 | return wmi_unified_set_mcc_channel_time_latency_cmd(wma->wmi_handle, |
| 1083 | chan1_freq, |
| 1084 | mcc_channel_time_latency); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1085 | } |
| 1086 | |
| 1087 | /** |
| 1088 | * wma_set_mcc_channel_time_quota() -set MCC channel time quota |
| 1089 | * @wma: wma handle |
| 1090 | * @adapter_1_chan_number: adapter 1 channel number |
| 1091 | * @adapter_1_quota: adapter 1 quota |
| 1092 | * @adapter_2_chan_number: adapter 2 channel number |
| 1093 | * |
| 1094 | * Currently used to set time quota for 2 MCC vdevs/adapters using (operating |
| 1095 | * channel, quota) for each mode . The info is provided run time using |
| 1096 | * iwpriv command: iwpriv <wlan0 | p2p0> setMccQuota <quota in ms>. |
| 1097 | * Note: the quota provided in command is for the same mode in cmd. HDD |
| 1098 | * checks if MCC mode is active, gets the second mode and its operating chan. |
| 1099 | * Quota for the 2nd role is calculated as 100 - quota of first mode. |
| 1100 | * |
Anurag Chouhan | f04e84f | 2016-03-03 10:12:12 +0530 | [diff] [blame] | 1101 | * Return: QDF status |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1102 | */ |
Manikandan Mohan | 1dd8b5d | 2017-04-18 15:54:09 -0700 | [diff] [blame] | 1103 | QDF_STATUS wma_set_mcc_channel_time_quota(tp_wma_handle wma, |
| 1104 | uint32_t adapter_1_chan_number, uint32_t adapter_1_quota, |
| 1105 | uint32_t adapter_2_chan_number) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1106 | { |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1107 | uint32_t cfg_val = 0; |
| 1108 | struct sAniSirGlobal *pMac = NULL; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1109 | uint32_t chan1_freq = cds_chan_to_freq(adapter_1_chan_number); |
| 1110 | uint32_t chan2_freq = cds_chan_to_freq(adapter_2_chan_number); |
| 1111 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1112 | if (!wma) { |
| 1113 | WMA_LOGE("%s:NULL wma ptr. Exiting", __func__); |
Anurag Chouhan | b2dc16f | 2016-02-25 11:47:37 +0530 | [diff] [blame] | 1114 | QDF_ASSERT(0); |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 1115 | return QDF_STATUS_E_FAILURE; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1116 | } |
Anurag Chouhan | 6d76066 | 2016-02-20 16:05:43 +0530 | [diff] [blame] | 1117 | pMac = cds_get_context(QDF_MODULE_ID_PE); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1118 | if (!pMac) { |
| 1119 | WMA_LOGE("%s:NULL pMac ptr. Exiting", __func__); |
Anurag Chouhan | b2dc16f | 2016-02-25 11:47:37 +0530 | [diff] [blame] | 1120 | QDF_ASSERT(0); |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 1121 | return QDF_STATUS_E_FAILURE; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1122 | } |
| 1123 | |
| 1124 | /* First step is to confirm if MCC is active */ |
| 1125 | if (!lim_is_in_mcc(pMac)) { |
| 1126 | WMA_LOGD("%s: MCC is not active. Exiting", __func__); |
Anurag Chouhan | b2dc16f | 2016-02-25 11:47:37 +0530 | [diff] [blame] | 1127 | QDF_ASSERT(0); |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 1128 | return QDF_STATUS_E_FAILURE; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1129 | } |
| 1130 | |
| 1131 | /* Confirm MCC adaptive scheduler feature is disabled */ |
| 1132 | if (wlan_cfg_get_int(pMac, WNI_CFG_ENABLE_MCC_ADAPTIVE_SCHED, |
Himanshu Agarwal | 17dea6e | 2016-03-09 12:11:22 +0530 | [diff] [blame] | 1133 | &cfg_val) == eSIR_SUCCESS) { |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1134 | if (cfg_val == WNI_CFG_ENABLE_MCC_ADAPTIVE_SCHED_STAMAX) { |
Manikandan Mohan | 1dd8b5d | 2017-04-18 15:54:09 -0700 | [diff] [blame] | 1135 | WMA_LOGD("%s: Can't set channel quota while MCC_ADAPTIVE_SCHED is enabled. Exit", |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1136 | __func__); |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 1137 | return QDF_STATUS_SUCCESS; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1138 | } |
| 1139 | } else { |
Manikandan Mohan | 1dd8b5d | 2017-04-18 15:54:09 -0700 | [diff] [blame] | 1140 | WMA_LOGE("%s: Failed to retrieve WNI_CFG_ENABLE_MCC_ADAPTIVE_SCHED. Exit", |
| 1141 | __func__); |
Anurag Chouhan | b2dc16f | 2016-02-25 11:47:37 +0530 | [diff] [blame] | 1142 | QDF_ASSERT(0); |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 1143 | return QDF_STATUS_E_FAILURE; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1144 | } |
| 1145 | |
Himanshu Agarwal | 17dea6e | 2016-03-09 12:11:22 +0530 | [diff] [blame] | 1146 | return wmi_unified_set_mcc_channel_time_quota_cmd(wma->wmi_handle, |
| 1147 | chan1_freq, |
| 1148 | adapter_1_quota, |
| 1149 | chan2_freq); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1150 | } |
| 1151 | |
| 1152 | /** |
| 1153 | * wma_set_linkstate() - set wma linkstate |
| 1154 | * @wma: wma handle |
| 1155 | * @params: link state params |
| 1156 | * |
| 1157 | * Return: none |
| 1158 | */ |
| 1159 | void wma_set_linkstate(tp_wma_handle wma, tpLinkStateParams params) |
| 1160 | { |
Venkata Sharath Chandra Manchala | 0d44d45 | 2016-11-23 17:48:15 -0800 | [diff] [blame] | 1161 | struct cdp_pdev *pdev; |
| 1162 | struct cdp_vdev *vdev; |
Leo Chang | 9646490 | 2016-10-28 11:10:54 -0700 | [diff] [blame] | 1163 | void *soc = cds_get_context(QDF_MODULE_ID_SOC); |
Sandeep Puligilla | 7da8633 | 2016-11-10 16:14:40 -0800 | [diff] [blame] | 1164 | uint8_t vdev_id; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1165 | bool roam_synch_in_progress = false; |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 1166 | QDF_STATUS status; |
Sandeep Puligilla | 7da8633 | 2016-11-10 16:14:40 -0800 | [diff] [blame] | 1167 | struct wma_target_req *msg; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1168 | |
| 1169 | params->status = true; |
| 1170 | WMA_LOGD("%s: state %d selfmac %pM", __func__, |
| 1171 | params->state, params->selfMacAddr); |
Kiran Kumar Lokere | 92b1fca | 2016-05-23 15:28:15 -0700 | [diff] [blame] | 1172 | if ((params->state != eSIR_LINK_PREASSOC_STATE) && |
| 1173 | (params->state != eSIR_LINK_DOWN_STATE)) { |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1174 | WMA_LOGD("%s: unsupported link state %d", |
| 1175 | __func__, params->state); |
Deepak Dhamdhere | be72e80 | 2017-02-12 12:50:42 -0800 | [diff] [blame] | 1176 | params->status = false; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1177 | goto out; |
| 1178 | } |
| 1179 | |
Anurag Chouhan | 6d76066 | 2016-02-20 16:05:43 +0530 | [diff] [blame] | 1180 | pdev = cds_get_context(QDF_MODULE_ID_TXRX); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1181 | |
| 1182 | if (NULL == pdev) { |
| 1183 | WMA_LOGE("%s: Unable to get TXRX context", __func__); |
Deepak Dhamdhere | be72e80 | 2017-02-12 12:50:42 -0800 | [diff] [blame] | 1184 | params->status = false; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1185 | goto out; |
| 1186 | } |
| 1187 | |
| 1188 | vdev = wma_find_vdev_by_addr(wma, params->selfMacAddr, &vdev_id); |
| 1189 | if (!vdev) { |
| 1190 | WMA_LOGP("%s: vdev not found for addr: %pM", |
| 1191 | __func__, params->selfMacAddr); |
Deepak Dhamdhere | be72e80 | 2017-02-12 12:50:42 -0800 | [diff] [blame] | 1192 | params->status = false; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1193 | goto out; |
| 1194 | } |
| 1195 | |
| 1196 | if (wma_is_vdev_in_ap_mode(wma, vdev_id)) { |
| 1197 | WMA_LOGD("%s: Ignoring set link req in ap mode", __func__); |
Deepak Dhamdhere | be72e80 | 2017-02-12 12:50:42 -0800 | [diff] [blame] | 1198 | params->status = false; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1199 | goto out; |
| 1200 | } |
| 1201 | |
| 1202 | if (params->state == eSIR_LINK_PREASSOC_STATE) { |
Varun Reddy Yeturu | d5939f8 | 2015-12-24 18:14:02 -0800 | [diff] [blame] | 1203 | if (wma_is_roam_synch_in_progress(wma, vdev_id)) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1204 | roam_synch_in_progress = true; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1205 | status = wma_create_peer(wma, pdev, vdev, params->bssid, |
| 1206 | WMI_PEER_TYPE_DEFAULT, vdev_id, |
| 1207 | roam_synch_in_progress); |
Deepak Dhamdhere | be72e80 | 2017-02-12 12:50:42 -0800 | [diff] [blame] | 1208 | if (status != QDF_STATUS_SUCCESS) { |
Varun Reddy Yeturu | d5939f8 | 2015-12-24 18:14:02 -0800 | [diff] [blame] | 1209 | WMA_LOGE("%s: Unable to create peer", __func__); |
Deepak Dhamdhere | be72e80 | 2017-02-12 12:50:42 -0800 | [diff] [blame] | 1210 | params->status = false; |
| 1211 | } |
Varun Reddy Yeturu | d5939f8 | 2015-12-24 18:14:02 -0800 | [diff] [blame] | 1212 | if (roam_synch_in_progress) |
| 1213 | return; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1214 | } else { |
| 1215 | WMA_LOGD("%s, vdev_id: %d, pausing tx_ll_queue for VDEV_STOP", |
| 1216 | __func__, vdev_id); |
Venkata Sharath Chandra Manchala | 0d44d45 | 2016-11-23 17:48:15 -0800 | [diff] [blame] | 1217 | cdp_fc_vdev_pause(soc, |
| 1218 | wma->interfaces[vdev_id].handle, |
| 1219 | OL_TXQ_PAUSE_REASON_VDEV_STOP); |
Sandeep Puligilla | 7da8633 | 2016-11-10 16:14:40 -0800 | [diff] [blame] | 1220 | msg = wma_fill_vdev_req(wma, vdev_id, |
| 1221 | WMA_SET_LINK_STATE, |
| 1222 | WMA_TARGET_REQ_TYPE_VDEV_STOP, params, |
| 1223 | WMA_VDEV_STOP_REQUEST_TIMEOUT); |
| 1224 | if (!msg) { |
| 1225 | WMA_LOGP(FL("Failed to fill vdev request for vdev_id %d"), |
| 1226 | vdev_id); |
Deepak Dhamdhere | be72e80 | 2017-02-12 12:50:42 -0800 | [diff] [blame] | 1227 | params->status = false; |
Sandeep Puligilla | 7da8633 | 2016-11-10 16:14:40 -0800 | [diff] [blame] | 1228 | status = QDF_STATUS_E_NOMEM; |
| 1229 | } |
Mukul Sharma | 6411bb8 | 2017-03-01 15:57:07 +0530 | [diff] [blame] | 1230 | wma_vdev_set_pause_bit(vdev_id, PAUSE_TYPE_HOST); |
Dustin Brown | bf6d16b | 2017-03-03 11:41:05 -0800 | [diff] [blame] | 1231 | if (wma_send_vdev_stop_to_fw(wma, vdev_id)) { |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1232 | WMA_LOGP("%s: %d Failed to send vdev stop", |
| 1233 | __func__, __LINE__); |
Deepak Dhamdhere | be72e80 | 2017-02-12 12:50:42 -0800 | [diff] [blame] | 1234 | params->status = false; |
Abhishek Singh | c15f649 | 2017-07-27 16:25:51 +0530 | [diff] [blame] | 1235 | wma_remove_vdev_req(wma, vdev_id, |
| 1236 | WMA_TARGET_REQ_TYPE_VDEV_STOP); |
Sandeep Puligilla | 7da8633 | 2016-11-10 16:14:40 -0800 | [diff] [blame] | 1237 | } else { |
| 1238 | WMA_LOGP("%s: %d vdev stop sent vdev %d", |
| 1239 | __func__, __LINE__, vdev_id); |
| 1240 | /* |
| 1241 | * Remove peer, Vdev down and sending set link |
| 1242 | * response will be handled in vdev stop response |
| 1243 | * handler |
| 1244 | */ |
| 1245 | return; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1246 | } |
| 1247 | } |
| 1248 | out: |
| 1249 | wma_send_msg(wma, WMA_SET_LINK_STATE_RSP, (void *)params, 0); |
| 1250 | } |
| 1251 | |
/**
 * wma_process_rate_update_indicate() - rate update indication
 * @wma: wma handle
 * @pRateUpdateParams: Rate update params; ownership is taken by this
 *                     function and the buffer is freed on every path
 *
 * This function update rate & short GI interval to fw based on params
 * send by SME.  The rate to program is selected in priority order:
 * reliable-multicast rate, then broadcast rate, then the 2.4 GHz
 * multicast rate (saved in mcastDataRate24GHz).
 *
 * Return: QDF status
 */
QDF_STATUS wma_process_rate_update_indicate(tp_wma_handle wma,
					    tSirRateUpdateInd *
					    pRateUpdateParams)
{
	int32_t ret = 0;
	uint8_t vdev_id = 0;
	void *pdev;
	int32_t mbpsx10_rate = -1;	/* requested rate in 100 kbps units */
	uint32_t paramId;	/* WMI vdev param to program the rate into */
	uint8_t rate = 0;	/* encoded rate code sent to firmware */
	uint32_t short_gi;
	struct wma_txrx_node *intr = wma->interfaces;
	QDF_STATUS status;

	/* Get the vdev id */
	pdev = wma_find_vdev_by_addr(wma, pRateUpdateParams->bssid.bytes,
				     &vdev_id);
	if (!pdev) {
		WMA_LOGE("vdev handle is invalid for %pM",
			 pRateUpdateParams->bssid.bytes);
		qdf_mem_free(pRateUpdateParams);
		return QDF_STATUS_E_INVAL;
	}
	/* No explicit short-GI config: fall back to the vdev rate flags */
	short_gi = intr[vdev_id].config.shortgi;
	if (short_gi == 0)
		short_gi = (intr[vdev_id].rate_flags & eHAL_TX_RATE_SGI) ?
								true : false;
	/* first check if reliable TX mcast rate is used. If not check the bcast
	 * Then is mcast. Mcast rate is saved in mcastDataRate24GHz
	 */
	if (pRateUpdateParams->reliableMcastDataRateTxFlag > 0) {
		mbpsx10_rate = pRateUpdateParams->reliableMcastDataRate;
		paramId = WMI_VDEV_PARAM_MCAST_DATA_RATE;
		if (pRateUpdateParams->
		    reliableMcastDataRateTxFlag & eHAL_TX_RATE_SGI)
			short_gi = 1;	/* upper layer specified short GI */
	} else if (pRateUpdateParams->bcastDataRate > -1) {
		mbpsx10_rate = pRateUpdateParams->bcastDataRate;
		paramId = WMI_VDEV_PARAM_BCAST_DATA_RATE;
	} else {
		mbpsx10_rate = pRateUpdateParams->mcastDataRate24GHz;
		paramId = WMI_VDEV_PARAM_MCAST_DATA_RATE;
		if (pRateUpdateParams->
		    mcastDataRate24GHzTxFlag & eHAL_TX_RATE_SGI)
			short_gi = 1;	/* upper layer specified short GI */
	}
	WMA_LOGE("%s: dev_id = %d, dev_type = %d, dev_mode = %d,",
		 __func__, vdev_id, intr[vdev_id].type,
		 pRateUpdateParams->dev_mode);
	WMA_LOGE("%s: mac = %pM, config.shortgi = %d, rate_flags = 0x%x",
		 __func__, pRateUpdateParams->bssid.bytes,
		 intr[vdev_id].config.shortgi, intr[vdev_id].rate_flags);
	ret = wma_encode_mc_rate(short_gi, intr[vdev_id].config.chwidth,
				 intr[vdev_id].chanmode, intr[vdev_id].mhz,
				 mbpsx10_rate, pRateUpdateParams->nss, &rate);
	if (ret != QDF_STATUS_SUCCESS) {
		WMA_LOGE("%s: Error, Invalid input rate value", __func__);
		qdf_mem_free(pRateUpdateParams);
		/* NOTE(review): int32_t ret is implicitly converted to
		 * QDF_STATUS here; the values compared above suggest they
		 * share an encoding — confirm wma_encode_mc_rate()'s
		 * return type.
		 */
		return ret;
	}
	/* Program short GI first, then the selected rate param */
	status = wma_vdev_set_param(wma->wmi_handle, vdev_id,
				    WMI_VDEV_PARAM_SGI, short_gi);
	if (QDF_IS_STATUS_ERROR(status)) {
		WMA_LOGE("%s: Fail to Set WMI_VDEV_PARAM_SGI(%d), status = %d",
			 __func__, short_gi, status);
		qdf_mem_free(pRateUpdateParams);
		return status;
	}
	status = wma_vdev_set_param(wma->wmi_handle,
				    vdev_id, paramId, rate);
	qdf_mem_free(pRateUpdateParams);
	if (QDF_IS_STATUS_ERROR(status)) {
		WMA_LOGE("%s: Fail to Set rate, status = %d", __func__, status);
		return status;
	}

	return QDF_STATUS_SUCCESS;
}
| 1340 | |
| 1341 | /** |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1342 | * wma_mgmt_tx_ack_comp_hdlr() - handles tx ack mgmt completion |
| 1343 | * @context: context with which the handler is registered |
| 1344 | * @netbuf: tx mgmt nbuf |
| 1345 | * @status: status of tx completion |
| 1346 | * |
| 1347 | * This is callback registered with TxRx for |
| 1348 | * Ack Complete. |
| 1349 | * |
| 1350 | * Return: none |
| 1351 | */ |
| 1352 | static void |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 1353 | wma_mgmt_tx_ack_comp_hdlr(void *wma_context, qdf_nbuf_t netbuf, int32_t status) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1354 | { |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1355 | tp_wma_handle wma_handle = (tp_wma_handle) wma_context; |
Himanshu Agarwal | c733bd3 | 2017-11-18 18:35:42 +0530 | [diff] [blame] | 1356 | struct wlan_objmgr_pdev *pdev = (struct wlan_objmgr_pdev *) |
| 1357 | wma_handle->pdev; |
Sravan Kumar Kairam | 905b4c5 | 2017-10-17 19:38:14 +0530 | [diff] [blame] | 1358 | uint16_t desc_id; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1359 | |
Sravan Kumar Kairam | 905b4c5 | 2017-10-17 19:38:14 +0530 | [diff] [blame] | 1360 | desc_id = QDF_NBUF_CB_MGMT_TXRX_DESC_ID(netbuf); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1361 | |
Himanshu Agarwal | c733bd3 | 2017-11-18 18:35:42 +0530 | [diff] [blame] | 1362 | mgmt_txrx_tx_completion_handler(pdev, desc_id, status, NULL); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1363 | } |
| 1364 | |
| 1365 | /** |
| 1366 | * wma_mgmt_tx_dload_comp_hldr() - handles tx mgmt completion |
| 1367 | * @context: context with which the handler is registered |
| 1368 | * @netbuf: tx mgmt nbuf |
| 1369 | * @status: status of tx completion |
| 1370 | * |
| 1371 | * This function calls registered download callback while sending mgmt packet. |
| 1372 | * |
| 1373 | * Return: none |
| 1374 | */ |
| 1375 | static void |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 1376 | wma_mgmt_tx_dload_comp_hldr(void *wma_context, qdf_nbuf_t netbuf, |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1377 | int32_t status) |
| 1378 | { |
Anurag Chouhan | ce0dc99 | 2016-02-16 18:18:03 +0530 | [diff] [blame] | 1379 | QDF_STATUS qdf_status = QDF_STATUS_SUCCESS; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1380 | |
| 1381 | tp_wma_handle wma_handle = (tp_wma_handle) wma_context; |
| 1382 | void *mac_context = wma_handle->mac_context; |
| 1383 | |
| 1384 | WMA_LOGD("Tx Complete Status %d", status); |
| 1385 | |
| 1386 | if (!wma_handle->tx_frm_download_comp_cb) { |
| 1387 | WMA_LOGE("Tx Complete Cb not registered by umac"); |
| 1388 | return; |
| 1389 | } |
| 1390 | |
| 1391 | /* Call Tx Mgmt Complete Callback registered by umac */ |
| 1392 | wma_handle->tx_frm_download_comp_cb(mac_context, netbuf, 0); |
| 1393 | |
| 1394 | /* Reset Callback */ |
| 1395 | wma_handle->tx_frm_download_comp_cb = NULL; |
| 1396 | |
| 1397 | /* Set the Tx Mgmt Complete Event */ |
Anurag Chouhan | ce0dc99 | 2016-02-16 18:18:03 +0530 | [diff] [blame] | 1398 | qdf_status = qdf_event_set(&wma_handle->tx_frm_download_comp_event); |
| 1399 | if (!QDF_IS_STATUS_SUCCESS(qdf_status)) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1400 | WMA_LOGP("%s: Event Set failed - tx_frm_comp_event", __func__); |
| 1401 | } |
| 1402 | |
| 1403 | /** |
| 1404 | * wma_tx_attach() - attach tx related callbacks |
| 1405 | * @pwmaCtx: wma context |
| 1406 | * |
| 1407 | * attaches tx fn with underlying layer. |
| 1408 | * |
Anurag Chouhan | f04e84f | 2016-03-03 10:12:12 +0530 | [diff] [blame] | 1409 | * Return: QDF status |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1410 | */ |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 1411 | QDF_STATUS wma_tx_attach(tp_wma_handle wma_handle) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1412 | { |
| 1413 | /* Get the Vos Context */ |
| 1414 | p_cds_contextType cds_handle = |
| 1415 | (p_cds_contextType) (wma_handle->cds_context); |
| 1416 | |
| 1417 | /* Get the txRx Pdev handle */ |
Venkata Sharath Chandra Manchala | 0d44d45 | 2016-11-23 17:48:15 -0800 | [diff] [blame] | 1418 | struct cdp_pdev *txrx_pdev = cds_handle->pdev_txrx_ctx; |
Leo Chang | 9646490 | 2016-10-28 11:10:54 -0700 | [diff] [blame] | 1419 | void *soc = cds_get_context(QDF_MODULE_ID_SOC); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1420 | |
| 1421 | /* Register for Tx Management Frames */ |
Sravan Kumar Kairam | 905b4c5 | 2017-10-17 19:38:14 +0530 | [diff] [blame] | 1422 | cdp_mgmt_tx_cb_set(soc, txrx_pdev, 0, |
Venkata Sharath Chandra Manchala | 0d44d45 | 2016-11-23 17:48:15 -0800 | [diff] [blame] | 1423 | wma_mgmt_tx_dload_comp_hldr, |
| 1424 | wma_mgmt_tx_ack_comp_hdlr, wma_handle); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1425 | |
| 1426 | /* Store the Mac Context */ |
| 1427 | wma_handle->mac_context = cds_handle->pMACContext; |
| 1428 | |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 1429 | return QDF_STATUS_SUCCESS; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1430 | } |
| 1431 | |
| 1432 | /** |
| 1433 | * wma_tx_detach() - detach tx related callbacks |
| 1434 | * @tp_wma_handle: wma context |
| 1435 | * |
| 1436 | * Deregister with TxRx for Tx Mgmt Download and Ack completion. |
| 1437 | * |
Anurag Chouhan | f04e84f | 2016-03-03 10:12:12 +0530 | [diff] [blame] | 1438 | * Return: QDF status |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1439 | */ |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 1440 | QDF_STATUS wma_tx_detach(tp_wma_handle wma_handle) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1441 | { |
Nishank Aggarwal | a13b61d | 2016-12-01 12:53:58 +0530 | [diff] [blame] | 1442 | void *soc = cds_get_context(QDF_MODULE_ID_SOC); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1443 | |
| 1444 | /* Get the Vos Context */ |
| 1445 | p_cds_contextType cds_handle = |
| 1446 | (p_cds_contextType) (wma_handle->cds_context); |
| 1447 | |
| 1448 | /* Get the txRx Pdev handle */ |
Venkata Sharath Chandra Manchala | 0d44d45 | 2016-11-23 17:48:15 -0800 | [diff] [blame] | 1449 | struct cdp_pdev *txrx_pdev = cds_handle->pdev_txrx_ctx; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1450 | |
Nishank Aggarwal | a13b61d | 2016-12-01 12:53:58 +0530 | [diff] [blame] | 1451 | if (!soc) { |
| 1452 | WMA_LOGE("%s:SOC context is NULL", __func__); |
| 1453 | return QDF_STATUS_E_FAILURE; |
| 1454 | } |
| 1455 | |
Himanshu Agarwal | e1086fa | 2015-10-19 18:05:15 +0530 | [diff] [blame] | 1456 | if (txrx_pdev) { |
| 1457 | /* Deregister with TxRx for Tx Mgmt completion call back */ |
Sravan Kumar Kairam | 905b4c5 | 2017-10-17 19:38:14 +0530 | [diff] [blame] | 1458 | cdp_mgmt_tx_cb_set(soc, txrx_pdev, 0, NULL, NULL, txrx_pdev); |
Himanshu Agarwal | e1086fa | 2015-10-19 18:05:15 +0530 | [diff] [blame] | 1459 | } |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1460 | |
| 1461 | /* Reset Tx Frm Callbacks */ |
| 1462 | wma_handle->tx_frm_download_comp_cb = NULL; |
| 1463 | |
| 1464 | /* Reset Tx Data Frame Ack Cb */ |
| 1465 | wma_handle->umac_data_ota_ack_cb = NULL; |
| 1466 | |
| 1467 | /* Reset last Tx Data Frame nbuf ptr */ |
| 1468 | wma_handle->last_umac_data_nbuf = NULL; |
| 1469 | |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 1470 | return QDF_STATUS_SUCCESS; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1471 | } |
| 1472 | |
Poddar, Siddarth | 5a91f5b | 2016-04-28 12:24:10 +0530 | [diff] [blame] | 1473 | #if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL) || \ |
| 1474 | defined(QCA_LL_TX_FLOW_CONTROL_V2) || defined(CONFIG_HL_SUPPORT) |
| 1475 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1476 | /** |
| 1477 | * wma_mcc_vdev_tx_pause_evt_handler() - pause event handler |
| 1478 | * @handle: wma handle |
| 1479 | * @event: event buffer |
| 1480 | * @len: data length |
| 1481 | * |
| 1482 | * This function handle pause event from fw and pause/unpause |
| 1483 | * vdev. |
| 1484 | * |
| 1485 | * Return: 0 for success or error code. |
| 1486 | */ |
| 1487 | int wma_mcc_vdev_tx_pause_evt_handler(void *handle, uint8_t *event, |
| 1488 | uint32_t len) |
| 1489 | { |
| 1490 | tp_wma_handle wma = (tp_wma_handle) handle; |
| 1491 | WMI_TX_PAUSE_EVENTID_param_tlvs *param_buf; |
| 1492 | wmi_tx_pause_event_fixed_param *wmi_event; |
| 1493 | uint8_t vdev_id; |
| 1494 | A_UINT32 vdev_map; |
Leo Chang | 9646490 | 2016-10-28 11:10:54 -0700 | [diff] [blame] | 1495 | void *soc = cds_get_context(QDF_MODULE_ID_SOC); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1496 | |
| 1497 | param_buf = (WMI_TX_PAUSE_EVENTID_param_tlvs *) event; |
| 1498 | if (!param_buf) { |
| 1499 | WMA_LOGE("Invalid roam event buffer"); |
| 1500 | return -EINVAL; |
| 1501 | } |
| 1502 | |
Mukul Sharma | 4c60a7e | 2017-03-06 19:42:18 +0530 | [diff] [blame] | 1503 | if (pmo_ucfg_get_wow_bus_suspend(wma->psoc)) { |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1504 | WMA_LOGD(" Suspend is in progress: Pause/Unpause Tx is NoOp"); |
| 1505 | return 0; |
| 1506 | } |
| 1507 | |
Nishank Aggarwal | a13b61d | 2016-12-01 12:53:58 +0530 | [diff] [blame] | 1508 | if (!soc) { |
| 1509 | WMA_LOGE("%s:SOC context is NULL", __func__); |
| 1510 | return -EINVAL; |
| 1511 | } |
| 1512 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1513 | wmi_event = param_buf->fixed_param; |
| 1514 | vdev_map = wmi_event->vdev_map; |
| 1515 | /* FW mapped vdev from ID |
| 1516 | * vdev_map = (1 << vdev_id) |
Manikandan Mohan | 1dd8b5d | 2017-04-18 15:54:09 -0700 | [diff] [blame] | 1517 | * So, host should unmap to ID |
| 1518 | */ |
Naveen Rawat | 60f39da | 2017-10-03 16:58:25 -0700 | [diff] [blame] | 1519 | for (vdev_id = 0; vdev_map != 0 && vdev_id < wma->max_bssid; |
| 1520 | vdev_id++) { |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1521 | if (!(vdev_map & 0x1)) { |
| 1522 | /* No Vdev */ |
| 1523 | } else { |
| 1524 | if (!wma->interfaces[vdev_id].handle) { |
| 1525 | WMA_LOGE("%s: invalid vdev ID %d", __func__, |
| 1526 | vdev_id); |
| 1527 | /* Test Next VDEV */ |
| 1528 | vdev_map >>= 1; |
| 1529 | continue; |
| 1530 | } |
| 1531 | |
| 1532 | /* PAUSE action, add bitmap */ |
| 1533 | if (ACTION_PAUSE == wmi_event->action) { |
| 1534 | /* |
| 1535 | * Now only support per-dev pause so it is not |
| 1536 | * necessary to pause a paused queue again. |
| 1537 | */ |
Mukul Sharma | 6411bb8 | 2017-03-01 15:57:07 +0530 | [diff] [blame] | 1538 | if (!wma_vdev_get_pause_bitmap(vdev_id)) |
Leo Chang | 9646490 | 2016-10-28 11:10:54 -0700 | [diff] [blame] | 1539 | cdp_fc_vdev_pause(soc, |
Venkata Sharath Chandra Manchala | 0d44d45 | 2016-11-23 17:48:15 -0800 | [diff] [blame] | 1540 | wma-> |
| 1541 | interfaces[vdev_id].handle, |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1542 | OL_TXQ_PAUSE_REASON_FW); |
Mukul Sharma | 6411bb8 | 2017-03-01 15:57:07 +0530 | [diff] [blame] | 1543 | wma_vdev_set_pause_bit(vdev_id, |
| 1544 | wmi_event->pause_type); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1545 | } |
| 1546 | /* UNPAUSE action, clean bitmap */ |
| 1547 | else if (ACTION_UNPAUSE == wmi_event->action) { |
| 1548 | /* Handle unpause only if already paused */ |
Mukul Sharma | 6411bb8 | 2017-03-01 15:57:07 +0530 | [diff] [blame] | 1549 | if (wma_vdev_get_pause_bitmap(vdev_id)) { |
| 1550 | wma_vdev_clear_pause_bit(vdev_id, |
| 1551 | wmi_event->pause_type); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1552 | |
| 1553 | if (!wma->interfaces[vdev_id]. |
| 1554 | pause_bitmap) { |
| 1555 | /* PAUSE BIT MAP is cleared |
Manikandan Mohan | 1dd8b5d | 2017-04-18 15:54:09 -0700 | [diff] [blame] | 1556 | * UNPAUSE VDEV |
| 1557 | */ |
Leo Chang | 9646490 | 2016-10-28 11:10:54 -0700 | [diff] [blame] | 1558 | cdp_fc_vdev_unpause(soc, |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1559 | wma->interfaces[vdev_id] |
Venkata Sharath Chandra Manchala | 0d44d45 | 2016-11-23 17:48:15 -0800 | [diff] [blame] | 1560 | .handle, |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1561 | OL_TXQ_PAUSE_REASON_FW); |
| 1562 | } |
| 1563 | } |
| 1564 | } else { |
| 1565 | WMA_LOGE("Not Valid Action Type %d", |
| 1566 | wmi_event->action); |
| 1567 | } |
| 1568 | |
| 1569 | WMA_LOGD |
| 1570 | ("vdev_id %d, pause_map 0x%x, pause type %d, action %d", |
Mukul Sharma | 6411bb8 | 2017-03-01 15:57:07 +0530 | [diff] [blame] | 1571 | vdev_id, wma_vdev_get_pause_bitmap(vdev_id), |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1572 | wmi_event->pause_type, wmi_event->action); |
| 1573 | } |
| 1574 | /* Test Next VDEV */ |
| 1575 | vdev_map >>= 1; |
| 1576 | } |
| 1577 | |
| 1578 | return 0; |
| 1579 | } |
| 1580 | |
| 1581 | #endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */ |
| 1582 | |
Poddar, Siddarth | 5a91f5b | 2016-04-28 12:24:10 +0530 | [diff] [blame] | 1583 | #if defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL) |
| 1584 | |
| 1585 | /** |
| 1586 | * wma_set_peer_rate_report_condition - |
| 1587 | * this function set peer rate report |
| 1588 | * condition info to firmware. |
| 1589 | * @handle: Handle of WMA |
| 1590 | * @config: Bad peer configuration from SIR module |
| 1591 | * |
| 1592 | * It is a wrapper function to sent WMI_PEER_SET_RATE_REPORT_CONDITION_CMDID |
| 1593 | * to the firmare\target.If the command sent to firmware failed, free the |
| 1594 | * buffer that allocated. |
| 1595 | * |
| 1596 | * Return: QDF_STATUS based on values sent to firmware |
| 1597 | */ |
| 1598 | static |
| 1599 | QDF_STATUS wma_set_peer_rate_report_condition(WMA_HANDLE handle, |
| 1600 | struct t_bad_peer_txtcl_config *config) |
| 1601 | { |
| 1602 | tp_wma_handle wma_handle = (tp_wma_handle)handle; |
| 1603 | struct wmi_peer_rate_report_params rate_report_params = {0}; |
| 1604 | u_int32_t i, j; |
| 1605 | |
| 1606 | rate_report_params.rate_report_enable = config->enable; |
| 1607 | rate_report_params.backoff_time = config->tgt_backoff; |
| 1608 | rate_report_params.timer_period = config->tgt_report_prd; |
| 1609 | for (i = 0; i < WMI_PEER_RATE_REPORT_COND_MAX_NUM; i++) { |
| 1610 | rate_report_params.report_per_phy[i].cond_flags = |
| 1611 | config->threshold[i].cond; |
| 1612 | rate_report_params.report_per_phy[i].delta.delta_min = |
| 1613 | config->threshold[i].delta; |
| 1614 | rate_report_params.report_per_phy[i].delta.percent = |
| 1615 | config->threshold[i].percentage; |
| 1616 | for (j = 0; j < WMI_MAX_NUM_OF_RATE_THRESH; j++) { |
| 1617 | rate_report_params.report_per_phy[i]. |
| 1618 | report_rate_threshold[j] = |
| 1619 | config->threshold[i].thresh[j]; |
| 1620 | } |
| 1621 | } |
| 1622 | |
| 1623 | return wmi_unified_peer_rate_report_cmd(wma_handle->wmi_handle, |
| 1624 | &rate_report_params); |
| 1625 | } |
| 1626 | |
| 1627 | /** |
| 1628 | * wma_process_init_bad_peer_tx_ctl_info - |
| 1629 | * this function to initialize peer rate report config info. |
| 1630 | * @handle: Handle of WMA |
| 1631 | * @config: Bad peer configuration from SIR module |
| 1632 | * |
| 1633 | * This function initializes the bad peer tx control data structure in WMA, |
| 1634 | * sends down the initial configuration to the firmware and configures |
| 1635 | * the peer status update seeting in the tx_rx module. |
| 1636 | * |
| 1637 | * Return: QDF_STATUS based on procedure status |
| 1638 | */ |
| 1639 | |
| 1640 | QDF_STATUS wma_process_init_bad_peer_tx_ctl_info(tp_wma_handle wma, |
| 1641 | struct t_bad_peer_txtcl_config *config) |
| 1642 | { |
| 1643 | /* Parameter sanity check */ |
Venkata Sharath Chandra Manchala | 0d44d45 | 2016-11-23 17:48:15 -0800 | [diff] [blame] | 1644 | struct cdp_pdev *curr_pdev; |
Leo Chang | 9646490 | 2016-10-28 11:10:54 -0700 | [diff] [blame] | 1645 | void *soc = cds_get_context(QDF_MODULE_ID_SOC); |
Poddar, Siddarth | 5a91f5b | 2016-04-28 12:24:10 +0530 | [diff] [blame] | 1646 | |
| 1647 | if (NULL == wma || NULL == config) { |
| 1648 | WMA_LOGE("%s Invalid input\n", __func__); |
| 1649 | return QDF_STATUS_E_FAILURE; |
| 1650 | } |
| 1651 | |
| 1652 | curr_pdev = cds_get_context(QDF_MODULE_ID_TXRX); |
| 1653 | if (NULL == curr_pdev) { |
| 1654 | WMA_LOGE("%s: Failed to get pdev\n", __func__); |
| 1655 | return QDF_STATUS_E_FAILURE; |
| 1656 | } |
| 1657 | |
| 1658 | WMA_LOGE("%s enable %d period %d txq limit %d\n", __func__, |
| 1659 | config->enable, |
| 1660 | config->period, |
| 1661 | config->txq_limit); |
| 1662 | |
| 1663 | /* Only need to initialize the setting |
Manikandan Mohan | 1dd8b5d | 2017-04-18 15:54:09 -0700 | [diff] [blame] | 1664 | * when the feature is enabled |
| 1665 | */ |
Poddar, Siddarth | 5a91f5b | 2016-04-28 12:24:10 +0530 | [diff] [blame] | 1666 | if (config->enable) { |
| 1667 | int i = 0; |
| 1668 | |
Venkata Sharath Chandra Manchala | 0d44d45 | 2016-11-23 17:48:15 -0800 | [diff] [blame] | 1669 | cdp_bad_peer_txctl_set_setting(soc, |
| 1670 | curr_pdev, |
Leo Chang | 9646490 | 2016-10-28 11:10:54 -0700 | [diff] [blame] | 1671 | config->enable, |
| 1672 | config->period, |
| 1673 | config->txq_limit); |
Poddar, Siddarth | 5a91f5b | 2016-04-28 12:24:10 +0530 | [diff] [blame] | 1674 | |
| 1675 | for (i = 0; i < WLAN_WMA_IEEE80211_MAX_LEVEL; i++) { |
| 1676 | u_int32_t threshold, limit; |
Manikandan Mohan | 1dd8b5d | 2017-04-18 15:54:09 -0700 | [diff] [blame] | 1677 | |
| 1678 | threshold = config->threshold[i].thresh[0]; |
Poddar, Siddarth | 5a91f5b | 2016-04-28 12:24:10 +0530 | [diff] [blame] | 1679 | limit = config->threshold[i].txlimit; |
Venkata Sharath Chandra Manchala | 0d44d45 | 2016-11-23 17:48:15 -0800 | [diff] [blame] | 1680 | cdp_bad_peer_txctl_update_threshold(soc, |
| 1681 | curr_pdev, |
| 1682 | i, |
| 1683 | threshold, |
| 1684 | limit); |
Poddar, Siddarth | 5a91f5b | 2016-04-28 12:24:10 +0530 | [diff] [blame] | 1685 | } |
| 1686 | } |
| 1687 | |
| 1688 | return wma_set_peer_rate_report_condition(wma, config); |
| 1689 | } |
Poddar, Siddarth | 5a91f5b | 2016-04-28 12:24:10 +0530 | [diff] [blame] | 1690 | #endif /* defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL) */ |
| 1691 | |
| 1692 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1693 | /** |
| 1694 | * wma_process_init_thermal_info() - initialize thermal info |
| 1695 | * @wma: Pointer to WMA handle |
| 1696 | * @pThermalParams: Pointer to thermal mitigation parameters |
| 1697 | * |
| 1698 | * This function initializes the thermal management table in WMA, |
| 1699 | * sends down the initial temperature thresholds to the firmware |
| 1700 | * and configures the throttle period in the tx rx module |
| 1701 | * |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 1702 | * Returns: QDF_STATUS_SUCCESS for success otherwise failure |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1703 | */ |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 1704 | QDF_STATUS wma_process_init_thermal_info(tp_wma_handle wma, |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1705 | t_thermal_mgmt *pThermalParams) |
| 1706 | { |
| 1707 | t_thermal_cmd_params thermal_params; |
Venkata Sharath Chandra Manchala | 0d44d45 | 2016-11-23 17:48:15 -0800 | [diff] [blame] | 1708 | struct cdp_pdev *curr_pdev; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1709 | |
| 1710 | if (NULL == wma || NULL == pThermalParams) { |
| 1711 | WMA_LOGE("TM Invalid input"); |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 1712 | return QDF_STATUS_E_FAILURE; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1713 | } |
| 1714 | |
Anurag Chouhan | 6d76066 | 2016-02-20 16:05:43 +0530 | [diff] [blame] | 1715 | curr_pdev = cds_get_context(QDF_MODULE_ID_TXRX); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1716 | if (NULL == curr_pdev) { |
| 1717 | WMA_LOGE("%s: Failed to get pdev", __func__); |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 1718 | return QDF_STATUS_E_FAILURE; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1719 | } |
| 1720 | |
| 1721 | WMA_LOGD("TM enable %d period %d", pThermalParams->thermalMgmtEnabled, |
| 1722 | pThermalParams->throttlePeriod); |
| 1723 | |
Poddar, Siddarth | 8390502 | 2016-04-16 17:56:08 -0700 | [diff] [blame] | 1724 | WMA_LOGD("Throttle Duty Cycle Level in percentage:\n" |
| 1725 | "0 %d\n" |
| 1726 | "1 %d\n" |
| 1727 | "2 %d\n" |
| 1728 | "3 %d", |
| 1729 | pThermalParams->throttle_duty_cycle_tbl[0], |
| 1730 | pThermalParams->throttle_duty_cycle_tbl[1], |
| 1731 | pThermalParams->throttle_duty_cycle_tbl[2], |
| 1732 | pThermalParams->throttle_duty_cycle_tbl[3]); |
| 1733 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1734 | wma->thermal_mgmt_info.thermalMgmtEnabled = |
| 1735 | pThermalParams->thermalMgmtEnabled; |
| 1736 | wma->thermal_mgmt_info.thermalLevels[0].minTempThreshold = |
| 1737 | pThermalParams->thermalLevels[0].minTempThreshold; |
| 1738 | wma->thermal_mgmt_info.thermalLevels[0].maxTempThreshold = |
| 1739 | pThermalParams->thermalLevels[0].maxTempThreshold; |
| 1740 | wma->thermal_mgmt_info.thermalLevels[1].minTempThreshold = |
| 1741 | pThermalParams->thermalLevels[1].minTempThreshold; |
| 1742 | wma->thermal_mgmt_info.thermalLevels[1].maxTempThreshold = |
| 1743 | pThermalParams->thermalLevels[1].maxTempThreshold; |
| 1744 | wma->thermal_mgmt_info.thermalLevels[2].minTempThreshold = |
| 1745 | pThermalParams->thermalLevels[2].minTempThreshold; |
| 1746 | wma->thermal_mgmt_info.thermalLevels[2].maxTempThreshold = |
| 1747 | pThermalParams->thermalLevels[2].maxTempThreshold; |
| 1748 | wma->thermal_mgmt_info.thermalLevels[3].minTempThreshold = |
| 1749 | pThermalParams->thermalLevels[3].minTempThreshold; |
| 1750 | wma->thermal_mgmt_info.thermalLevels[3].maxTempThreshold = |
| 1751 | pThermalParams->thermalLevels[3].maxTempThreshold; |
| 1752 | wma->thermal_mgmt_info.thermalCurrLevel = WLAN_WMA_THERMAL_LEVEL_0; |
| 1753 | |
| 1754 | WMA_LOGD("TM level min max:\n" |
| 1755 | "0 %d %d\n" |
| 1756 | "1 %d %d\n" |
| 1757 | "2 %d %d\n" |
| 1758 | "3 %d %d", |
| 1759 | wma->thermal_mgmt_info.thermalLevels[0].minTempThreshold, |
| 1760 | wma->thermal_mgmt_info.thermalLevels[0].maxTempThreshold, |
| 1761 | wma->thermal_mgmt_info.thermalLevels[1].minTempThreshold, |
| 1762 | wma->thermal_mgmt_info.thermalLevels[1].maxTempThreshold, |
| 1763 | wma->thermal_mgmt_info.thermalLevels[2].minTempThreshold, |
| 1764 | wma->thermal_mgmt_info.thermalLevels[2].maxTempThreshold, |
| 1765 | wma->thermal_mgmt_info.thermalLevels[3].minTempThreshold, |
| 1766 | wma->thermal_mgmt_info.thermalLevels[3].maxTempThreshold); |
| 1767 | |
| 1768 | if (wma->thermal_mgmt_info.thermalMgmtEnabled) { |
Leo Chang | 9646490 | 2016-10-28 11:10:54 -0700 | [diff] [blame] | 1769 | cdp_throttle_init_period(cds_get_context(QDF_MODULE_ID_SOC), |
| 1770 | curr_pdev, |
Poddar, Siddarth | 8390502 | 2016-04-16 17:56:08 -0700 | [diff] [blame] | 1771 | pThermalParams->throttlePeriod, |
| 1772 | &pThermalParams->throttle_duty_cycle_tbl[0]); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1773 | |
| 1774 | /* Get the temperature thresholds to set in firmware */ |
| 1775 | thermal_params.minTemp = |
| 1776 | wma->thermal_mgmt_info.thermalLevels[WLAN_WMA_THERMAL_LEVEL_0].minTempThreshold; |
| 1777 | thermal_params.maxTemp = |
| 1778 | wma->thermal_mgmt_info.thermalLevels[WLAN_WMA_THERMAL_LEVEL_0].maxTempThreshold; |
| 1779 | thermal_params.thermalEnable = |
| 1780 | wma->thermal_mgmt_info.thermalMgmtEnabled; |
| 1781 | |
| 1782 | WMA_LOGE("TM sending the following to firmware: min %d max %d enable %d", |
| 1783 | thermal_params.minTemp, thermal_params.maxTemp, |
| 1784 | thermal_params.thermalEnable); |
| 1785 | |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 1786 | if (QDF_STATUS_SUCCESS != |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1787 | wma_set_thermal_mgmt(wma, thermal_params)) { |
| 1788 | WMA_LOGE("Could not send thermal mgmt command to the firmware!"); |
| 1789 | } |
| 1790 | } |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 1791 | return QDF_STATUS_SUCCESS; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1792 | } |
| 1793 | |
| 1794 | /** |
| 1795 | * wma_set_thermal_level_ind() - send SME set thermal level indication message |
| 1796 | * @level: thermal level |
| 1797 | * |
| 1798 | * Send SME SET_THERMAL_LEVEL_IND message |
| 1799 | * |
| 1800 | * Returns: none |
| 1801 | */ |
| 1802 | static void wma_set_thermal_level_ind(u_int8_t level) |
| 1803 | { |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 1804 | QDF_STATUS qdf_status = QDF_STATUS_SUCCESS; |
Rajeev Kumar | b60abe4 | 2017-01-21 15:39:31 -0800 | [diff] [blame] | 1805 | struct scheduler_msg sme_msg = {0}; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1806 | |
| 1807 | WMA_LOGI(FL("Thermal level: %d"), level); |
| 1808 | |
| 1809 | sme_msg.type = eWNI_SME_SET_THERMAL_LEVEL_IND; |
| 1810 | sme_msg.bodyptr = NULL; |
| 1811 | sme_msg.bodyval = level; |
| 1812 | |
Rajeev Kumar | b60abe4 | 2017-01-21 15:39:31 -0800 | [diff] [blame] | 1813 | qdf_status = scheduler_post_msg(QDF_MODULE_ID_SME, &sme_msg); |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 1814 | if (!QDF_IS_STATUS_SUCCESS(qdf_status)) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1815 | WMA_LOGE(FL( |
| 1816 | "Fail to post set thermal level ind msg")); |
| 1817 | } |
| 1818 | |
| 1819 | /** |
| 1820 | * wma_process_set_thermal_level() - sets thermal level |
| 1821 | * @wma: Pointer to WMA handle |
| 1822 | * @thermal_level : Thermal level |
| 1823 | * |
| 1824 | * This function sets the new thermal throttle level in the |
| 1825 | * txrx module and sends down the corresponding temperature |
| 1826 | * thresholds to the firmware |
| 1827 | * |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 1828 | * Returns: QDF_STATUS_SUCCESS for success otherwise failure |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1829 | */ |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 1830 | QDF_STATUS wma_process_set_thermal_level(tp_wma_handle wma, |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1831 | uint8_t thermal_level) |
| 1832 | { |
Venkata Sharath Chandra Manchala | 0d44d45 | 2016-11-23 17:48:15 -0800 | [diff] [blame] | 1833 | struct cdp_pdev *curr_pdev; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1834 | |
| 1835 | if (NULL == wma) { |
| 1836 | WMA_LOGE("TM Invalid input"); |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 1837 | return QDF_STATUS_E_FAILURE; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1838 | } |
| 1839 | |
Anurag Chouhan | 6d76066 | 2016-02-20 16:05:43 +0530 | [diff] [blame] | 1840 | curr_pdev = cds_get_context(QDF_MODULE_ID_TXRX); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1841 | if (NULL == curr_pdev) { |
| 1842 | WMA_LOGE("%s: Failed to get pdev", __func__); |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 1843 | return QDF_STATUS_E_FAILURE; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1844 | } |
| 1845 | |
| 1846 | WMA_LOGE("TM set level %d", thermal_level); |
| 1847 | |
| 1848 | /* Check if thermal mitigation is enabled */ |
| 1849 | if (!wma->thermal_mgmt_info.thermalMgmtEnabled) { |
| 1850 | WMA_LOGE("Thermal mgmt is not enabled, ignoring set level command"); |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 1851 | return QDF_STATUS_E_FAILURE; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1852 | } |
| 1853 | |
| 1854 | if (thermal_level >= WLAN_WMA_MAX_THERMAL_LEVELS) { |
| 1855 | WMA_LOGE("Invalid thermal level set %d", thermal_level); |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 1856 | return QDF_STATUS_E_FAILURE; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1857 | } |
| 1858 | |
| 1859 | if (thermal_level == wma->thermal_mgmt_info.thermalCurrLevel) { |
| 1860 | WMA_LOGD("Current level %d is same as the set level, ignoring", |
| 1861 | wma->thermal_mgmt_info.thermalCurrLevel); |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 1862 | return QDF_STATUS_SUCCESS; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1863 | } |
| 1864 | |
| 1865 | wma->thermal_mgmt_info.thermalCurrLevel = thermal_level; |
| 1866 | |
Leo Chang | 9646490 | 2016-10-28 11:10:54 -0700 | [diff] [blame] | 1867 | cdp_throttle_set_level(cds_get_context(QDF_MODULE_ID_SOC), |
Venkata Sharath Chandra Manchala | 0d44d45 | 2016-11-23 17:48:15 -0800 | [diff] [blame] | 1868 | curr_pdev, |
| 1869 | thermal_level); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1870 | |
| 1871 | /* Send SME SET_THERMAL_LEVEL_IND message */ |
| 1872 | wma_set_thermal_level_ind(thermal_level); |
| 1873 | |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 1874 | return QDF_STATUS_SUCCESS; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1875 | } |
| 1876 | |
| 1877 | |
| 1878 | /** |
| 1879 | * wma_set_thermal_mgmt() - set thermal mgmt command to fw |
| 1880 | * @wma_handle: Pointer to WMA handle |
| 1881 | * @thermal_info: Thermal command information |
| 1882 | * |
| 1883 | * This function sends the thermal management command |
| 1884 | * to the firmware |
| 1885 | * |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 1886 | * Return: QDF_STATUS_SUCCESS for success otherwise failure |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1887 | */ |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 1888 | QDF_STATUS wma_set_thermal_mgmt(tp_wma_handle wma_handle, |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1889 | t_thermal_cmd_params thermal_info) |
| 1890 | { |
Himanshu Agarwal | 17dea6e | 2016-03-09 12:11:22 +0530 | [diff] [blame] | 1891 | struct thermal_cmd_params mgmt_thermal_info = {0}; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1892 | |
Himanshu Agarwal | 17dea6e | 2016-03-09 12:11:22 +0530 | [diff] [blame] | 1893 | if (!wma_handle) { |
| 1894 | WMA_LOGE("%s:'wma_set_thermal_mgmt':invalid input", __func__); |
| 1895 | QDF_ASSERT(0); |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 1896 | return QDF_STATUS_E_FAILURE; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1897 | } |
| 1898 | |
Himanshu Agarwal | 17dea6e | 2016-03-09 12:11:22 +0530 | [diff] [blame] | 1899 | mgmt_thermal_info.min_temp = thermal_info.minTemp; |
| 1900 | mgmt_thermal_info.max_temp = thermal_info.maxTemp; |
| 1901 | mgmt_thermal_info.thermal_enable = thermal_info.thermalEnable; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1902 | |
Himanshu Agarwal | 17dea6e | 2016-03-09 12:11:22 +0530 | [diff] [blame] | 1903 | return wmi_unified_set_thermal_mgmt_cmd(wma_handle->wmi_handle, |
| 1904 | &mgmt_thermal_info); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1905 | } |
| 1906 | |
| 1907 | /** |
| 1908 | * wma_thermal_mgmt_get_level() - returns throttle level |
| 1909 | * @handle: Pointer to WMA handle |
| 1910 | * @temp: temperature |
| 1911 | * |
| 1912 | * This function returns the thermal(throttle) level |
| 1913 | * given the temperature |
| 1914 | * |
| 1915 | * Return: thermal (throttle) level |
| 1916 | */ |
Jeff Johnson | c4b47a9 | 2016-10-07 12:34:41 -0700 | [diff] [blame] | 1917 | static uint8_t wma_thermal_mgmt_get_level(void *handle, uint32_t temp) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1918 | { |
| 1919 | tp_wma_handle wma = (tp_wma_handle) handle; |
| 1920 | int i; |
| 1921 | uint8_t level; |
| 1922 | |
| 1923 | level = i = wma->thermal_mgmt_info.thermalCurrLevel; |
| 1924 | while (temp < wma->thermal_mgmt_info.thermalLevels[i].minTempThreshold |
| 1925 | && i > 0) { |
| 1926 | i--; |
| 1927 | level = i; |
| 1928 | } |
| 1929 | |
| 1930 | i = wma->thermal_mgmt_info.thermalCurrLevel; |
| 1931 | while (temp > wma->thermal_mgmt_info.thermalLevels[i].maxTempThreshold |
| 1932 | && i < (WLAN_WMA_MAX_THERMAL_LEVELS - 1)) { |
| 1933 | i++; |
| 1934 | level = i; |
| 1935 | } |
| 1936 | |
| 1937 | WMA_LOGW("Change thermal level from %d -> %d\n", |
| 1938 | wma->thermal_mgmt_info.thermalCurrLevel, level); |
| 1939 | |
| 1940 | return level; |
| 1941 | } |
| 1942 | |
| 1943 | /** |
| 1944 | * wma_thermal_mgmt_evt_handler() - thermal mgmt event handler |
| 1945 | * @wma_handle: Pointer to WMA handle |
| 1946 | * @event: Thermal event information |
| 1947 | * |
| 1948 | * This function handles the thermal mgmt event from the firmware len |
| 1949 | * |
| 1950 | * Return: 0 for success otherwise failure |
| 1951 | */ |
| 1952 | int wma_thermal_mgmt_evt_handler(void *handle, uint8_t *event, |
| 1953 | uint32_t len) |
| 1954 | { |
| 1955 | tp_wma_handle wma; |
| 1956 | wmi_thermal_mgmt_event_fixed_param *tm_event; |
| 1957 | uint8_t thermal_level; |
| 1958 | t_thermal_cmd_params thermal_params; |
| 1959 | WMI_THERMAL_MGMT_EVENTID_param_tlvs *param_buf; |
Venkata Sharath Chandra Manchala | 0d44d45 | 2016-11-23 17:48:15 -0800 | [diff] [blame] | 1960 | struct cdp_pdev *curr_pdev; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1961 | |
| 1962 | if (NULL == event || NULL == handle) { |
| 1963 | WMA_LOGE("Invalid thermal mitigation event buffer"); |
| 1964 | return -EINVAL; |
| 1965 | } |
| 1966 | |
| 1967 | wma = (tp_wma_handle) handle; |
| 1968 | |
| 1969 | if (NULL == wma) { |
| 1970 | WMA_LOGE("%s: Failed to get wma handle", __func__); |
| 1971 | return -EINVAL; |
| 1972 | } |
| 1973 | |
| 1974 | param_buf = (WMI_THERMAL_MGMT_EVENTID_param_tlvs *) event; |
| 1975 | |
Anurag Chouhan | 6d76066 | 2016-02-20 16:05:43 +0530 | [diff] [blame] | 1976 | curr_pdev = cds_get_context(QDF_MODULE_ID_TXRX); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1977 | if (NULL == curr_pdev) { |
| 1978 | WMA_LOGE("%s: Failed to get pdev", __func__); |
| 1979 | return -EINVAL; |
| 1980 | } |
| 1981 | |
| 1982 | /* Check if thermal mitigation is enabled */ |
| 1983 | if (!wma->thermal_mgmt_info.thermalMgmtEnabled) { |
| 1984 | WMA_LOGE("Thermal mgmt is not enabled, ignoring event"); |
| 1985 | return -EINVAL; |
| 1986 | } |
| 1987 | |
| 1988 | tm_event = param_buf->fixed_param; |
| 1989 | WMA_LOGD("Thermal mgmt event received with temperature %d", |
| 1990 | tm_event->temperature_degreeC); |
| 1991 | |
| 1992 | /* Get the thermal mitigation level for the reported temperature */ |
Manikandan Mohan | 1dd8b5d | 2017-04-18 15:54:09 -0700 | [diff] [blame] | 1993 | thermal_level = wma_thermal_mgmt_get_level(handle, |
| 1994 | tm_event->temperature_degreeC); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 1995 | WMA_LOGD("Thermal mgmt level %d", thermal_level); |
| 1996 | |
| 1997 | if (thermal_level == wma->thermal_mgmt_info.thermalCurrLevel) { |
| 1998 | WMA_LOGD("Current level %d is same as the set level, ignoring", |
| 1999 | wma->thermal_mgmt_info.thermalCurrLevel); |
| 2000 | return 0; |
| 2001 | } |
| 2002 | |
| 2003 | wma->thermal_mgmt_info.thermalCurrLevel = thermal_level; |
| 2004 | |
| 2005 | /* Inform txrx */ |
Leo Chang | 9646490 | 2016-10-28 11:10:54 -0700 | [diff] [blame] | 2006 | cdp_throttle_set_level(cds_get_context(QDF_MODULE_ID_SOC), |
Venkata Sharath Chandra Manchala | 0d44d45 | 2016-11-23 17:48:15 -0800 | [diff] [blame] | 2007 | curr_pdev, |
| 2008 | thermal_level); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2009 | |
| 2010 | /* Send SME SET_THERMAL_LEVEL_IND message */ |
| 2011 | wma_set_thermal_level_ind(thermal_level); |
| 2012 | |
| 2013 | /* Get the temperature thresholds to set in firmware */ |
| 2014 | thermal_params.minTemp = |
| 2015 | wma->thermal_mgmt_info.thermalLevels[thermal_level]. |
| 2016 | minTempThreshold; |
| 2017 | thermal_params.maxTemp = |
| 2018 | wma->thermal_mgmt_info.thermalLevels[thermal_level]. |
| 2019 | maxTempThreshold; |
| 2020 | thermal_params.thermalEnable = |
| 2021 | wma->thermal_mgmt_info.thermalMgmtEnabled; |
| 2022 | |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 2023 | if (QDF_STATUS_SUCCESS != wma_set_thermal_mgmt(wma, thermal_params)) { |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2024 | WMA_LOGE("Could not send thermal mgmt command to the firmware!"); |
| 2025 | return -EINVAL; |
| 2026 | } |
| 2027 | |
| 2028 | return 0; |
| 2029 | } |
| 2030 | |
| 2031 | /** |
Rajeev Kumar | 8e3e283 | 2015-11-06 16:02:54 -0800 | [diff] [blame] | 2032 | * wma_ibss_peer_info_event_handler() - IBSS peer info event handler |
| 2033 | * @handle: wma handle |
| 2034 | * @data: event data |
| 2035 | * @len: length of data |
| 2036 | * |
| 2037 | * This function handles IBSS peer info event from FW. |
| 2038 | * |
| 2039 | * Return: 0 for success or error code |
| 2040 | */ |
| 2041 | int wma_ibss_peer_info_event_handler(void *handle, uint8_t *data, |
| 2042 | uint32_t len) |
| 2043 | { |
Rajeev Kumar | cf7bd80 | 2017-04-18 11:11:42 -0700 | [diff] [blame] | 2044 | struct scheduler_msg cds_msg = {0}; |
Rajeev Kumar | 8e3e283 | 2015-11-06 16:02:54 -0800 | [diff] [blame] | 2045 | wmi_peer_info *peer_info; |
Leo Chang | 9646490 | 2016-10-28 11:10:54 -0700 | [diff] [blame] | 2046 | void *pdev; |
Rajeev Kumar | 8e3e283 | 2015-11-06 16:02:54 -0800 | [diff] [blame] | 2047 | tSirIbssPeerInfoParams *pSmeRsp; |
| 2048 | uint32_t count, num_peers, status; |
| 2049 | tSirIbssGetPeerInfoRspParams *pRsp; |
| 2050 | WMI_PEER_INFO_EVENTID_param_tlvs *param_tlvs; |
| 2051 | wmi_peer_info_event_fixed_param *fix_param; |
Rajeev Kumar | 94c9b45 | 2016-03-24 12:58:47 -0700 | [diff] [blame] | 2052 | uint8_t peer_mac[IEEE80211_ADDR_LEN]; |
Rajeev Kumar | 8e3e283 | 2015-11-06 16:02:54 -0800 | [diff] [blame] | 2053 | |
| 2054 | pdev = cds_get_context(QDF_MODULE_ID_TXRX); |
| 2055 | if (NULL == pdev) { |
| 2056 | WMA_LOGE("%s: could not get pdev context", __func__); |
| 2057 | return 0; |
| 2058 | } |
| 2059 | |
| 2060 | param_tlvs = (WMI_PEER_INFO_EVENTID_param_tlvs *) data; |
| 2061 | fix_param = param_tlvs->fixed_param; |
| 2062 | peer_info = param_tlvs->peer_info; |
| 2063 | num_peers = fix_param->num_peers; |
| 2064 | status = 0; |
| 2065 | |
| 2066 | WMA_LOGE("%s: num_peers %d", __func__, num_peers); |
| 2067 | |
| 2068 | pRsp = qdf_mem_malloc(sizeof(tSirIbssGetPeerInfoRspParams)); |
| 2069 | if (NULL == pRsp) { |
| 2070 | WMA_LOGE("%s: could not allocate memory for ibss peer info rsp len %zu", |
| 2071 | __func__, sizeof(tSirIbssGetPeerInfoRspParams)); |
| 2072 | return 0; |
| 2073 | } |
| 2074 | |
| 2075 | /*sanity check */ |
Himanshu Agarwal | 1b34c1f | 2017-12-22 13:43:50 +0530 | [diff] [blame] | 2076 | if ((num_peers > 32) || (num_peers > param_tlvs->num_peer_info) || |
| 2077 | (!peer_info)) { |
Jeff Johnson | adba396 | 2017-09-18 08:12:35 -0700 | [diff] [blame] | 2078 | WMA_LOGE("%s: Invalid event data from target num_peers %d peer_info %pK", |
Rajeev Kumar | 8e3e283 | 2015-11-06 16:02:54 -0800 | [diff] [blame] | 2079 | __func__, num_peers, peer_info); |
| 2080 | status = 1; |
| 2081 | goto send_response; |
| 2082 | } |
| 2083 | |
yeshwanth sriram guntuka | 0255f85 | 2016-08-31 17:18:19 +0530 | [diff] [blame] | 2084 | /* |
| 2085 | *For displaying only connected IBSS peer info, iterate till |
| 2086 | *last but one entry only as last entry is used for IBSS creator |
| 2087 | */ |
| 2088 | for (count = 0; count < num_peers-1; count++) { |
Rajeev Kumar | 8e3e283 | 2015-11-06 16:02:54 -0800 | [diff] [blame] | 2089 | pSmeRsp = &pRsp->ibssPeerInfoRspParams.peerInfoParams[count]; |
| 2090 | |
| 2091 | WMI_MAC_ADDR_TO_CHAR_ARRAY(&peer_info->peer_mac_address, |
| 2092 | peer_mac); |
Rajeev Kumar | 94c9b45 | 2016-03-24 12:58:47 -0700 | [diff] [blame] | 2093 | qdf_mem_copy(pSmeRsp->mac_addr, peer_mac, |
| 2094 | sizeof(pSmeRsp->mac_addr)); |
Rajeev Kumar | 8e3e283 | 2015-11-06 16:02:54 -0800 | [diff] [blame] | 2095 | pSmeRsp->mcsIndex = 0; |
| 2096 | pSmeRsp->rssi = peer_info->rssi + WMA_TGT_NOISE_FLOOR_DBM; |
| 2097 | pSmeRsp->txRate = peer_info->data_rate; |
| 2098 | pSmeRsp->txRateFlags = 0; |
| 2099 | |
Rajeev Kumar | 94c9b45 | 2016-03-24 12:58:47 -0700 | [diff] [blame] | 2100 | WMA_LOGE("peer " MAC_ADDRESS_STR "rssi %d txRate %d", |
| 2101 | MAC_ADDR_ARRAY(peer_mac), |
| 2102 | pSmeRsp->rssi, pSmeRsp->txRate); |
Rajeev Kumar | 8e3e283 | 2015-11-06 16:02:54 -0800 | [diff] [blame] | 2103 | |
| 2104 | peer_info++; |
| 2105 | } |
| 2106 | |
| 2107 | send_response: |
| 2108 | /* message header */ |
| 2109 | pRsp->mesgType = eWNI_SME_IBSS_PEER_INFO_RSP; |
| 2110 | pRsp->mesgLen = sizeof(tSirIbssGetPeerInfoRspParams); |
| 2111 | pRsp->ibssPeerInfoRspParams.status = status; |
| 2112 | pRsp->ibssPeerInfoRspParams.numPeers = num_peers; |
| 2113 | |
| 2114 | /* cds message wrapper */ |
| 2115 | cds_msg.type = eWNI_SME_IBSS_PEER_INFO_RSP; |
| 2116 | cds_msg.bodyptr = (void *)pRsp; |
| 2117 | cds_msg.bodyval = 0; |
| 2118 | |
| 2119 | if (QDF_STATUS_SUCCESS != |
Rajeev Kumar | 156188e | 2017-01-21 17:23:52 -0800 | [diff] [blame] | 2120 | scheduler_post_msg(QDF_MODULE_ID_SME, &cds_msg)) { |
Rajeev Kumar | 8e3e283 | 2015-11-06 16:02:54 -0800 | [diff] [blame] | 2121 | WMA_LOGE("%s: could not post peer info rsp msg to SME", |
| 2122 | __func__); |
| 2123 | /* free the mem and return */ |
| 2124 | qdf_mem_free((void *)pRsp); |
| 2125 | } |
| 2126 | |
| 2127 | return 0; |
| 2128 | } |
| 2129 | |
| 2130 | /** |
| 2131 | * wma_fast_tx_fail_event_handler() -tx failure event handler |
| 2132 | * @handle: wma handle |
| 2133 | * @data: event data |
| 2134 | * @len: data length |
| 2135 | * |
| 2136 | * Handle fast tx failure indication event from FW |
| 2137 | * |
| 2138 | * Return: 0 for success or error code. |
| 2139 | */ |
| 2140 | int wma_fast_tx_fail_event_handler(void *handle, uint8_t *data, |
| 2141 | uint32_t len) |
| 2142 | { |
| 2143 | uint8_t tx_fail_cnt; |
| 2144 | uint8_t peer_mac[IEEE80211_ADDR_LEN]; |
| 2145 | tp_wma_handle wma = (tp_wma_handle) handle; |
| 2146 | WMI_PEER_TX_FAIL_CNT_THR_EVENTID_param_tlvs *param_tlvs; |
| 2147 | wmi_peer_tx_fail_cnt_thr_event_fixed_param *fix_param; |
| 2148 | |
| 2149 | param_tlvs = (WMI_PEER_TX_FAIL_CNT_THR_EVENTID_param_tlvs *) data; |
| 2150 | fix_param = param_tlvs->fixed_param; |
| 2151 | |
| 2152 | WMI_MAC_ADDR_TO_CHAR_ARRAY(&fix_param->peer_mac_address, peer_mac); |
Manikandan Mohan | 1dd8b5d | 2017-04-18 15:54:09 -0700 | [diff] [blame] | 2153 | WMA_LOGE("%s: received fast tx failure event for peer 0x:%2x:0x%2x:0x%2x:0x%2x:0x%2x:0x%2x seq No %d", |
| 2154 | __func__, |
Rajeev Kumar | 8e3e283 | 2015-11-06 16:02:54 -0800 | [diff] [blame] | 2155 | peer_mac[0], peer_mac[1], peer_mac[2], peer_mac[3], |
| 2156 | peer_mac[4], peer_mac[5], fix_param->seq_no); |
| 2157 | |
| 2158 | tx_fail_cnt = fix_param->seq_no; |
| 2159 | |
| 2160 | /*call HDD callback */ |
Manikandan Mohan | 1dd8b5d | 2017-04-18 15:54:09 -0700 | [diff] [blame] | 2161 | if (wma->hddTxFailCb != NULL) |
Rajeev Kumar | 8e3e283 | 2015-11-06 16:02:54 -0800 | [diff] [blame] | 2162 | wma->hddTxFailCb(peer_mac, tx_fail_cnt); |
Manikandan Mohan | 1dd8b5d | 2017-04-18 15:54:09 -0700 | [diff] [blame] | 2163 | else |
Jeff Johnson | adba396 | 2017-09-18 08:12:35 -0700 | [diff] [blame] | 2164 | WMA_LOGE("%s: HDD callback is %pK", __func__, wma->hddTxFailCb); |
Rajeev Kumar | 8e3e283 | 2015-11-06 16:02:54 -0800 | [diff] [blame] | 2165 | |
| 2166 | return 0; |
| 2167 | } |
| 2168 | |
| 2169 | /** |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2170 | * wma_decap_to_8023() - Decapsulate to 802.3 format |
| 2171 | * @msdu: skb buffer |
| 2172 | * @info: decapsulate info |
| 2173 | * |
| 2174 | * Return: none |
| 2175 | */ |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 2176 | static void wma_decap_to_8023(qdf_nbuf_t msdu, struct wma_decap_info_t *info) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2177 | { |
| 2178 | struct llc_snap_hdr_t *llc_hdr; |
| 2179 | uint16_t ether_type; |
| 2180 | uint16_t l2_hdr_space; |
| 2181 | struct ieee80211_qosframe_addr4 *wh; |
| 2182 | uint8_t local_buf[ETHERNET_HDR_LEN]; |
| 2183 | uint8_t *buf; |
| 2184 | struct ethernet_hdr_t *ethr_hdr; |
| 2185 | |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 2186 | buf = (uint8_t *) qdf_nbuf_data(msdu); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2187 | llc_hdr = (struct llc_snap_hdr_t *)buf; |
| 2188 | ether_type = (llc_hdr->ethertype[0] << 8) | llc_hdr->ethertype[1]; |
| 2189 | /* do llc remove if needed */ |
| 2190 | l2_hdr_space = 0; |
| 2191 | if (IS_SNAP(llc_hdr)) { |
| 2192 | if (IS_BTEP(llc_hdr)) { |
| 2193 | /* remove llc */ |
| 2194 | l2_hdr_space += sizeof(struct llc_snap_hdr_t); |
| 2195 | llc_hdr = NULL; |
| 2196 | } else if (IS_RFC1042(llc_hdr)) { |
| 2197 | if (!(ether_type == ETHERTYPE_AARP || |
| 2198 | ether_type == ETHERTYPE_IPX)) { |
| 2199 | /* remove llc */ |
| 2200 | l2_hdr_space += sizeof(struct llc_snap_hdr_t); |
| 2201 | llc_hdr = NULL; |
| 2202 | } |
| 2203 | } |
| 2204 | } |
Manikandan Mohan | 1dd8b5d | 2017-04-18 15:54:09 -0700 | [diff] [blame] | 2205 | if (l2_hdr_space > ETHERNET_HDR_LEN) |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 2206 | buf = qdf_nbuf_pull_head(msdu, l2_hdr_space - ETHERNET_HDR_LEN); |
Manikandan Mohan | 1dd8b5d | 2017-04-18 15:54:09 -0700 | [diff] [blame] | 2207 | else if (l2_hdr_space < ETHERNET_HDR_LEN) |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 2208 | buf = qdf_nbuf_push_head(msdu, ETHERNET_HDR_LEN - l2_hdr_space); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2209 | |
Manikandan Mohan | 1dd8b5d | 2017-04-18 15:54:09 -0700 | [diff] [blame] | 2210 | /* mpdu hdr should be present in info,re-create ethr_hdr based on |
| 2211 | * mpdu hdr |
| 2212 | */ |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2213 | wh = (struct ieee80211_qosframe_addr4 *)info->hdr; |
| 2214 | ethr_hdr = (struct ethernet_hdr_t *)local_buf; |
| 2215 | switch (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) { |
| 2216 | case IEEE80211_FC1_DIR_NODS: |
Anurag Chouhan | 600c3a0 | 2016-03-01 10:33:54 +0530 | [diff] [blame] | 2217 | qdf_mem_copy(ethr_hdr->dest_addr, wh->i_addr1, |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2218 | ETHERNET_ADDR_LEN); |
Anurag Chouhan | 600c3a0 | 2016-03-01 10:33:54 +0530 | [diff] [blame] | 2219 | qdf_mem_copy(ethr_hdr->src_addr, wh->i_addr2, |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2220 | ETHERNET_ADDR_LEN); |
| 2221 | break; |
| 2222 | case IEEE80211_FC1_DIR_TODS: |
Anurag Chouhan | 600c3a0 | 2016-03-01 10:33:54 +0530 | [diff] [blame] | 2223 | qdf_mem_copy(ethr_hdr->dest_addr, wh->i_addr3, |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2224 | ETHERNET_ADDR_LEN); |
Anurag Chouhan | 600c3a0 | 2016-03-01 10:33:54 +0530 | [diff] [blame] | 2225 | qdf_mem_copy(ethr_hdr->src_addr, wh->i_addr2, |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2226 | ETHERNET_ADDR_LEN); |
| 2227 | break; |
| 2228 | case IEEE80211_FC1_DIR_FROMDS: |
Anurag Chouhan | 600c3a0 | 2016-03-01 10:33:54 +0530 | [diff] [blame] | 2229 | qdf_mem_copy(ethr_hdr->dest_addr, wh->i_addr1, |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2230 | ETHERNET_ADDR_LEN); |
Anurag Chouhan | 600c3a0 | 2016-03-01 10:33:54 +0530 | [diff] [blame] | 2231 | qdf_mem_copy(ethr_hdr->src_addr, wh->i_addr3, |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2232 | ETHERNET_ADDR_LEN); |
| 2233 | break; |
| 2234 | case IEEE80211_FC1_DIR_DSTODS: |
Anurag Chouhan | 600c3a0 | 2016-03-01 10:33:54 +0530 | [diff] [blame] | 2235 | qdf_mem_copy(ethr_hdr->dest_addr, wh->i_addr3, |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2236 | ETHERNET_ADDR_LEN); |
Anurag Chouhan | 600c3a0 | 2016-03-01 10:33:54 +0530 | [diff] [blame] | 2237 | qdf_mem_copy(ethr_hdr->src_addr, wh->i_addr4, |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2238 | ETHERNET_ADDR_LEN); |
| 2239 | break; |
| 2240 | } |
| 2241 | |
| 2242 | if (llc_hdr == NULL) { |
| 2243 | ethr_hdr->ethertype[0] = (ether_type >> 8) & 0xff; |
| 2244 | ethr_hdr->ethertype[1] = (ether_type) & 0xff; |
| 2245 | } else { |
| 2246 | uint32_t pktlen = |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 2247 | qdf_nbuf_len(msdu) - sizeof(ethr_hdr->ethertype); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2248 | ether_type = (uint16_t) pktlen; |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 2249 | ether_type = qdf_nbuf_len(msdu) - sizeof(struct ethernet_hdr_t); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2250 | ethr_hdr->ethertype[0] = (ether_type >> 8) & 0xff; |
| 2251 | ethr_hdr->ethertype[1] = (ether_type) & 0xff; |
| 2252 | } |
Anurag Chouhan | 600c3a0 | 2016-03-01 10:33:54 +0530 | [diff] [blame] | 2253 | qdf_mem_copy(buf, ethr_hdr, ETHERNET_HDR_LEN); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2254 | } |
| 2255 | |
| 2256 | /** |
| 2257 | * wma_ieee80211_hdrsize() - get 802.11 header size |
| 2258 | * @data: 80211 frame |
| 2259 | * |
| 2260 | * Return: size of header |
| 2261 | */ |
| 2262 | static int32_t wma_ieee80211_hdrsize(const void *data) |
| 2263 | { |
| 2264 | const struct ieee80211_frame *wh = (const struct ieee80211_frame *)data; |
| 2265 | int32_t size = sizeof(struct ieee80211_frame); |
| 2266 | |
| 2267 | if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS) |
| 2268 | size += IEEE80211_ADDR_LEN; |
| 2269 | if (IEEE80211_QOS_HAS_SEQ(wh)) |
| 2270 | size += sizeof(uint16_t); |
| 2271 | return size; |
| 2272 | } |
| 2273 | |
| 2274 | /** |
Naveen Rawat | 296a518 | 2017-09-25 14:02:48 -0700 | [diff] [blame] | 2275 | * rate_pream: Mapping from data rates to preamble. |
| 2276 | */ |
/* Indexed by enum rateid: entries 0-3 are CCK rates, 4-11 are OFDM rates */
static uint32_t rate_pream[] = {WMI_RATE_PREAMBLE_CCK, WMI_RATE_PREAMBLE_CCK,
				WMI_RATE_PREAMBLE_CCK, WMI_RATE_PREAMBLE_CCK,
				WMI_RATE_PREAMBLE_OFDM, WMI_RATE_PREAMBLE_OFDM,
				WMI_RATE_PREAMBLE_OFDM, WMI_RATE_PREAMBLE_OFDM,
				WMI_RATE_PREAMBLE_OFDM, WMI_RATE_PREAMBLE_OFDM,
				WMI_RATE_PREAMBLE_OFDM, WMI_RATE_PREAMBLE_OFDM};

/**
 * rate_mcs: Mapping from data rates to MCS (+4 for OFDM to keep the sequence).
 * Must stay index-aligned with rate_pream above.
 */
static uint32_t rate_mcs[] = {WMI_MAX_CCK_TX_RATE_1M, WMI_MAX_CCK_TX_RATE_2M,
			WMI_MAX_CCK_TX_RATE_5_5M, WMI_MAX_CCK_TX_RATE_11M,
			WMI_MAX_OFDM_TX_RATE_6M + 4,
			WMI_MAX_OFDM_TX_RATE_9M + 4,
			WMI_MAX_OFDM_TX_RATE_12M + 4,
			WMI_MAX_OFDM_TX_RATE_18M + 4,
			WMI_MAX_OFDM_TX_RATE_24M + 4,
			WMI_MAX_OFDM_TX_RATE_36M + 4,
			WMI_MAX_OFDM_TX_RATE_48M + 4,
			WMI_MAX_OFDM_TX_RATE_54M + 4};

/* frame_type values for struct tx_send_params */
#define WMA_TX_SEND_MGMT_TYPE 0
#define WMA_TX_SEND_DATA_TYPE 1
| 2300 | |
| 2301 | /** |
| 2302 | * wma_update_tx_send_params() - Update tx_send_params TLV info |
| 2303 | * @tx_param: Pointer to tx_send_params |
| 2304 | * @rid: rate ID passed by PE |
| 2305 | * |
| 2306 | * Return: None |
| 2307 | */ |
| 2308 | static void wma_update_tx_send_params(struct tx_send_params *tx_param, |
| 2309 | enum rateid rid) |
| 2310 | { |
| 2311 | uint8_t preamble = 0, nss = 0, rix = 0; |
| 2312 | |
| 2313 | preamble = rate_pream[rid]; |
| 2314 | rix = rate_mcs[rid]; |
| 2315 | |
| 2316 | tx_param->mcs_mask = (1 << rix); |
| 2317 | tx_param->nss_mask = (1 << nss); |
| 2318 | tx_param->preamble_type = (1 << preamble); |
| 2319 | tx_param->frame_type = WMA_TX_SEND_MGMT_TYPE; |
| 2320 | |
| 2321 | WMA_LOGD(FL("rate_id: %d, mcs: %0x, nss: %0x, preamble: %0x"), |
| 2322 | rid, tx_param->mcs_mask, tx_param->nss_mask, |
| 2323 | tx_param->preamble_type); |
| 2324 | } |
| 2325 | |
| 2326 | /** |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2327 | * wma_tx_packet() - Sends Tx Frame to TxRx |
| 2328 | * @wma_context: wma context |
| 2329 | * @tx_frame: frame buffer |
| 2330 | * @frmLen: frame length |
| 2331 | * @frmType: frame type |
| 2332 | * @txDir: tx diection |
| 2333 | * @tid: TID |
| 2334 | * @tx_frm_download_comp_cb: tx download callback handler |
| 2335 | * @tx_frm_ota_comp_cb: OTA complition handler |
| 2336 | * @tx_flag: tx flag |
| 2337 | * @vdev_id: vdev id |
| 2338 | * @tdlsFlag: tdls flag |
| 2339 | * |
| 2340 | * This function sends the frame corresponding to the |
| 2341 | * given vdev id. |
| 2342 | * This is blocking call till the downloading of frame is complete. |
| 2343 | * |
Anurag Chouhan | f04e84f | 2016-03-03 10:12:12 +0530 | [diff] [blame] | 2344 | * Return: QDF status |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2345 | */ |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 2346 | QDF_STATUS wma_tx_packet(void *wma_context, void *tx_frame, uint16_t frmLen, |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2347 | eFrameType frmType, eFrameTxDir txDir, uint8_t tid, |
Himanshu Agarwal | 2fdf77a | 2016-12-29 11:41:00 +0530 | [diff] [blame] | 2348 | wma_tx_dwnld_comp_callback tx_frm_download_comp_cb, |
| 2349 | void *pData, |
| 2350 | wma_tx_ota_comp_callback tx_frm_ota_comp_cb, |
| 2351 | uint8_t tx_flag, uint8_t vdev_id, bool tdlsFlag, |
Naveen Rawat | 296a518 | 2017-09-25 14:02:48 -0700 | [diff] [blame] | 2352 | uint16_t channel_freq, enum rateid rid) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2353 | { |
| 2354 | tp_wma_handle wma_handle = (tp_wma_handle) (wma_context); |
| 2355 | int32_t status; |
Anurag Chouhan | ce0dc99 | 2016-02-16 18:18:03 +0530 | [diff] [blame] | 2356 | QDF_STATUS qdf_status = QDF_STATUS_SUCCESS; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2357 | int32_t is_high_latency; |
Venkata Sharath Chandra Manchala | 0d44d45 | 2016-11-23 17:48:15 -0800 | [diff] [blame] | 2358 | struct cdp_vdev *txrx_vdev; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2359 | enum frame_index tx_frm_index = GENERIC_NODOWNLD_NOACK_COMP_INDEX; |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 2360 | tpSirMacFrameCtl pFc = (tpSirMacFrameCtl) (qdf_nbuf_data(tx_frame)); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2361 | uint8_t use_6mbps = 0; |
| 2362 | uint8_t downld_comp_required = 0; |
| 2363 | uint16_t chanfreq; |
| 2364 | #ifdef WLAN_FEATURE_11W |
| 2365 | uint8_t *pFrame = NULL; |
| 2366 | void *pPacket = NULL; |
| 2367 | uint16_t newFrmLen = 0; |
| 2368 | #endif /* WLAN_FEATURE_11W */ |
| 2369 | struct wma_txrx_node *iface; |
| 2370 | tpAniSirGlobal pMac; |
| 2371 | tpSirMacMgmtHdr mHdr; |
Govind Singh | 09c3b49 | 2016-03-08 16:05:14 +0530 | [diff] [blame] | 2372 | struct wmi_mgmt_params mgmt_param = {0}; |
Venkata Sharath Chandra Manchala | 0d44d45 | 2016-11-23 17:48:15 -0800 | [diff] [blame] | 2373 | struct cdp_cfg *ctrl_pdev; |
Leo Chang | 9646490 | 2016-10-28 11:10:54 -0700 | [diff] [blame] | 2374 | void *soc = cds_get_context(QDF_MODULE_ID_SOC); |
Himanshu Agarwal | 2fdf77a | 2016-12-29 11:41:00 +0530 | [diff] [blame] | 2375 | struct ieee80211_frame *wh; |
| 2376 | struct wlan_objmgr_peer *peer = NULL; |
| 2377 | struct wlan_objmgr_psoc *psoc; |
| 2378 | void *mac_addr; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2379 | |
| 2380 | if (NULL == wma_handle) { |
| 2381 | WMA_LOGE("wma_handle is NULL"); |
Manikandan Mohan | 41e2d6f | 2017-04-10 16:17:39 +0530 | [diff] [blame] | 2382 | cds_packet_free((void *)tx_frame); |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 2383 | return QDF_STATUS_E_FAILURE; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2384 | } |
| 2385 | iface = &wma_handle->interfaces[vdev_id]; |
| 2386 | /* Get the vdev handle from vdev id */ |
| 2387 | txrx_vdev = wma_handle->interfaces[vdev_id].handle; |
| 2388 | |
| 2389 | if (!txrx_vdev) { |
| 2390 | WMA_LOGE("TxRx Vdev Handle is NULL"); |
Manikandan Mohan | 41e2d6f | 2017-04-10 16:17:39 +0530 | [diff] [blame] | 2391 | cds_packet_free((void *)tx_frame); |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 2392 | return QDF_STATUS_E_FAILURE; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2393 | } |
| 2394 | |
Nishank Aggarwal | a13b61d | 2016-12-01 12:53:58 +0530 | [diff] [blame] | 2395 | if (!soc) { |
| 2396 | WMA_LOGE("%s:SOC context is NULL", __func__); |
| 2397 | return QDF_STATUS_E_FAILURE; |
| 2398 | } |
| 2399 | |
Leo Chang | 9646490 | 2016-10-28 11:10:54 -0700 | [diff] [blame] | 2400 | cdp_hl_tdls_flag_reset(soc, txrx_vdev, false); |
Poddar, Siddarth | 5a91f5b | 2016-04-28 12:24:10 +0530 | [diff] [blame] | 2401 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2402 | if (frmType >= TXRX_FRM_MAX) { |
| 2403 | WMA_LOGE("Invalid Frame Type Fail to send Frame"); |
Manikandan Mohan | 41e2d6f | 2017-04-10 16:17:39 +0530 | [diff] [blame] | 2404 | cds_packet_free((void *)tx_frame); |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 2405 | return QDF_STATUS_E_FAILURE; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2406 | } |
| 2407 | |
Anurag Chouhan | 6d76066 | 2016-02-20 16:05:43 +0530 | [diff] [blame] | 2408 | pMac = cds_get_context(QDF_MODULE_ID_PE); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2409 | if (!pMac) { |
| 2410 | WMA_LOGE("pMac Handle is NULL"); |
Manikandan Mohan | 41e2d6f | 2017-04-10 16:17:39 +0530 | [diff] [blame] | 2411 | cds_packet_free((void *)tx_frame); |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 2412 | return QDF_STATUS_E_FAILURE; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2413 | } |
| 2414 | /* |
| 2415 | * Currently only support to |
| 2416 | * send 80211 Mgmt and 80211 Data are added. |
| 2417 | */ |
| 2418 | if (!((frmType == TXRX_FRM_802_11_MGMT) || |
| 2419 | (frmType == TXRX_FRM_802_11_DATA))) { |
| 2420 | WMA_LOGE("No Support to send other frames except 802.11 Mgmt/Data"); |
Manikandan Mohan | 41e2d6f | 2017-04-10 16:17:39 +0530 | [diff] [blame] | 2421 | cds_packet_free((void *)tx_frame); |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 2422 | return QDF_STATUS_E_FAILURE; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2423 | } |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2424 | #ifdef WLAN_FEATURE_11W |
| 2425 | if ((iface && iface->rmfEnabled) && |
| 2426 | (frmType == TXRX_FRM_802_11_MGMT) && |
| 2427 | (pFc->subType == SIR_MAC_MGMT_DISASSOC || |
| 2428 | pFc->subType == SIR_MAC_MGMT_DEAUTH || |
| 2429 | pFc->subType == SIR_MAC_MGMT_ACTION)) { |
| 2430 | struct ieee80211_frame *wh = |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 2431 | (struct ieee80211_frame *)qdf_nbuf_data(tx_frame); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2432 | if (!IEEE80211_IS_BROADCAST(wh->i_addr1) && |
| 2433 | !IEEE80211_IS_MULTICAST(wh->i_addr1)) { |
| 2434 | if (pFc->wep) { |
Manikandan Mohan | 1dd8b5d | 2017-04-18 15:54:09 -0700 | [diff] [blame] | 2435 | /* Allocate extra bytes for privacy header and |
| 2436 | * trailer |
| 2437 | */ |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2438 | newFrmLen = frmLen + IEEE80211_CCMP_HEADERLEN + |
| 2439 | IEEE80211_CCMP_MICLEN; |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 2440 | qdf_status = |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2441 | cds_packet_alloc((uint16_t) newFrmLen, |
| 2442 | (void **)&pFrame, |
| 2443 | (void **)&pPacket); |
| 2444 | |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 2445 | if (!QDF_IS_STATUS_SUCCESS(qdf_status)) { |
Manikandan Mohan | 1dd8b5d | 2017-04-18 15:54:09 -0700 | [diff] [blame] | 2446 | WMA_LOGP("%s: Failed to allocate %d bytes for RMF status code (%x)", |
| 2447 | __func__, newFrmLen, |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 2448 | qdf_status); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2449 | /* Free the original packet memory */ |
| 2450 | cds_packet_free((void *)tx_frame); |
| 2451 | goto error; |
| 2452 | } |
| 2453 | |
| 2454 | /* |
| 2455 | * Initialize the frame with 0's and only fill |
| 2456 | * MAC header and data, Keep the CCMP header and |
| 2457 | * trailer as 0's, firmware shall fill this |
| 2458 | */ |
Anurag Chouhan | 600c3a0 | 2016-03-01 10:33:54 +0530 | [diff] [blame] | 2459 | qdf_mem_set(pFrame, newFrmLen, 0); |
| 2460 | qdf_mem_copy(pFrame, wh, sizeof(*wh)); |
| 2461 | qdf_mem_copy(pFrame + sizeof(*wh) + |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2462 | IEEE80211_CCMP_HEADERLEN, |
| 2463 | pData + sizeof(*wh), |
| 2464 | frmLen - sizeof(*wh)); |
| 2465 | |
| 2466 | cds_packet_free((void *)tx_frame); |
| 2467 | tx_frame = pPacket; |
Naveen Rawat | 67d60b3 | 2017-01-10 17:54:36 -0800 | [diff] [blame] | 2468 | pData = pFrame; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2469 | frmLen = newFrmLen; |
Kapil Gupta | e92d91f | 2016-12-22 14:59:25 +0530 | [diff] [blame] | 2470 | pFc = (tpSirMacFrameCtl) |
| 2471 | (qdf_nbuf_data(tx_frame)); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2472 | } |
| 2473 | } else { |
| 2474 | /* Allocate extra bytes for MMIE */ |
| 2475 | newFrmLen = frmLen + IEEE80211_MMIE_LEN; |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 2476 | qdf_status = cds_packet_alloc((uint16_t) newFrmLen, |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2477 | (void **)&pFrame, |
| 2478 | (void **)&pPacket); |
| 2479 | |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 2480 | if (!QDF_IS_STATUS_SUCCESS(qdf_status)) { |
Manikandan Mohan | 1dd8b5d | 2017-04-18 15:54:09 -0700 | [diff] [blame] | 2481 | WMA_LOGP("%s: Failed to allocate %d bytes for RMF status code (%x)", |
| 2482 | __func__, newFrmLen, |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 2483 | qdf_status); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2484 | /* Free the original packet memory */ |
| 2485 | cds_packet_free((void *)tx_frame); |
| 2486 | goto error; |
| 2487 | } |
| 2488 | /* |
| 2489 | * Initialize the frame with 0's and only fill |
| 2490 | * MAC header and data. MMIE field will be |
| 2491 | * filled by cds_attach_mmie API |
| 2492 | */ |
Anurag Chouhan | 600c3a0 | 2016-03-01 10:33:54 +0530 | [diff] [blame] | 2493 | qdf_mem_set(pFrame, newFrmLen, 0); |
| 2494 | qdf_mem_copy(pFrame, wh, sizeof(*wh)); |
| 2495 | qdf_mem_copy(pFrame + sizeof(*wh), |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2496 | pData + sizeof(*wh), frmLen - sizeof(*wh)); |
| 2497 | if (!cds_attach_mmie(iface->key.key, |
| 2498 | iface->key.key_id[0].ipn, |
| 2499 | WMA_IGTK_KEY_INDEX_4, |
| 2500 | pFrame, |
| 2501 | pFrame + newFrmLen, newFrmLen)) { |
Manikandan Mohan | 1dd8b5d | 2017-04-18 15:54:09 -0700 | [diff] [blame] | 2502 | WMA_LOGP("%s: Failed to attach MMIE at the end of frame", |
| 2503 | __func__); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2504 | /* Free the original packet memory */ |
| 2505 | cds_packet_free((void *)tx_frame); |
| 2506 | goto error; |
| 2507 | } |
| 2508 | cds_packet_free((void *)tx_frame); |
| 2509 | tx_frame = pPacket; |
Naveen Rawat | 67d60b3 | 2017-01-10 17:54:36 -0800 | [diff] [blame] | 2510 | pData = pFrame; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2511 | frmLen = newFrmLen; |
Kapil Gupta | e92d91f | 2016-12-22 14:59:25 +0530 | [diff] [blame] | 2512 | pFc = (tpSirMacFrameCtl) (qdf_nbuf_data(tx_frame)); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2513 | } |
| 2514 | } |
| 2515 | #endif /* WLAN_FEATURE_11W */ |
Kapil Gupta | e92d91f | 2016-12-22 14:59:25 +0530 | [diff] [blame] | 2516 | mHdr = (tpSirMacMgmtHdr)qdf_nbuf_data(tx_frame); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2517 | if ((frmType == TXRX_FRM_802_11_MGMT) && |
| 2518 | (pFc->subType == SIR_MAC_MGMT_PROBE_RSP)) { |
| 2519 | uint64_t adjusted_tsf_le; |
| 2520 | struct ieee80211_frame *wh = |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 2521 | (struct ieee80211_frame *)qdf_nbuf_data(tx_frame); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2522 | |
| 2523 | /* Make the TSF offset negative to match TSF in beacons */ |
| 2524 | adjusted_tsf_le = cpu_to_le64(0ULL - |
| 2525 | wma_handle->interfaces[vdev_id]. |
| 2526 | tsfadjust); |
| 2527 | A_MEMCPY(&wh[1], &adjusted_tsf_le, sizeof(adjusted_tsf_le)); |
| 2528 | } |
| 2529 | if (frmType == TXRX_FRM_802_11_DATA) { |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 2530 | qdf_nbuf_t ret; |
| 2531 | qdf_nbuf_t skb = (qdf_nbuf_t) tx_frame; |
Leo Chang | 9646490 | 2016-10-28 11:10:54 -0700 | [diff] [blame] | 2532 | void *pdev = cds_get_context(QDF_MODULE_ID_TXRX); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2533 | |
| 2534 | struct wma_decap_info_t decap_info; |
| 2535 | struct ieee80211_frame *wh = |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 2536 | (struct ieee80211_frame *)qdf_nbuf_data(skb); |
Anurag Chouhan | 210db07 | 2016-02-22 18:42:15 +0530 | [diff] [blame] | 2537 | unsigned long curr_timestamp = qdf_mc_timer_get_system_ticks(); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2538 | |
| 2539 | if (pdev == NULL) { |
| 2540 | WMA_LOGE("%s: pdev pointer is not available", __func__); |
Manikandan Mohan | 41e2d6f | 2017-04-10 16:17:39 +0530 | [diff] [blame] | 2541 | cds_packet_free((void *)tx_frame); |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 2542 | return QDF_STATUS_E_FAULT; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2543 | } |
| 2544 | |
| 2545 | /* |
| 2546 | * 1) TxRx Module expects data input to be 802.3 format |
| 2547 | * So Decapsulation has to be done. |
| 2548 | * 2) Only one Outstanding Data pending for Ack is allowed |
| 2549 | */ |
| 2550 | if (tx_frm_ota_comp_cb) { |
| 2551 | if (wma_handle->umac_data_ota_ack_cb) { |
| 2552 | /* |
Manikandan Mohan | 1dd8b5d | 2017-04-18 15:54:09 -0700 | [diff] [blame] | 2553 | * If last data frame was sent more than 5 secs |
| 2554 | * ago and still we didn't receive ack/nack from |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2555 | * fw then allow Tx of this data frame |
| 2556 | */ |
| 2557 | if (curr_timestamp >= |
| 2558 | wma_handle->last_umac_data_ota_timestamp + |
| 2559 | 500) { |
| 2560 | WMA_LOGE("%s: No Tx Ack for last data frame for more than 5 secs, allow Tx of current data frame", |
| 2561 | __func__); |
| 2562 | } else { |
| 2563 | WMA_LOGE("%s: Already one Data pending for Ack, reject Tx of data frame", |
| 2564 | __func__); |
Manikandan Mohan | 41e2d6f | 2017-04-10 16:17:39 +0530 | [diff] [blame] | 2565 | cds_packet_free((void *)tx_frame); |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 2566 | return QDF_STATUS_E_FAILURE; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2567 | } |
| 2568 | } |
| 2569 | } else { |
| 2570 | /* |
Manikandan Mohan | 1dd8b5d | 2017-04-18 15:54:09 -0700 | [diff] [blame] | 2571 | * Data Frames are sent through TxRx Non Standard Data |
| 2572 | * path so Ack Complete Cb is must |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2573 | */ |
| 2574 | WMA_LOGE("No Ack Complete Cb. Don't Allow"); |
Manikandan Mohan | 41e2d6f | 2017-04-10 16:17:39 +0530 | [diff] [blame] | 2575 | cds_packet_free((void *)tx_frame); |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 2576 | return QDF_STATUS_E_FAILURE; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2577 | } |
| 2578 | |
| 2579 | /* Take out 802.11 header from skb */ |
| 2580 | decap_info.hdr_len = wma_ieee80211_hdrsize(wh); |
Anurag Chouhan | 600c3a0 | 2016-03-01 10:33:54 +0530 | [diff] [blame] | 2581 | qdf_mem_copy(decap_info.hdr, wh, decap_info.hdr_len); |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 2582 | qdf_nbuf_pull_head(skb, decap_info.hdr_len); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2583 | |
| 2584 | /* Decapsulate to 802.3 format */ |
| 2585 | wma_decap_to_8023(skb, &decap_info); |
| 2586 | |
| 2587 | /* Zero out skb's context buffer for the driver to use */ |
Anurag Chouhan | 600c3a0 | 2016-03-01 10:33:54 +0530 | [diff] [blame] | 2588 | qdf_mem_set(skb->cb, sizeof(skb->cb), 0); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2589 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2590 | /* Terminate the (single-element) list of tx frames */ |
| 2591 | skb->next = NULL; |
| 2592 | |
| 2593 | /* Store the Ack Complete Cb */ |
| 2594 | wma_handle->umac_data_ota_ack_cb = tx_frm_ota_comp_cb; |
| 2595 | |
| 2596 | /* Store the timestamp and nbuf for this data Tx */ |
| 2597 | wma_handle->last_umac_data_ota_timestamp = curr_timestamp; |
| 2598 | wma_handle->last_umac_data_nbuf = skb; |
| 2599 | |
| 2600 | /* Send the Data frame to TxRx in Non Standard Path */ |
Venkata Sharath Chandra Manchala | 0d44d45 | 2016-11-23 17:48:15 -0800 | [diff] [blame] | 2601 | cdp_hl_tdls_flag_reset(soc, |
| 2602 | txrx_vdev, tdlsFlag); |
Poddar, Siddarth | 5a91f5b | 2016-04-28 12:24:10 +0530 | [diff] [blame] | 2603 | |
Venkata Sharath Chandra Manchala | 0d44d45 | 2016-11-23 17:48:15 -0800 | [diff] [blame] | 2604 | ret = cdp_tx_non_std(soc, |
| 2605 | txrx_vdev, |
| 2606 | OL_TX_SPEC_NO_FREE, skb); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2607 | |
Venkata Sharath Chandra Manchala | 0d44d45 | 2016-11-23 17:48:15 -0800 | [diff] [blame] | 2608 | cdp_hl_tdls_flag_reset(soc, |
| 2609 | txrx_vdev, false); |
Poddar, Siddarth | 5a91f5b | 2016-04-28 12:24:10 +0530 | [diff] [blame] | 2610 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2611 | if (ret) { |
| 2612 | WMA_LOGE("TxRx Rejected. Fail to do Tx"); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2613 | /* Call Download Cb so that umac can free the buffer */ |
| 2614 | if (tx_frm_download_comp_cb) |
| 2615 | tx_frm_download_comp_cb(wma_handle->mac_context, |
Manikandan Mohan | 1dd8b5d | 2017-04-18 15:54:09 -0700 | [diff] [blame] | 2616 | tx_frame, |
| 2617 | WMA_TX_FRAME_BUFFER_FREE); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2618 | wma_handle->umac_data_ota_ack_cb = NULL; |
| 2619 | wma_handle->last_umac_data_nbuf = NULL; |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 2620 | return QDF_STATUS_E_FAILURE; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2621 | } |
| 2622 | |
| 2623 | /* Call Download Callback if passed */ |
| 2624 | if (tx_frm_download_comp_cb) |
| 2625 | tx_frm_download_comp_cb(wma_handle->mac_context, |
| 2626 | tx_frame, |
| 2627 | WMA_TX_FRAME_BUFFER_NO_FREE); |
| 2628 | |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 2629 | return QDF_STATUS_SUCCESS; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2630 | } |
| 2631 | |
Venkata Sharath Chandra Manchala | 0d44d45 | 2016-11-23 17:48:15 -0800 | [diff] [blame] | 2632 | ctrl_pdev = cdp_get_ctrl_pdev_from_vdev(soc, |
| 2633 | txrx_vdev); |
Manjunathappa Prakash | 10d357a | 2016-03-31 19:20:49 -0700 | [diff] [blame] | 2634 | if (ctrl_pdev == NULL) { |
| 2635 | WMA_LOGE("ol_pdev_handle is NULL\n"); |
Manikandan Mohan | 41e2d6f | 2017-04-10 16:17:39 +0530 | [diff] [blame] | 2636 | cds_packet_free((void *)tx_frame); |
Manjunathappa Prakash | 10d357a | 2016-03-31 19:20:49 -0700 | [diff] [blame] | 2637 | return QDF_STATUS_E_FAILURE; |
| 2638 | } |
Leo Chang | 9646490 | 2016-10-28 11:10:54 -0700 | [diff] [blame] | 2639 | is_high_latency = cdp_cfg_is_high_latency(soc, ctrl_pdev); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2640 | |
Mukul Sharma | dfc804c | 2016-09-03 16:31:20 +0530 | [diff] [blame] | 2641 | downld_comp_required = tx_frm_download_comp_cb && is_high_latency && |
| 2642 | tx_frm_ota_comp_cb; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2643 | |
| 2644 | /* Fill the frame index to send */ |
| 2645 | if (pFc->type == SIR_MAC_MGMT_FRAME) { |
| 2646 | if (tx_frm_ota_comp_cb) { |
| 2647 | if (downld_comp_required) |
| 2648 | tx_frm_index = |
| 2649 | GENERIC_DOWNLD_COMP_ACK_COMP_INDEX; |
| 2650 | else |
| 2651 | tx_frm_index = GENERIC_NODOWLOAD_ACK_COMP_INDEX; |
| 2652 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2653 | } else { |
| 2654 | if (downld_comp_required) |
| 2655 | tx_frm_index = |
| 2656 | GENERIC_DOWNLD_COMP_NOACK_COMP_INDEX; |
| 2657 | else |
| 2658 | tx_frm_index = |
| 2659 | GENERIC_NODOWNLD_NOACK_COMP_INDEX; |
| 2660 | } |
| 2661 | } |
| 2662 | |
| 2663 | /* |
| 2664 | * If Dowload Complete is required |
| 2665 | * Wait for download complete |
| 2666 | */ |
| 2667 | if (downld_comp_required) { |
| 2668 | /* Store Tx Comp Cb */ |
| 2669 | wma_handle->tx_frm_download_comp_cb = tx_frm_download_comp_cb; |
| 2670 | |
| 2671 | /* Reset the Tx Frame Complete Event */ |
Manikandan Mohan | 1dd8b5d | 2017-04-18 15:54:09 -0700 | [diff] [blame] | 2672 | qdf_status = qdf_event_reset( |
| 2673 | &wma_handle->tx_frm_download_comp_event); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2674 | |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 2675 | if (!QDF_IS_STATUS_SUCCESS(qdf_status)) { |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2676 | WMA_LOGP("%s: Event Reset failed tx comp event %x", |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 2677 | __func__, qdf_status); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2678 | goto error; |
| 2679 | } |
| 2680 | } |
| 2681 | |
| 2682 | /* If the frame has to be sent at BD Rate2 inform TxRx */ |
| 2683 | if (tx_flag & HAL_USE_BD_RATE2_FOR_MANAGEMENT_FRAME) |
| 2684 | use_6mbps = 1; |
| 2685 | |
Deepak Dhamdhere | d97bfb3 | 2015-10-11 15:16:18 -0700 | [diff] [blame] | 2686 | if (wma_handle->interfaces[vdev_id].scan_info.chan_freq != 0) { |
| 2687 | chanfreq = wma_handle->interfaces[vdev_id].scan_info.chan_freq; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2688 | WMA_LOGI("%s: Preauth frame on channel %d", __func__, chanfreq); |
| 2689 | } else if (pFc->subType == SIR_MAC_MGMT_PROBE_RSP) { |
Manishekar Chandrasekaran | 7edffe0 | 2016-04-28 20:52:14 +0530 | [diff] [blame] | 2690 | if ((wma_is_vdev_in_ap_mode(wma_handle, vdev_id)) && |
| 2691 | (0 != wma_handle->interfaces[vdev_id].mhz)) |
| 2692 | chanfreq = wma_handle->interfaces[vdev_id].mhz; |
| 2693 | else |
| 2694 | chanfreq = channel_freq; |
Varun Reddy Yeturu | beaf750 | 2017-05-07 08:19:52 -0700 | [diff] [blame] | 2695 | WMA_LOGD("%s: Probe response frame on channel %d vdev:%d", |
Manishekar Chandrasekaran | 7edffe0 | 2016-04-28 20:52:14 +0530 | [diff] [blame] | 2696 | __func__, chanfreq, vdev_id); |
| 2697 | if (wma_is_vdev_in_ap_mode(wma_handle, vdev_id) && !chanfreq) |
| 2698 | WMA_LOGE("%s: AP oper chan is zero", __func__); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2699 | } else if (pFc->subType == SIR_MAC_MGMT_ACTION) { |
| 2700 | chanfreq = channel_freq; |
| 2701 | } else { |
| 2702 | chanfreq = 0; |
| 2703 | } |
| 2704 | if (pMac->fEnableDebugLog & 0x1) { |
| 2705 | if ((pFc->type == SIR_MAC_MGMT_FRAME) && |
| 2706 | (pFc->subType != SIR_MAC_MGMT_PROBE_REQ) && |
| 2707 | (pFc->subType != SIR_MAC_MGMT_PROBE_RSP)) { |
Srinivas Girigowda | f147212 | 2017-03-09 15:44:12 -0800 | [diff] [blame] | 2708 | WMA_LOGD("TX MGMT - Type %hu, SubType %hu seq_num[%d]", |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2709 | pFc->type, pFc->subType, |
| 2710 | ((mHdr->seqControl.seqNumHi << 4) | |
| 2711 | mHdr->seqControl.seqNumLo)); |
| 2712 | } |
| 2713 | } |
| 2714 | |
Sravan Kumar Kairam | 905b4c5 | 2017-10-17 19:38:14 +0530 | [diff] [blame] | 2715 | mgmt_param.tx_frame = tx_frame; |
| 2716 | mgmt_param.frm_len = frmLen; |
| 2717 | mgmt_param.vdev_id = vdev_id; |
| 2718 | mgmt_param.pdata = pData; |
| 2719 | mgmt_param.chanfreq = chanfreq; |
| 2720 | mgmt_param.qdf_ctx = cds_get_context(QDF_MODULE_ID_QDF_DEVICE); |
| 2721 | mgmt_param.use_6mbps = use_6mbps; |
| 2722 | mgmt_param.tx_type = tx_frm_index; |
Himanshu Agarwal | 2fdf77a | 2016-12-29 11:41:00 +0530 | [diff] [blame] | 2723 | |
Sravan Kumar Kairam | 905b4c5 | 2017-10-17 19:38:14 +0530 | [diff] [blame] | 2724 | /* |
| 2725 | * Update the tx_params TLV only for rates |
| 2726 | * other than 1Mbps and 6 Mbps |
| 2727 | */ |
| 2728 | if (rid < RATEID_DEFAULT && |
| 2729 | (rid != RATEID_1MBPS && rid != RATEID_6MBPS)) { |
| 2730 | WMA_LOGD(FL("using rate id: %d for Tx"), rid); |
| 2731 | mgmt_param.tx_params_valid = true; |
| 2732 | wma_update_tx_send_params(&mgmt_param.tx_param, rid); |
| 2733 | } |
Naveen Rawat | 296a518 | 2017-09-25 14:02:48 -0700 | [diff] [blame] | 2734 | |
Sravan Kumar Kairam | 905b4c5 | 2017-10-17 19:38:14 +0530 | [diff] [blame] | 2735 | psoc = wma_handle->psoc; |
| 2736 | if (!psoc) { |
| 2737 | WMA_LOGE("%s: psoc ctx is NULL", __func__); |
| 2738 | goto error; |
| 2739 | } |
Himanshu Agarwal | 2fdf77a | 2016-12-29 11:41:00 +0530 | [diff] [blame] | 2740 | |
Sravan Kumar Kairam | 905b4c5 | 2017-10-17 19:38:14 +0530 | [diff] [blame] | 2741 | wh = (struct ieee80211_frame *)(qdf_nbuf_data(tx_frame)); |
| 2742 | mac_addr = wh->i_addr1; |
| 2743 | peer = wlan_objmgr_get_peer(psoc, mac_addr, WLAN_MGMT_NB_ID); |
| 2744 | if (!peer) { |
| 2745 | mac_addr = wh->i_addr2; |
| 2746 | peer = wlan_objmgr_get_peer(psoc, mac_addr, |
| 2747 | WLAN_MGMT_NB_ID); |
| 2748 | } |
Himanshu Agarwal | 2fdf77a | 2016-12-29 11:41:00 +0530 | [diff] [blame] | 2749 | |
Sravan Kumar Kairam | 905b4c5 | 2017-10-17 19:38:14 +0530 | [diff] [blame] | 2750 | status = wlan_mgmt_txrx_mgmt_frame_tx(peer, |
| 2751 | (tpAniSirGlobal)wma_handle->mac_context, |
| 2752 | (qdf_nbuf_t)tx_frame, |
| 2753 | NULL, tx_frm_ota_comp_cb, |
| 2754 | WLAN_UMAC_COMP_MLME, &mgmt_param); |
Himanshu Agarwal | df9c8ac | 2017-03-27 15:57:31 +0530 | [diff] [blame] | 2755 | |
Sravan Kumar Kairam | 905b4c5 | 2017-10-17 19:38:14 +0530 | [diff] [blame] | 2756 | wlan_objmgr_peer_release_ref(peer, WLAN_MGMT_NB_ID); |
| 2757 | if (status != QDF_STATUS_SUCCESS) { |
| 2758 | WMA_LOGE("%s: mgmt tx failed", __func__); |
| 2759 | qdf_nbuf_free((qdf_nbuf_t)tx_frame); |
| 2760 | goto error; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2761 | } |
| 2762 | |
| 2763 | /* |
| 2764 | * Failed to send Tx Mgmt Frame |
| 2765 | */ |
| 2766 | if (status) { |
| 2767 | /* Call Download Cb so that umac can free the buffer */ |
Nirav Shah | eb017be | 2018-02-15 11:20:58 +0530 | [diff] [blame^] | 2768 | u32 rem; |
| 2769 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2770 | if (tx_frm_download_comp_cb) |
| 2771 | tx_frm_download_comp_cb(wma_handle->mac_context, |
| 2772 | tx_frame, |
| 2773 | WMA_TX_FRAME_BUFFER_FREE); |
Nirav Shah | eb017be | 2018-02-15 11:20:58 +0530 | [diff] [blame^] | 2774 | rem = qdf_do_div_rem(wma_handle->tx_fail_cnt, |
| 2775 | MAX_PRINT_FAILURE_CNT); |
| 2776 | if (!rem) |
Kapil Gupta | 10800b9 | 2017-05-31 19:14:47 +0530 | [diff] [blame] | 2777 | WMA_LOGE("%s: Failed to send Mgmt Frame", __func__); |
| 2778 | else |
| 2779 | WMA_LOGD("%s: Failed to send Mgmt Frame", __func__); |
| 2780 | wma_handle->tx_fail_cnt++; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2781 | goto error; |
| 2782 | } |
| 2783 | |
| 2784 | if (!tx_frm_download_comp_cb) |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 2785 | return QDF_STATUS_SUCCESS; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2786 | |
| 2787 | /* |
| 2788 | * Wait for Download Complete |
| 2789 | * if required |
| 2790 | */ |
| 2791 | if (downld_comp_required) { |
| 2792 | /* |
| 2793 | * Wait for Download Complete |
| 2794 | * @ Integrated : Dxe Complete |
| 2795 | * @ Discrete : Target Download Complete |
| 2796 | */ |
Anurag Chouhan | ce0dc99 | 2016-02-16 18:18:03 +0530 | [diff] [blame] | 2797 | qdf_status = |
Nachiket Kukade | 0396b73 | 2017-11-14 16:35:16 +0530 | [diff] [blame] | 2798 | qdf_wait_for_event_completion(&wma_handle-> |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2799 | tx_frm_download_comp_event, |
| 2800 | WMA_TX_FRAME_COMPLETE_TIMEOUT); |
| 2801 | |
Anurag Chouhan | ce0dc99 | 2016-02-16 18:18:03 +0530 | [diff] [blame] | 2802 | if (!QDF_IS_STATUS_SUCCESS(qdf_status)) { |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2803 | WMA_LOGP("Wait Event failed txfrm_comp_event"); |
| 2804 | /* |
| 2805 | * @Integrated: Something Wrong with Dxe |
| 2806 | * TODO: Some Debug Code |
| 2807 | * Here We need to trigger SSR since |
| 2808 | * since system went into a bad state where |
| 2809 | * we didn't get Download Complete for almost |
| 2810 | * WMA_TX_FRAME_COMPLETE_TIMEOUT (1 sec) |
| 2811 | */ |
Poddar, Siddarth | 5a91f5b | 2016-04-28 12:24:10 +0530 | [diff] [blame] | 2812 | /* display scheduler stats */ |
Mohit Khanna | ca4173b | 2017-09-12 21:52:19 -0700 | [diff] [blame] | 2813 | return cdp_display_stats(soc, CDP_SCHEDULER_STATS, |
| 2814 | QDF_STATS_VERBOSITY_LEVEL_HIGH); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2815 | } |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2816 | } |
| 2817 | |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 2818 | return QDF_STATUS_SUCCESS; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2819 | |
| 2820 | error: |
| 2821 | wma_handle->tx_frm_download_comp_cb = NULL; |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 2822 | return QDF_STATUS_E_FAILURE; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2823 | } |
| 2824 | |
| 2825 | /** |
| 2826 | * wma_ds_peek_rx_packet_info() - peek rx packet info |
| 2827 | * @pkt: packet |
| 2828 | * @pkt_meta: packet meta |
| 2829 | * @bSwap: byte swap |
| 2830 | * |
| 2831 | * Function fills the rx packet meta info from the the cds packet |
| 2832 | * |
Anurag Chouhan | f04e84f | 2016-03-03 10:12:12 +0530 | [diff] [blame] | 2833 | * Return: QDF status |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2834 | */ |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 2835 | QDF_STATUS wma_ds_peek_rx_packet_info(cds_pkt_t *pkt, void **pkt_meta, |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2836 | bool bSwap) |
| 2837 | { |
| 2838 | /* Sanity Check */ |
| 2839 | if (pkt == NULL) { |
| 2840 | WMA_LOGE("wma:Invalid parameter sent on wma_peek_rx_pkt_info"); |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 2841 | return QDF_STATUS_E_FAULT; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2842 | } |
| 2843 | |
| 2844 | *pkt_meta = &(pkt->pkt_meta); |
| 2845 | |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 2846 | return QDF_STATUS_SUCCESS; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2847 | } |
| 2848 | |
lifeng | 74c9a6d | 2017-02-22 15:15:38 +0800 | [diff] [blame] | 2849 | #ifdef HL_RX_AGGREGATION_HOLE_DETECTION |
| 2850 | void ol_rx_aggregation_hole(uint32_t hole_info) |
| 2851 | { |
| 2852 | struct sir_sme_rx_aggr_hole_ind *rx_aggr_hole_event; |
| 2853 | uint32_t alloc_len; |
| 2854 | cds_msg_t cds_msg = { 0 }; |
| 2855 | QDF_STATUS status; |
| 2856 | |
| 2857 | alloc_len = sizeof(*rx_aggr_hole_event) + |
| 2858 | sizeof(rx_aggr_hole_event->hole_info_array[0]); |
| 2859 | rx_aggr_hole_event = qdf_mem_malloc(alloc_len); |
| 2860 | if (NULL == rx_aggr_hole_event) { |
| 2861 | WMA_LOGE("%s: Memory allocation failure", __func__); |
| 2862 | return; |
| 2863 | } |
| 2864 | |
| 2865 | rx_aggr_hole_event->hole_cnt = 1; |
| 2866 | rx_aggr_hole_event->hole_info_array[0] = hole_info; |
| 2867 | |
| 2868 | cds_msg.type = eWNI_SME_RX_AGGR_HOLE_IND; |
| 2869 | cds_msg.bodyptr = rx_aggr_hole_event; |
| 2870 | cds_msg.bodyval = 0; |
| 2871 | |
| 2872 | status = cds_mq_post_message(CDS_MQ_ID_SME, &cds_msg); |
| 2873 | if (status != QDF_STATUS_SUCCESS) { |
| 2874 | WMA_LOGE("%s: Failed to post aggr event to SME", __func__); |
| 2875 | qdf_mem_free(rx_aggr_hole_event); |
| 2876 | return; |
| 2877 | } |
| 2878 | } |
| 2879 | #endif |
| 2880 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2881 | /** |
| 2882 | * ol_rx_err() - ol rx err handler |
| 2883 | * @pdev: ol pdev |
| 2884 | * @vdev_id: vdev id |
| 2885 | * @peer_mac_addr: peer mac address |
| 2886 | * @tid: TID |
| 2887 | * @tsf32: TSF |
| 2888 | * @err_type: error type |
| 2889 | * @rx_frame: rx frame |
| 2890 | * @pn: PN Number |
| 2891 | * @key_id: key id |
| 2892 | * |
| 2893 | * This function handles rx error and send MIC error failure to LIM |
| 2894 | * |
| 2895 | * Return: none |
| 2896 | */ |
Jeff Johnson | bd6ebd2 | 2017-01-17 13:46:38 -0800 | [diff] [blame] | 2897 | /* |
| 2898 | * Local prototype added to temporarily address warning caused by |
| 2899 | * -Wmissing-prototypes. A more correct solution will come later |
 * as a solution to IR-196435 at which point this prototype will
| 2901 | * be removed. |
| 2902 | */ |
| 2903 | void ol_rx_err(void *pdev, uint8_t vdev_id, |
| 2904 | uint8_t *peer_mac_addr, int tid, uint32_t tsf32, |
| 2905 | enum ol_rx_err_type err_type, qdf_nbuf_t rx_frame, |
| 2906 | uint64_t *pn, uint8_t key_id); |
Leo Chang | 9646490 | 2016-10-28 11:10:54 -0700 | [diff] [blame] | 2907 | void ol_rx_err(void *pdev, uint8_t vdev_id, |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2908 | uint8_t *peer_mac_addr, int tid, uint32_t tsf32, |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 2909 | enum ol_rx_err_type err_type, qdf_nbuf_t rx_frame, |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2910 | uint64_t *pn, uint8_t key_id) |
| 2911 | { |
Anurag Chouhan | 6d76066 | 2016-02-20 16:05:43 +0530 | [diff] [blame] | 2912 | tp_wma_handle wma = cds_get_context(QDF_MODULE_ID_WMA); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2913 | tpSirSmeMicFailureInd mic_err_ind; |
| 2914 | struct ether_header *eth_hdr; |
Rajeev Kumar | cf7bd80 | 2017-04-18 11:11:42 -0700 | [diff] [blame] | 2915 | struct scheduler_msg cds_msg = {0}; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2916 | |
| 2917 | if (NULL == wma) { |
| 2918 | WMA_LOGE("%s: Failed to get wma", __func__); |
| 2919 | return; |
| 2920 | } |
| 2921 | |
| 2922 | if (err_type != OL_RX_ERR_TKIP_MIC) |
| 2923 | return; |
| 2924 | |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 2925 | if (qdf_nbuf_len(rx_frame) < sizeof(*eth_hdr)) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2926 | return; |
Nirav Shah | cbc6d72 | 2016-03-01 16:24:53 +0530 | [diff] [blame] | 2927 | eth_hdr = (struct ether_header *)qdf_nbuf_data(rx_frame); |
Anurag Chouhan | 600c3a0 | 2016-03-01 10:33:54 +0530 | [diff] [blame] | 2928 | mic_err_ind = qdf_mem_malloc(sizeof(*mic_err_ind)); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2929 | if (!mic_err_ind) { |
| 2930 | WMA_LOGE("%s: Failed to allocate memory for MIC indication message", |
| 2931 | __func__); |
| 2932 | return; |
| 2933 | } |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2934 | |
| 2935 | mic_err_ind->messageType = eWNI_SME_MIC_FAILURE_IND; |
| 2936 | mic_err_ind->length = sizeof(*mic_err_ind); |
| 2937 | mic_err_ind->sessionId = vdev_id; |
Anurag Chouhan | c554842 | 2016-02-24 18:33:27 +0530 | [diff] [blame] | 2938 | qdf_copy_macaddr(&mic_err_ind->bssId, |
Anurag Chouhan | 6d76066 | 2016-02-20 16:05:43 +0530 | [diff] [blame] | 2939 | (struct qdf_mac_addr *) &wma->interfaces[vdev_id].bssid); |
Anurag Chouhan | 600c3a0 | 2016-03-01 10:33:54 +0530 | [diff] [blame] | 2940 | qdf_mem_copy(mic_err_ind->info.taMacAddr, |
Anurag Chouhan | 6d76066 | 2016-02-20 16:05:43 +0530 | [diff] [blame] | 2941 | (struct qdf_mac_addr *) peer_mac_addr, |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2942 | sizeof(tSirMacAddr)); |
Anurag Chouhan | 600c3a0 | 2016-03-01 10:33:54 +0530 | [diff] [blame] | 2943 | qdf_mem_copy(mic_err_ind->info.srcMacAddr, |
Anurag Chouhan | 6d76066 | 2016-02-20 16:05:43 +0530 | [diff] [blame] | 2944 | (struct qdf_mac_addr *) eth_hdr->ether_shost, |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2945 | sizeof(tSirMacAddr)); |
Anurag Chouhan | 600c3a0 | 2016-03-01 10:33:54 +0530 | [diff] [blame] | 2946 | qdf_mem_copy(mic_err_ind->info.dstMacAddr, |
Anurag Chouhan | 6d76066 | 2016-02-20 16:05:43 +0530 | [diff] [blame] | 2947 | (struct qdf_mac_addr *) eth_hdr->ether_dhost, |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2948 | sizeof(tSirMacAddr)); |
| 2949 | mic_err_ind->info.keyId = key_id; |
| 2950 | mic_err_ind->info.multicast = |
| 2951 | IEEE80211_IS_MULTICAST(eth_hdr->ether_dhost); |
Anurag Chouhan | 600c3a0 | 2016-03-01 10:33:54 +0530 | [diff] [blame] | 2952 | qdf_mem_copy(mic_err_ind->info.TSC, pn, SIR_CIPHER_SEQ_CTR_SIZE); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2953 | |
Rajeev Kumar | b60abe4 | 2017-01-21 15:39:31 -0800 | [diff] [blame] | 2954 | qdf_mem_set(&cds_msg, sizeof(struct scheduler_msg), 0); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2955 | cds_msg.type = eWNI_SME_MIC_FAILURE_IND; |
| 2956 | cds_msg.bodyptr = (void *) mic_err_ind; |
| 2957 | |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 2958 | if (QDF_STATUS_SUCCESS != |
Rajeev Kumar | b60abe4 | 2017-01-21 15:39:31 -0800 | [diff] [blame] | 2959 | scheduler_post_msg(QDF_MODULE_ID_SME, |
Rajeev Kumar | 156188e | 2017-01-21 17:23:52 -0800 | [diff] [blame] | 2960 | &cds_msg)) { |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2961 | WMA_LOGE("%s: could not post mic failure indication to SME", |
| 2962 | __func__); |
Anurag Chouhan | 600c3a0 | 2016-03-01 10:33:54 +0530 | [diff] [blame] | 2963 | qdf_mem_free((void *)mic_err_ind); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2964 | } |
| 2965 | } |
| 2966 | |
| 2967 | /** |
| 2968 | * wma_tx_abort() - abort tx |
| 2969 | * @vdev_id: vdev id |
| 2970 | * |
| 2971 | * In case of deauth host abort transmitting packet. |
| 2972 | * |
| 2973 | * Return: none |
| 2974 | */ |
| 2975 | void wma_tx_abort(uint8_t vdev_id) |
| 2976 | { |
| 2977 | #define PEER_ALL_TID_BITMASK 0xffffffff |
| 2978 | tp_wma_handle wma; |
| 2979 | uint32_t peer_tid_bitmap = PEER_ALL_TID_BITMASK; |
| 2980 | struct wma_txrx_node *iface; |
Govind Singh | d76a5b0 | 2016-03-08 15:12:14 +0530 | [diff] [blame] | 2981 | struct peer_flush_params param = {0}; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2982 | |
Anurag Chouhan | 6d76066 | 2016-02-20 16:05:43 +0530 | [diff] [blame] | 2983 | wma = cds_get_context(QDF_MODULE_ID_WMA); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2984 | if (NULL == wma) { |
| 2985 | WMA_LOGE("%s: wma is NULL", __func__); |
| 2986 | return; |
| 2987 | } |
| 2988 | |
| 2989 | iface = &wma->interfaces[vdev_id]; |
| 2990 | if (!iface->handle) { |
Jeff Johnson | adba396 | 2017-09-18 08:12:35 -0700 | [diff] [blame] | 2991 | WMA_LOGE("%s: Failed to get iface handle: %pK", |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 2992 | __func__, iface->handle); |
| 2993 | return; |
| 2994 | } |
Srinivas Girigowda | f147212 | 2017-03-09 15:44:12 -0800 | [diff] [blame] | 2995 | WMA_LOGD("%s: vdevid %d bssid %pM", __func__, vdev_id, iface->bssid); |
Mukul Sharma | 6411bb8 | 2017-03-01 15:57:07 +0530 | [diff] [blame] | 2996 | wma_vdev_set_pause_bit(vdev_id, PAUSE_TYPE_HOST); |
Leo Chang | 9646490 | 2016-10-28 11:10:54 -0700 | [diff] [blame] | 2997 | cdp_fc_vdev_pause(cds_get_context(QDF_MODULE_ID_SOC), |
Venkata Sharath Chandra Manchala | 0d44d45 | 2016-11-23 17:48:15 -0800 | [diff] [blame] | 2998 | iface->handle, |
| 2999 | OL_TXQ_PAUSE_REASON_TX_ABORT); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 3000 | |
| 3001 | /* Flush all TIDs except MGMT TID for this peer in Target */ |
| 3002 | peer_tid_bitmap &= ~(0x1 << WMI_MGMT_TID); |
Govind Singh | d76a5b0 | 2016-03-08 15:12:14 +0530 | [diff] [blame] | 3003 | param.peer_tid_bitmap = peer_tid_bitmap; |
| 3004 | param.vdev_id = vdev_id; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 3005 | wmi_unified_peer_flush_tids_send(wma->wmi_handle, iface->bssid, |
Govind Singh | d76a5b0 | 2016-03-08 15:12:14 +0530 | [diff] [blame] | 3006 | ¶m); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 3007 | } |
| 3008 | |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 3009 | /** |
| 3010 | * wma_lro_config_cmd() - process the LRO config command |
| 3011 | * @wma: Pointer to WMA handle |
| 3012 | * @wma_lro_cmd: Pointer to LRO configuration parameters |
| 3013 | * |
| 3014 | * This function sends down the LRO configuration parameters to |
| 3015 | * the firmware to enable LRO, sets the TCP flags and sets the |
| 3016 | * seed values for the toeplitz hash generation |
| 3017 | * |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 3018 | * Return: QDF_STATUS_SUCCESS for success otherwise failure |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 3019 | */ |
Dhanashri Atre | 09828f1 | 2016-11-13 10:36:58 -0800 | [diff] [blame] | 3020 | QDF_STATUS wma_lro_config_cmd(void *handle, |
| 3021 | struct cdp_lro_hash_config *wma_lro_cmd) |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 3022 | { |
Himanshu Agarwal | 17dea6e | 2016-03-09 12:11:22 +0530 | [diff] [blame] | 3023 | struct wmi_lro_config_cmd_t wmi_lro_cmd = {0}; |
Dhanashri Atre | 09828f1 | 2016-11-13 10:36:58 -0800 | [diff] [blame] | 3024 | tp_wma_handle wma = cds_get_context(QDF_MODULE_ID_WMA); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 3025 | |
Dhanashri Atre | 09828f1 | 2016-11-13 10:36:58 -0800 | [diff] [blame] | 3026 | if (NULL == wma || NULL == wma_lro_cmd) { |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 3027 | WMA_LOGE("wma_lro_config_cmd': invalid input!"); |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 3028 | return QDF_STATUS_E_FAILURE; |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 3029 | } |
| 3030 | |
Himanshu Agarwal | 17dea6e | 2016-03-09 12:11:22 +0530 | [diff] [blame] | 3031 | wmi_lro_cmd.lro_enable = wma_lro_cmd->lro_enable; |
| 3032 | wmi_lro_cmd.tcp_flag = wma_lro_cmd->tcp_flag; |
| 3033 | wmi_lro_cmd.tcp_flag_mask = wma_lro_cmd->tcp_flag_mask; |
| 3034 | qdf_mem_copy(wmi_lro_cmd.toeplitz_hash_ipv4, |
| 3035 | wma_lro_cmd->toeplitz_hash_ipv4, |
| 3036 | LRO_IPV4_SEED_ARR_SZ * sizeof(uint32_t)); |
| 3037 | qdf_mem_copy(wmi_lro_cmd.toeplitz_hash_ipv6, |
| 3038 | wma_lro_cmd->toeplitz_hash_ipv6, |
| 3039 | LRO_IPV6_SEED_ARR_SZ * sizeof(uint32_t)); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 3040 | |
Dhanashri Atre | 09828f1 | 2016-11-13 10:36:58 -0800 | [diff] [blame] | 3041 | return wmi_unified_lro_config_cmd(wma->wmi_handle, |
Himanshu Agarwal | 17dea6e | 2016-03-09 12:11:22 +0530 | [diff] [blame] | 3042 | &wmi_lro_cmd); |
Prakash Dhavali | 7090c5f | 2015-11-02 17:55:19 -0800 | [diff] [blame] | 3043 | } |
Dhanashri Atre | 1f0cbe4 | 2015-11-19 10:56:53 -0800 | [diff] [blame] | 3044 | |
| 3045 | /** |
| 3046 | * wma_indicate_err() - indicate an error to the protocol stack |
| 3047 | * @err_type: error type |
| 3048 | * @err_info: information associated with the error |
| 3049 | * |
| 3050 | * This function indicates an error encountered in the data path |
| 3051 | * to the protocol stack |
| 3052 | * |
| 3053 | * Return: none |
| 3054 | */ |
| 3055 | void |
| 3056 | wma_indicate_err( |
| 3057 | enum ol_rx_err_type err_type, |
| 3058 | struct ol_error_info *err_info) |
| 3059 | { |
| 3060 | switch (err_type) { |
| 3061 | case OL_RX_ERR_TKIP_MIC: |
| 3062 | { |
Anurag Chouhan | 6d76066 | 2016-02-20 16:05:43 +0530 | [diff] [blame] | 3063 | tp_wma_handle wma = cds_get_context(QDF_MODULE_ID_WMA); |
Dhanashri Atre | 1f0cbe4 | 2015-11-19 10:56:53 -0800 | [diff] [blame] | 3064 | tpSirSmeMicFailureInd mic_err_ind; |
Rajeev Kumar | cf7bd80 | 2017-04-18 11:11:42 -0700 | [diff] [blame] | 3065 | struct scheduler_msg cds_msg = {0}; |
Dhanashri Atre | 1f0cbe4 | 2015-11-19 10:56:53 -0800 | [diff] [blame] | 3066 | uint8_t vdev_id; |
| 3067 | |
| 3068 | if (NULL == wma) { |
| 3069 | WMA_LOGE("%s: Failed to get wma context", |
| 3070 | __func__); |
| 3071 | return; |
| 3072 | } |
| 3073 | |
Anurag Chouhan | 600c3a0 | 2016-03-01 10:33:54 +0530 | [diff] [blame] | 3074 | mic_err_ind = qdf_mem_malloc(sizeof(*mic_err_ind)); |
Dhanashri Atre | 1f0cbe4 | 2015-11-19 10:56:53 -0800 | [diff] [blame] | 3075 | if (!mic_err_ind) { |
| 3076 | WMA_LOGE("%s: MIC indication mem alloc failed", |
| 3077 | __func__); |
| 3078 | return; |
| 3079 | } |
| 3080 | |
Anurag Chouhan | 600c3a0 | 2016-03-01 10:33:54 +0530 | [diff] [blame] | 3081 | qdf_mem_set((void *) mic_err_ind, 0, |
Dhanashri Atre | 1f0cbe4 | 2015-11-19 10:56:53 -0800 | [diff] [blame] | 3082 | sizeof(*mic_err_ind)); |
| 3083 | mic_err_ind->messageType = eWNI_SME_MIC_FAILURE_IND; |
| 3084 | mic_err_ind->length = sizeof(*mic_err_ind); |
| 3085 | vdev_id = err_info->u.mic_err.vdev_id; |
Anurag Chouhan | c554842 | 2016-02-24 18:33:27 +0530 | [diff] [blame] | 3086 | qdf_copy_macaddr(&mic_err_ind->bssId, |
Anurag Chouhan | 6d76066 | 2016-02-20 16:05:43 +0530 | [diff] [blame] | 3087 | (struct qdf_mac_addr *) &wma->interfaces[vdev_id].bssid); |
Dhanashri Atre | 1f0cbe4 | 2015-11-19 10:56:53 -0800 | [diff] [blame] | 3088 | WMA_LOGE("MIC error: BSSID:%02x:%02x:%02x:%02x:%02x:%02x\n", |
Manikandan Mohan | 1dd8b5d | 2017-04-18 15:54:09 -0700 | [diff] [blame] | 3089 | mic_err_ind->bssId.bytes[0], |
| 3090 | mic_err_ind->bssId.bytes[1], |
| 3091 | mic_err_ind->bssId.bytes[2], |
| 3092 | mic_err_ind->bssId.bytes[3], |
| 3093 | mic_err_ind->bssId.bytes[4], |
| 3094 | mic_err_ind->bssId.bytes[5]); |
Anurag Chouhan | 600c3a0 | 2016-03-01 10:33:54 +0530 | [diff] [blame] | 3095 | qdf_mem_copy(mic_err_ind->info.taMacAddr, |
Anurag Chouhan | 6d76066 | 2016-02-20 16:05:43 +0530 | [diff] [blame] | 3096 | (struct qdf_mac_addr *) err_info->u.mic_err.ta, |
Dhanashri Atre | 1f0cbe4 | 2015-11-19 10:56:53 -0800 | [diff] [blame] | 3097 | sizeof(tSirMacAddr)); |
Anurag Chouhan | 600c3a0 | 2016-03-01 10:33:54 +0530 | [diff] [blame] | 3098 | qdf_mem_copy(mic_err_ind->info.srcMacAddr, |
Anurag Chouhan | 6d76066 | 2016-02-20 16:05:43 +0530 | [diff] [blame] | 3099 | (struct qdf_mac_addr *) err_info->u.mic_err.sa, |
Dhanashri Atre | 1f0cbe4 | 2015-11-19 10:56:53 -0800 | [diff] [blame] | 3100 | sizeof(tSirMacAddr)); |
Anurag Chouhan | 600c3a0 | 2016-03-01 10:33:54 +0530 | [diff] [blame] | 3101 | qdf_mem_copy(mic_err_ind->info.dstMacAddr, |
Anurag Chouhan | 6d76066 | 2016-02-20 16:05:43 +0530 | [diff] [blame] | 3102 | (struct qdf_mac_addr *) err_info->u.mic_err.da, |
Dhanashri Atre | 1f0cbe4 | 2015-11-19 10:56:53 -0800 | [diff] [blame] | 3103 | sizeof(tSirMacAddr)); |
| 3104 | mic_err_ind->info.keyId = err_info->u.mic_err.key_id; |
| 3105 | mic_err_ind->info.multicast = |
| 3106 | IEEE80211_IS_MULTICAST(err_info->u.mic_err.da); |
Anurag Chouhan | 600c3a0 | 2016-03-01 10:33:54 +0530 | [diff] [blame] | 3107 | qdf_mem_copy(mic_err_ind->info.TSC, |
Dhanashri Atre | 1f0cbe4 | 2015-11-19 10:56:53 -0800 | [diff] [blame] | 3108 | (void *)&err_info-> |
| 3109 | u.mic_err.pn, SIR_CIPHER_SEQ_CTR_SIZE); |
| 3110 | |
Rajeev Kumar | b60abe4 | 2017-01-21 15:39:31 -0800 | [diff] [blame] | 3111 | qdf_mem_set(&cds_msg, sizeof(struct scheduler_msg), 0); |
Dhanashri Atre | 1f0cbe4 | 2015-11-19 10:56:53 -0800 | [diff] [blame] | 3112 | cds_msg.type = eWNI_SME_MIC_FAILURE_IND; |
| 3113 | cds_msg.bodyptr = (void *) mic_err_ind; |
Anurag Chouhan | fb54ab0 | 2016-02-18 18:00:46 +0530 | [diff] [blame] | 3114 | if (QDF_STATUS_SUCCESS != |
Rajeev Kumar | b60abe4 | 2017-01-21 15:39:31 -0800 | [diff] [blame] | 3115 | scheduler_post_msg(QDF_MODULE_ID_SME, |
Rajeev Kumar | 156188e | 2017-01-21 17:23:52 -0800 | [diff] [blame] | 3116 | &cds_msg)) { |
Dhanashri Atre | 1f0cbe4 | 2015-11-19 10:56:53 -0800 | [diff] [blame] | 3117 | WMA_LOGE("%s: mic failure ind post to SME failed", |
| 3118 | __func__); |
Anurag Chouhan | 600c3a0 | 2016-03-01 10:33:54 +0530 | [diff] [blame] | 3119 | qdf_mem_free((void *)mic_err_ind); |
Dhanashri Atre | 1f0cbe4 | 2015-11-19 10:56:53 -0800 | [diff] [blame] | 3120 | } |
| 3121 | break; |
| 3122 | } |
| 3123 | default: |
| 3124 | { |
| 3125 | WMA_LOGE("%s: unhandled ol error type %d", __func__, err_type); |
| 3126 | break; |
| 3127 | } |
| 3128 | } |
| 3129 | } |
jiad | cd49ec7 | 2017-12-05 13:33:11 +0800 | [diff] [blame] | 3130 | |
| 3131 | void wma_rx_mic_error_ind(void *scn_handle, uint16_t vdev_id, void *wh) |
| 3132 | { |
| 3133 | struct ieee80211_frame *w = (struct ieee80211_frame *)wh; |
| 3134 | struct ol_error_info err_info; |
| 3135 | |
| 3136 | err_info.u.mic_err.vdev_id = vdev_id; |
| 3137 | qdf_mem_copy(err_info.u.mic_err.da, w->i_addr1, OL_TXRX_MAC_ADDR_LEN); |
| 3138 | qdf_mem_copy(err_info.u.mic_err.ta, w->i_addr2, OL_TXRX_MAC_ADDR_LEN); |
| 3139 | |
| 3140 | WMA_LOGD("MIC vdev_id %d\n", vdev_id); |
| 3141 | WMA_LOGD("MIC DA: %02x:%02x:%02x:%02x:%02x:%02x\n", |
| 3142 | err_info.u.mic_err.da[0], |
| 3143 | err_info.u.mic_err.da[1], |
| 3144 | err_info.u.mic_err.da[2], |
| 3145 | err_info.u.mic_err.da[3], |
| 3146 | err_info.u.mic_err.da[4], |
| 3147 | err_info.u.mic_err.da[5]); |
| 3148 | WMA_LOGD("MIC TA: %02x:%02x:%02x:%02x:%02x:%02x\n", |
| 3149 | err_info.u.mic_err.ta[0], |
| 3150 | err_info.u.mic_err.ta[1], |
| 3151 | err_info.u.mic_err.ta[2], |
| 3152 | err_info.u.mic_err.ta[3], |
| 3153 | err_info.u.mic_err.ta[4], |
| 3154 | err_info.u.mic_err.ta[5]); |
| 3155 | |
| 3156 | wma_indicate_err(OL_RX_ERR_TKIP_MIC, &err_info); |
| 3157 | } |