blob: 06902f385d36f445d611ab350456b79f7271a967 [file] [log] [blame]
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001/*
nobelj34f6fe22019-12-27 09:53:04 -08002 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003 *
4 * Permission to use, copy, modify, and/or distribute this software for
5 * any purpose with or without fee is hereby granted, provided that the
6 * above copyright notice and this permission notice appear in all
7 * copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16 * PERFORMANCE OF THIS SOFTWARE.
17 */
18
19#include <htt.h>
Balamurugan Mahalingamf72cb1f2018-06-25 12:18:34 +053020#include <hal_hw_headers.h>
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -070021#include <hal_api.h>
Jeff Johnson2cb8fc72016-12-17 10:45:08 -080022#include "dp_peer.h"
Dhanashri Atre2ea8c0f2017-02-08 11:09:56 -080023#include "dp_types.h"
Pramod Simhab17d0672017-03-06 17:20:13 -080024#include "dp_internal.h"
Kai Chen6eca1a62017-01-12 10:17:53 -080025#include "dp_rx_mon.h"
Ishank Jain6290a3c2017-03-21 10:49:39 +053026#include "htt_stats.h"
Pamidipati, Vijay038d0902017-07-17 09:53:31 +053027#include "htt_ppdu_stats.h"
nobeljdebe2b32019-04-23 11:18:47 -070028#include "dp_htt.h"
Ruben Columbus43194932019-05-24 09:56:52 -070029#include "dp_rx.h"
Keyur Parekhfad6d082017-05-07 08:54:47 -070030#include "qdf_mem.h" /* qdf_mem_malloc,free */
Pamidipati, Vijay038d0902017-07-17 09:53:31 +053031#include "cdp_txrx_cmn_struct.h"
Ishank Jain6290a3c2017-03-21 10:49:39 +053032
ydb247452018-08-08 00:23:16 +053033#ifdef FEATURE_PERPKT_INFO
34#include "dp_ratetable.h"
35#endif
36
Ishank Jain6290a3c2017-03-21 10:49:39 +053037#define HTT_TLV_HDR_LEN HTT_T2H_EXT_STATS_CONF_TLV_HDR_SIZE
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -070038
39#define HTT_HTC_PKT_POOL_INIT_SIZE 64
40
41#define HTT_MSG_BUF_SIZE(msg_bytes) \
42 ((msg_bytes) + HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING)
43
Prathyusha Guduri43bb0562018-02-12 18:30:54 +053044#define HTT_PID_BIT_MASK 0x3
45
Ishank Jain6290a3c2017-03-21 10:49:39 +053046#define DP_EXT_MSG_LENGTH 2048
Pramod Simhae0baa442017-06-27 15:21:39 -070047
Soumya Bhat51240dc2018-05-24 18:00:57 +053048#define HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN 16
49
nobelj182938a2019-11-25 14:09:08 -080050#define HTT_SHIFT_UPPER_TIMESTAMP 32
51#define HTT_MASK_UPPER_TIMESTAMP 0xFFFFFFFF00000000
52
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -070053/*
Amir Patel36a79a62019-01-17 11:23:37 +053054 * dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap() - Get ppdu stats tlv
55 * bitmap for sniffer mode
56 * @bitmap: received bitmap
57 *
58 * Return: expected bitmap value, returns zero if doesn't match with
59 * either 64-bit Tx window or 256-bit window tlv bitmap
60 */
nobeljdebe2b32019-04-23 11:18:47 -070061int
Amir Patel36a79a62019-01-17 11:23:37 +053062dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(uint32_t bitmap)
63{
64 if (bitmap == (HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64))
65 return HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64;
66 else if (bitmap == (HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256))
67 return HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256;
68
69 return 0;
70}
71
nobeljdebe2b32019-04-23 11:18:47 -070072#ifdef FEATURE_PERPKT_INFO
Amir Patel36a79a62019-01-17 11:23:37 +053073/*
nobelj7b0e2732019-05-31 00:19:07 -070074 * dp_peer_copy_delay_stats() - copy ppdu stats to peer delayed stats.
75 * @peer: Datapath peer handle
76 * @ppdu: PPDU Descriptor
77 *
78 * Return: None
79 *
80 * on Tx data frame, we may get delayed ba set
81 * in htt_ppdu_stats_user_common_tlv. which mean we get Block Ack(BA) after we
82 * request Block Ack Request(BAR). Successful msdu is received only after Block
83 * Ack. To populate peer stats we need successful msdu(data frame).
84 * So we hold the Tx data stats on delayed_ba for stats update.
85 */
86static inline void
87dp_peer_copy_delay_stats(struct dp_peer *peer,
88 struct cdp_tx_completion_ppdu_user *ppdu)
89{
90 struct dp_pdev *pdev;
91 struct dp_vdev *vdev;
92
93 if (peer->last_delayed_ba) {
94 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
95 "BA not yet recv for prev delayed ppdu[%d]\n",
96 peer->last_delayed_ba_ppduid);
97 vdev = peer->vdev;
98 if (vdev) {
99 pdev = vdev->pdev;
100 pdev->stats.cdp_delayed_ba_not_recev++;
101 }
102 }
103
104 peer->delayed_ba_ppdu_stats.ltf_size = ppdu->ltf_size;
105 peer->delayed_ba_ppdu_stats.stbc = ppdu->stbc;
106 peer->delayed_ba_ppdu_stats.he_re = ppdu->he_re;
107 peer->delayed_ba_ppdu_stats.txbf = ppdu->txbf;
108 peer->delayed_ba_ppdu_stats.bw = ppdu->bw;
109 peer->delayed_ba_ppdu_stats.nss = ppdu->nss;
110 peer->delayed_ba_ppdu_stats.preamble = ppdu->preamble;
111 peer->delayed_ba_ppdu_stats.gi = ppdu->gi;
112 peer->delayed_ba_ppdu_stats.dcm = ppdu->dcm;
113 peer->delayed_ba_ppdu_stats.ldpc = ppdu->ldpc;
114 peer->delayed_ba_ppdu_stats.dcm = ppdu->dcm;
115 peer->delayed_ba_ppdu_stats.mpdu_tried_ucast = ppdu->mpdu_tried_ucast;
116 peer->delayed_ba_ppdu_stats.mpdu_tried_mcast = ppdu->mpdu_tried_mcast;
117 peer->delayed_ba_ppdu_stats.frame_ctrl = ppdu->frame_ctrl;
118 peer->delayed_ba_ppdu_stats.qos_ctrl = ppdu->qos_ctrl;
119 peer->delayed_ba_ppdu_stats.dcm = ppdu->dcm;
120
121 peer->delayed_ba_ppdu_stats.ru_start = ppdu->ru_start;
122 peer->delayed_ba_ppdu_stats.ru_tones = ppdu->ru_tones;
123 peer->delayed_ba_ppdu_stats.is_mcast = ppdu->is_mcast;
124
125 peer->delayed_ba_ppdu_stats.user_pos = ppdu->user_pos;
126 peer->delayed_ba_ppdu_stats.mu_group_id = ppdu->mu_group_id;
127
128 peer->last_delayed_ba = true;
129}
130
131/*
132 * dp_peer_copy_stats_to_bar() - copy delayed stats to ppdu stats.
133 * @peer: Datapath peer handle
134 * @ppdu: PPDU Descriptor
135 *
136 * Return: None
137 *
138 * For Tx BAR, PPDU stats TLV include Block Ack info. PPDU info
139 * from Tx BAR frame not required to populate peer stats.
140 * But we need successful MPDU and MSDU to update previous
141 * transmitted Tx data frame. Overwrite ppdu stats with the previous
142 * stored ppdu stats.
143 */
144static void
145dp_peer_copy_stats_to_bar(struct dp_peer *peer,
146 struct cdp_tx_completion_ppdu_user *ppdu)
147{
148 ppdu->ltf_size = peer->delayed_ba_ppdu_stats.ltf_size;
149 ppdu->stbc = peer->delayed_ba_ppdu_stats.stbc;
150 ppdu->he_re = peer->delayed_ba_ppdu_stats.he_re;
151 ppdu->txbf = peer->delayed_ba_ppdu_stats.txbf;
152 ppdu->bw = peer->delayed_ba_ppdu_stats.bw;
153 ppdu->nss = peer->delayed_ba_ppdu_stats.nss;
154 ppdu->preamble = peer->delayed_ba_ppdu_stats.preamble;
155 ppdu->gi = peer->delayed_ba_ppdu_stats.gi;
156 ppdu->dcm = peer->delayed_ba_ppdu_stats.dcm;
157 ppdu->ldpc = peer->delayed_ba_ppdu_stats.ldpc;
158 ppdu->dcm = peer->delayed_ba_ppdu_stats.dcm;
159 ppdu->mpdu_tried_ucast = peer->delayed_ba_ppdu_stats.mpdu_tried_ucast;
160 ppdu->mpdu_tried_mcast = peer->delayed_ba_ppdu_stats.mpdu_tried_mcast;
161 ppdu->frame_ctrl = peer->delayed_ba_ppdu_stats.frame_ctrl;
162 ppdu->qos_ctrl = peer->delayed_ba_ppdu_stats.qos_ctrl;
163 ppdu->dcm = peer->delayed_ba_ppdu_stats.dcm;
164
165 ppdu->ru_start = peer->delayed_ba_ppdu_stats.ru_start;
166 ppdu->ru_tones = peer->delayed_ba_ppdu_stats.ru_tones;
167 ppdu->is_mcast = peer->delayed_ba_ppdu_stats.is_mcast;
168
169 ppdu->user_pos = peer->delayed_ba_ppdu_stats.user_pos;
170 ppdu->mu_group_id = peer->delayed_ba_ppdu_stats.mu_group_id;
171
172 peer->last_delayed_ba = false;
173}
174
175/*
nobeljdebe2b32019-04-23 11:18:47 -0700176 * dp_tx_rate_stats_update() - Update rate per-peer statistics
Pamidipati, Vijay038d0902017-07-17 09:53:31 +0530177 * @peer: Datapath peer handle
178 * @ppdu: PPDU Descriptor
Pamidipati, Vijay038d0902017-07-17 09:53:31 +0530179 *
180 * Return: None
181 */
nobeljdebe2b32019-04-23 11:18:47 -0700182static void
ydb247452018-08-08 00:23:16 +0530183dp_tx_rate_stats_update(struct dp_peer *peer,
184 struct cdp_tx_completion_ppdu_user *ppdu)
185{
186 uint32_t ratekbps = 0;
Shashikala Prabhuf7786d32019-04-05 14:25:09 +0530187 uint64_t ppdu_tx_rate = 0;
Amir Patel78824b12019-02-23 10:54:32 +0530188 uint32_t rix;
Ankit Kumarcd66fff2019-07-02 20:54:44 +0530189 uint16_t ratecode = 0;
ydb247452018-08-08 00:23:16 +0530190
191 if (!peer || !ppdu)
192 return;
193
Anish Nataraj376d9b12018-08-13 14:12:01 +0530194 ratekbps = dp_getrateindex(ppdu->gi,
195 ppdu->mcs,
ydb247452018-08-08 00:23:16 +0530196 ppdu->nss,
197 ppdu->preamble,
Amir Patel78824b12019-02-23 10:54:32 +0530198 ppdu->bw,
Ankit Kumarcd66fff2019-07-02 20:54:44 +0530199 &rix,
200 &ratecode);
ydb247452018-08-08 00:23:16 +0530201
202 DP_STATS_UPD(peer, tx.last_tx_rate, ratekbps);
203
204 if (!ratekbps)
205 return;
206
Ankit Kumarcd66fff2019-07-02 20:54:44 +0530207 /* Calculate goodput in non-training period
208 * In training period, don't do anything as
209 * pending pkt is send as goodput.
210 */
211 if ((!peer->bss_peer) && (!ppdu->sa_is_training)) {
212 ppdu->sa_goodput = ((ratekbps / CDP_NUM_KB_IN_MB) *
213 (CDP_PERCENT_MACRO - ppdu->current_rate_per));
214 }
Amir Patel468bded2019-03-21 11:42:31 +0530215 ppdu->rix = rix;
Amir Patelac7d9462019-03-28 16:16:01 +0530216 ppdu->tx_ratekbps = ratekbps;
Ankit Kumarcd66fff2019-07-02 20:54:44 +0530217 ppdu->tx_ratecode = ratecode;
Amir Patelc2cc2522018-11-29 20:44:47 +0530218 peer->stats.tx.avg_tx_rate =
219 dp_ath_rate_lpf(peer->stats.tx.avg_tx_rate, ratekbps);
ydb247452018-08-08 00:23:16 +0530220 ppdu_tx_rate = dp_ath_rate_out(peer->stats.tx.avg_tx_rate);
221 DP_STATS_UPD(peer, tx.rnd_avg_tx_rate, ppdu_tx_rate);
222
223 if (peer->vdev) {
Chaitanya Kiran Godavarthi8c880d32019-06-06 19:32:48 +0530224 /*
225 * In STA mode:
226 * We get ucast stats as BSS peer stats.
227 *
228 * In AP mode:
229 * We get mcast stats as BSS peer stats.
230 * We get ucast stats as assoc peer stats.
231 */
232 if (peer->vdev->opmode == wlan_op_mode_ap && peer->bss_peer) {
Amir Patel1de3d3d2018-09-14 11:47:02 +0530233 peer->vdev->stats.tx.mcast_last_tx_rate = ratekbps;
234 peer->vdev->stats.tx.mcast_last_tx_rate_mcs = ppdu->mcs;
235 } else {
236 peer->vdev->stats.tx.last_tx_rate = ratekbps;
237 peer->vdev->stats.tx.last_tx_rate_mcs = ppdu->mcs;
238 }
ydb247452018-08-08 00:23:16 +0530239 }
240}
241
/*
 * dp_tx_stats_update() - Update per-peer statistics
 * @pdev: Datapath pdev handle
 * @peer: Datapath peer handle
 * @ppdu: PPDU Descriptor
 * @ack_rssi: RSSI of last ack received
 *
 * Return: None
 */
static void
dp_tx_stats_update(struct dp_pdev *pdev, struct dp_peer *peer,
		   struct cdp_tx_completion_ppdu_user *ppdu,
		   uint32_t ack_rssi)
{
	uint8_t preamble, mcs;
	uint16_t num_msdu;
	uint16_t num_mpdu;
	uint16_t mpdu_tried;
	uint16_t mpdu_failed;

	preamble = ppdu->preamble;
	mcs = ppdu->mcs;
	num_msdu = ppdu->num_msdu;
	num_mpdu = ppdu->mpdu_success;
	mpdu_tried = ppdu->mpdu_tried_ucast + ppdu->mpdu_tried_mcast;
	/* NOTE(review): assumes mpdu_success <= mpdu_tried; otherwise this
	 * uint16_t subtraction wraps - confirm against the FW TLV contract.
	 */
	mpdu_failed = mpdu_tried - num_mpdu;

	/* If the peer statistics are already processed as part of
	 * per-MSDU completion handler, do not process these again in per-PPDU
	 * indications */
	if (pdev->soc->process_tx_status)
		return;

	if (ppdu->completion_status != HTT_PPDU_STATS_USER_STATUS_OK) {
		/*
		 * All failed mpdu will be retried, so incrementing
		 * retries mpdu based on mpdu failed. Even for
		 * ack failure i.e for long retries we get
		 * mpdu failed equal mpdu tried.
		 */
		DP_STATS_INC(peer, tx.retries, mpdu_failed);
		DP_STATS_INC(peer, tx.tx_failed, ppdu->failed_msdus);
		return;
	}

	if (ppdu->is_ppdu_cookie_valid)
		DP_STATS_INC(peer, tx.num_ppdu_cookie_valid, 1);

	/* MU PPDUs carry a group id; record the user position per group.
	 * Group id 0 modulo (MAX_MU_GROUP_ID - 1) is treated as invalid.
	 */
	if (ppdu->mu_group_id <= MAX_MU_GROUP_ID &&
	    ppdu->ppdu_type != HTT_PPDU_STATS_PPDU_TYPE_SU) {
		if (unlikely(!(ppdu->mu_group_id & (MAX_MU_GROUP_ID - 1))))
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				  "mu_group_id out of bound!!\n");
		else
			DP_STATS_UPD(peer, tx.mu_group_id[ppdu->mu_group_id],
				     (ppdu->user_pos + 1));
	}

	/* For OFDMA PPDUs, bucket MSDU/MPDU counters by RU allocation size. */
	if (ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_OFDMA ||
	    ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_MIMO_OFDMA) {
		DP_STATS_UPD(peer, tx.ru_tones, ppdu->ru_tones);
		DP_STATS_UPD(peer, tx.ru_start, ppdu->ru_start);
		switch (ppdu->ru_tones) {
		case RU_26:
			DP_STATS_INC(peer, tx.ru_loc[RU_26_INDEX].num_msdu,
				     num_msdu);
			DP_STATS_INC(peer, tx.ru_loc[RU_26_INDEX].num_mpdu,
				     num_mpdu);
			DP_STATS_INC(peer, tx.ru_loc[RU_26_INDEX].mpdu_tried,
				     mpdu_tried);
			break;
		case RU_52:
			DP_STATS_INC(peer, tx.ru_loc[RU_52_INDEX].num_msdu,
				     num_msdu);
			DP_STATS_INC(peer, tx.ru_loc[RU_52_INDEX].num_mpdu,
				     num_mpdu);
			DP_STATS_INC(peer, tx.ru_loc[RU_52_INDEX].mpdu_tried,
				     mpdu_tried);
			break;
		case RU_106:
			DP_STATS_INC(peer, tx.ru_loc[RU_106_INDEX].num_msdu,
				     num_msdu);
			DP_STATS_INC(peer, tx.ru_loc[RU_106_INDEX].num_mpdu,
				     num_mpdu);
			DP_STATS_INC(peer, tx.ru_loc[RU_106_INDEX].mpdu_tried,
				     mpdu_tried);
			break;
		case RU_242:
			DP_STATS_INC(peer, tx.ru_loc[RU_242_INDEX].num_msdu,
				     num_msdu);
			DP_STATS_INC(peer, tx.ru_loc[RU_242_INDEX].num_mpdu,
				     num_mpdu);
			DP_STATS_INC(peer, tx.ru_loc[RU_242_INDEX].mpdu_tried,
				     mpdu_tried);
			break;
		case RU_484:
			DP_STATS_INC(peer, tx.ru_loc[RU_484_INDEX].num_msdu,
				     num_msdu);
			DP_STATS_INC(peer, tx.ru_loc[RU_484_INDEX].num_mpdu,
				     num_mpdu);
			DP_STATS_INC(peer, tx.ru_loc[RU_484_INDEX].mpdu_tried,
				     mpdu_tried);
			break;
		case RU_996:
			DP_STATS_INC(peer, tx.ru_loc[RU_996_INDEX].num_msdu,
				     num_msdu);
			DP_STATS_INC(peer, tx.ru_loc[RU_996_INDEX].num_mpdu,
				     num_mpdu);
			DP_STATS_INC(peer, tx.ru_loc[RU_996_INDEX].mpdu_tried,
				     mpdu_tried);
			break;
		}
	}

	/*
	 * All failed mpdu will be retried, so incrementing
	 * retries mpdu based on mpdu failed. Even for
	 * ack failure i.e for long retries we get
	 * mpdu failed equal mpdu tried.
	 */
	DP_STATS_INC(peer, tx.retries, mpdu_failed);
	DP_STATS_INC(peer, tx.tx_failed, ppdu->failed_msdus);

	DP_STATS_INC(peer, tx.transmit_type[ppdu->ppdu_type].num_msdu,
		     num_msdu);
	DP_STATS_INC(peer, tx.transmit_type[ppdu->ppdu_type].num_mpdu,
		     num_mpdu);
	DP_STATS_INC(peer, tx.transmit_type[ppdu->ppdu_type].mpdu_tried,
		     mpdu_tried);

	/* Aggregate byte count covers successful, retried and failed bytes. */
	DP_STATS_INC_PKT(peer, tx.comp_pkt,
			 num_msdu, (ppdu->success_bytes +
				    ppdu->retry_bytes + ppdu->failed_bytes));
	DP_STATS_UPD(peer, tx.tx_rate, ppdu->tx_rate);
	DP_STATS_INC(peer, tx.sgi_count[ppdu->gi], num_msdu);
	DP_STATS_INC(peer, tx.bw[ppdu->bw], num_msdu);
	DP_STATS_INC(peer, tx.nss[ppdu->nss], num_msdu);
	if (ppdu->tid < CDP_DATA_TID_MAX)
		DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ppdu->tid)],
			     num_msdu);
	DP_STATS_INCC(peer, tx.stbc, num_msdu, ppdu->stbc);
	DP_STATS_INCC(peer, tx.ldpc, num_msdu, ppdu->ldpc);
	/* ack_rssi is meaningful only for acked unicast frames. */
	if (!(ppdu->is_mcast) && ppdu->ack_rssi_valid)
		DP_STATS_UPD(peer, tx.last_ack_rssi, ack_rssi);

	/* Per-preamble MCS histograms: out-of-range MCS values for each
	 * PHY mode are folded into the last bucket (MAX_MCS - 1).
	 */
	DP_STATS_INCC(peer,
		      tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
		      ((mcs >= MAX_MCS_11A) && (preamble == DOT11_A)));
	DP_STATS_INCC(peer,
		      tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
		      ((mcs < MAX_MCS_11A) && (preamble == DOT11_A)));
	DP_STATS_INCC(peer,
		      tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
		      ((mcs >= MAX_MCS_11B) && (preamble == DOT11_B)));
	DP_STATS_INCC(peer,
		      tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
		      ((mcs < (MAX_MCS_11B)) && (preamble == DOT11_B)));
	DP_STATS_INCC(peer,
		      tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
		      ((mcs >= MAX_MCS_11A) && (preamble == DOT11_N)));
	DP_STATS_INCC(peer,
		      tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
		      ((mcs < MAX_MCS_11A) && (preamble == DOT11_N)));
	DP_STATS_INCC(peer,
		      tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
		      ((mcs >= MAX_MCS_11AC) && (preamble == DOT11_AC)));
	DP_STATS_INCC(peer,
		      tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
		      ((mcs < MAX_MCS_11AC) && (preamble == DOT11_AC)));
	DP_STATS_INCC(peer,
		      tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
		      ((mcs >= (MAX_MCS - 1)) && (preamble == DOT11_AX)));
	DP_STATS_INCC(peer,
		      tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
		      ((mcs < (MAX_MCS - 1)) && (preamble == DOT11_AX)));
	DP_STATS_INCC(peer, tx.ampdu_cnt, num_msdu, ppdu->is_ampdu);
	DP_STATS_INCC(peer, tx.non_ampdu_cnt, num_msdu, !(ppdu->is_ampdu));

	/* Push the refreshed peer stats to registered listeners. */
	dp_peer_stats_notify(pdev, peer);

#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc,
			     &peer->stats, ppdu->peer_id,
			     UPDATE_PEER_STATS, pdev->pdev_id);
#endif
}
428#endif
429
#ifdef WLAN_TX_PKT_CAPTURE_ENH
#include "dp_tx_capture.h"
#else
/*
 * dp_process_ppdu_stats_update_failed_bitmap() - no-op stub used when the
 * Tx packet capture enhancement feature is compiled out; the real
 * implementation is provided by dp_tx_capture.h above.
 * @pdev: Datapath pdev handle (unused)
 * @data: TLV payload (unused)
 * @ppdu_id: PPDU id (unused)
 * @size: payload size (unused)
 */
static inline void
dp_process_ppdu_stats_update_failed_bitmap(struct dp_pdev *pdev,
					   void *data,
					   uint32_t ppdu_id,
					   uint32_t size)
{
}
#endif
441
Pamidipati, Vijay038d0902017-07-17 09:53:31 +0530442/*
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700443 * htt_htc_pkt_alloc() - Allocate HTC packet buffer
444 * @htt_soc: HTT SOC handle
445 *
446 * Return: Pointer to htc packet buffer
447 */
448static struct dp_htt_htc_pkt *
449htt_htc_pkt_alloc(struct htt_soc *soc)
450{
451 struct dp_htt_htc_pkt_union *pkt = NULL;
452
453 HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
454 if (soc->htt_htc_pkt_freelist) {
455 pkt = soc->htt_htc_pkt_freelist;
456 soc->htt_htc_pkt_freelist = soc->htt_htc_pkt_freelist->u.next;
457 }
458 HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
459
Jeff Johnsona8edf332019-03-18 09:51:52 -0700460 if (!pkt)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700461 pkt = qdf_mem_malloc(sizeof(*pkt));
462 return &pkt->u.pkt; /* not actually a dereference */
463}
464
465/*
466 * htt_htc_pkt_free() - Free HTC packet buffer
467 * @htt_soc: HTT SOC handle
468 */
469static void
470htt_htc_pkt_free(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
471{
472 struct dp_htt_htc_pkt_union *u_pkt =
473 (struct dp_htt_htc_pkt_union *)pkt;
474
475 HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
476 u_pkt->u.next = soc->htt_htc_pkt_freelist;
477 soc->htt_htc_pkt_freelist = u_pkt;
478 HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
479}
480
481/*
482 * htt_htc_pkt_pool_free() - Free HTC packet pool
483 * @htt_soc: HTT SOC handle
484 */
485static void
486htt_htc_pkt_pool_free(struct htt_soc *soc)
487{
488 struct dp_htt_htc_pkt_union *pkt, *next;
489 pkt = soc->htt_htc_pkt_freelist;
490 while (pkt) {
491 next = pkt->u.next;
492 qdf_mem_free(pkt);
493 pkt = next;
494 }
495 soc->htt_htc_pkt_freelist = NULL;
496}
497
/*
 * htt_htc_misc_pkt_list_trim() - trim misc list
 * @soc: HTT SOC handle
 * @level: max no. of pkts in list
 *
 * Walks the misclist and frees every node past the first @level entries,
 * unmapping and freeing each node's network buffer as well. The list is
 * terminated exactly once: on the first trimmed node, @prev still points
 * at the last kept node and its link is cleared; afterwards @pkt is set
 * to NULL so @prev stays NULL and the termination is not repeated.
 */
static void
htt_htc_misc_pkt_list_trim(struct htt_soc *soc, int level)
{
	struct dp_htt_htc_pkt_union *pkt, *next, *prev = NULL;
	int i = 0;
	qdf_nbuf_t netbuf;

	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
	pkt = soc->htt_htc_pkt_misclist;
	while (pkt) {
		next = pkt->u.next;
		/* trim the out grown list: once i exceeds level, every
		 * remaining node is released */
		if (++i > level) {
			netbuf =
				(qdf_nbuf_t)(pkt->u.pkt.htc_pkt.pNetBufContext);
			qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);
			qdf_nbuf_free(netbuf);
			qdf_mem_free(pkt);
			pkt = NULL;
			if (prev)
				prev->u.next = NULL;
		}
		prev = pkt;
		pkt = next;
	}
	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
}
530
531/*
532 * htt_htc_misc_pkt_list_add() - Add pkt to misc list
533 * @htt_soc: HTT SOC handle
534 * @dp_htt_htc_pkt: pkt to be added to list
535 */
536static void
537htt_htc_misc_pkt_list_add(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
538{
539 struct dp_htt_htc_pkt_union *u_pkt =
540 (struct dp_htt_htc_pkt_union *)pkt;
541 int misclist_trim_level = htc_get_tx_queue_depth(soc->htc_soc,
542 pkt->htc_pkt.Endpoint)
543 + DP_HTT_HTC_PKT_MISCLIST_SIZE;
544
545 HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
546 if (soc->htt_htc_pkt_misclist) {
547 u_pkt->u.next = soc->htt_htc_pkt_misclist;
548 soc->htt_htc_pkt_misclist = u_pkt;
549 } else {
550 soc->htt_htc_pkt_misclist = u_pkt;
551 }
552 HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
553
554 /* only ce pipe size + tx_queue_depth could possibly be in use
555 * free older packets in the misclist
556 */
557 htt_htc_misc_pkt_list_trim(soc, misclist_trim_level);
558}
559
Ankit Kumar0ead45c2019-04-29 15:32:49 +0530560/**
561 * DP_HTT_SEND_HTC_PKT() - Send htt packet from host
562 * @soc : HTT SOC handle
563 * @pkt: pkt to be send
564 * @cmd : command to be recorded in dp htt logger
565 * @buf : Pointer to buffer needs to be recored for above cmd
Ankit Kumar0ead45c2019-04-29 15:32:49 +0530566 *
567 * Return: None
568 */
569static inline void DP_HTT_SEND_HTC_PKT(struct htt_soc *soc,
570 struct dp_htt_htc_pkt *pkt, uint8_t cmd,
571 uint8_t *buf)
572{
573 htt_command_record(soc->htt_logger_handle, cmd, buf);
574 if (htc_send_pkt(soc->htc_soc, &pkt->htc_pkt) ==
575 QDF_STATUS_SUCCESS)
576 htt_htc_misc_pkt_list_add(soc, pkt);
577}
578
Pramod Simhae0baa442017-06-27 15:21:39 -0700579/*
580 * htt_htc_misc_pkt_pool_free() - free pkts in misc list
581 * @htt_soc: HTT SOC handle
582 */
583static void
584htt_htc_misc_pkt_pool_free(struct htt_soc *soc)
585{
586 struct dp_htt_htc_pkt_union *pkt, *next;
587 qdf_nbuf_t netbuf;
588
589 pkt = soc->htt_htc_pkt_misclist;
590
591 while (pkt) {
592 next = pkt->u.next;
593 netbuf = (qdf_nbuf_t) (pkt->u.pkt.htc_pkt.pNetBufContext);
594 qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);
595
596 soc->stats.htc_pkt_free++;
Houston Hoffman41b912c2017-08-30 14:27:51 -0700597 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
Aditya Sathishded018e2018-07-02 16:25:21 +0530598 "%s: Pkt free count %d",
Pramod Simhae0baa442017-06-27 15:21:39 -0700599 __func__, soc->stats.htc_pkt_free);
600
601 qdf_nbuf_free(netbuf);
602 qdf_mem_free(pkt);
603 pkt = next;
604 }
605 soc->htt_htc_pkt_misclist = NULL;
606}
607
/*
 * htt_t2h_mac_addr_deswizzle() - Swap MAC addr bytes if FW endianness differ
 * @tgt_mac_addr: Target MAC
 * @buffer: Output buffer
 */
static u_int8_t *
htt_t2h_mac_addr_deswizzle(u_int8_t *tgt_mac_addr, u_int8_t *buffer)
{
#ifdef BIG_ENDIAN_HOST
	/*
	 * The host endianness is opposite of the target endianness.
	 * To make u_int32_t elements come out correctly, the target->host
	 * upload has swizzled the bytes in each u_int32_t element of the
	 * message. For a byte array like the MAC address this puts the
	 * bytes in the wrong order; undo it per 4-byte word: output byte i
	 * comes from input byte (i ^ 3).
	 */
	int i;

	for (i = 0; i < 6; i++)
		buffer[i] = tgt_mac_addr[i ^ 3];
	return buffer;
#else
	/*
	 * The host endianness matches the target endianness -
	 * we can use the mac addr directly from the message buffer.
	 */
	return tgt_mac_addr;
#endif
}
641
/*
 * dp_htt_h2t_send_complete_free_netbuf() - Free completed buffer
 * @soc: SOC handle (unused)
 * @status: Completion status (unused - the buffer is freed regardless)
 * @netbuf: HTT buffer to release
 *
 * Used as the send-done callback for messages whose buffer is owned by
 * the HTT layer and simply discarded after transmission.
 */
static void
dp_htt_h2t_send_complete_free_netbuf(
	void *soc, A_STATUS status, qdf_nbuf_t netbuf)
{
	qdf_nbuf_free(netbuf);
}
654
655/*
656 * dp_htt_h2t_send_complete() - H2T completion handler
657 * @context: Opaque context (HTT SOC handle)
658 * @htc_pkt: HTC packet
659 */
Jeff Johnson32140742017-01-05 15:30:47 -0800660static void
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700661dp_htt_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
662{
663 void (*send_complete_part2)(
Sravan Kumar Kairamdeb899b2019-01-23 14:47:07 +0530664 void *soc, QDF_STATUS status, qdf_nbuf_t msdu);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700665 struct htt_soc *soc = (struct htt_soc *) context;
666 struct dp_htt_htc_pkt *htt_pkt;
667 qdf_nbuf_t netbuf;
668
669 send_complete_part2 = htc_pkt->pPktContext;
670
671 htt_pkt = container_of(htc_pkt, struct dp_htt_htc_pkt, htc_pkt);
672
673 /* process (free or keep) the netbuf that held the message */
674 netbuf = (qdf_nbuf_t) htc_pkt->pNetBufContext;
675 /*
676 * adf sendcomplete is required for windows only
677 */
678 /* qdf_nbuf_set_sendcompleteflag(netbuf, TRUE); */
Jeff Johnsona8edf332019-03-18 09:51:52 -0700679 if (send_complete_part2) {
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700680 send_complete_part2(
681 htt_pkt->soc_ctxt, htc_pkt->Status, netbuf);
682 }
683 /* free the htt_htc_pkt / HTC_PACKET object */
684 htt_htc_pkt_free(soc, htt_pkt);
685}
686
687/*
688 * htt_h2t_ver_req_msg() - Send HTT version request message to target
689 * @htt_soc: HTT SOC handle
690 *
691 * Return: 0 on success; error code on failure
692 */
693static int htt_h2t_ver_req_msg(struct htt_soc *soc)
694{
695 struct dp_htt_htc_pkt *pkt;
696 qdf_nbuf_t msg;
697 uint32_t *msg_word;
698
699 msg = qdf_nbuf_alloc(
700 soc->osdev,
701 HTT_MSG_BUF_SIZE(HTT_VER_REQ_BYTES),
702 /* reserve room for the HTC header */
703 HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
704 if (!msg)
705 return QDF_STATUS_E_NOMEM;
706
707 /*
708 * Set the length of the message.
709 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
710 * separately during the below call to qdf_nbuf_push_head.
711 * The contribution from the HTC header is added separately inside HTC.
712 */
713 if (qdf_nbuf_put_tail(msg, HTT_VER_REQ_BYTES) == NULL) {
714 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Aditya Sathishded018e2018-07-02 16:25:21 +0530715 "%s: Failed to expand head for HTT_H2T_MSG_TYPE_VERSION_REQ msg",
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700716 __func__);
717 return QDF_STATUS_E_FAILURE;
718 }
719
720 /* fill in the message contents */
721 msg_word = (u_int32_t *) qdf_nbuf_data(msg);
722
723 /* rewind beyond alignment pad to get to the HTC header reserved area */
724 qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
725
726 *msg_word = 0;
727 HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VERSION_REQ);
728
729 pkt = htt_htc_pkt_alloc(soc);
730 if (!pkt) {
731 qdf_nbuf_free(msg);
732 return QDF_STATUS_E_FAILURE;
733 }
734 pkt->soc_ctxt = NULL; /* not used during send-done callback */
735
736 SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
737 dp_htt_h2t_send_complete_free_netbuf, qdf_nbuf_data(msg),
738 qdf_nbuf_len(msg), soc->htc_endpoint,
739 1); /* tag - not relevant here */
740
741 SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
Ankit Kumar0ead45c2019-04-29 15:32:49 +0530742 DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_VERSION_REQ, NULL);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700743 return 0;
744}
745
746/*
747 * htt_srng_setup() - Send SRNG setup message to target
748 * @htt_soc: HTT SOC handle
Dhanashri Atred4032ab2017-01-17 15:05:41 -0800749 * @mac_id: MAC Id
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700750 * @hal_srng: Opaque HAL SRNG pointer
751 * @hal_ring_type: SRNG ring type
752 *
753 * Return: 0 on success; error code on failure
754 */
Akshay Kosigia5c46a42019-06-27 12:43:01 +0530755int htt_srng_setup(struct htt_soc *soc, int mac_id,
756 hal_ring_handle_t hal_ring_hdl,
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +0530757 int hal_ring_type)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700758{
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700759 struct dp_htt_htc_pkt *pkt;
760 qdf_nbuf_t htt_msg;
761 uint32_t *msg_word;
762 struct hal_srng_params srng_params;
763 qdf_dma_addr_t hp_addr, tp_addr;
764 uint32_t ring_entry_size =
765 hal_srng_get_entrysize(soc->hal_soc, hal_ring_type);
766 int htt_ring_type, htt_ring_id;
Ankit Kumar0ead45c2019-04-29 15:32:49 +0530767 uint8_t *htt_logger_bufp;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700768
769 /* Sizes should be set in 4-byte words */
770 ring_entry_size = ring_entry_size >> 2;
771
772 htt_msg = qdf_nbuf_alloc(soc->osdev,
773 HTT_MSG_BUF_SIZE(HTT_SRING_SETUP_SZ),
774 /* reserve room for the HTC header */
775 HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
776 if (!htt_msg)
777 goto fail0;
778
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +0530779 hal_get_srng_params(soc->hal_soc, hal_ring_hdl, &srng_params);
780 hp_addr = hal_srng_get_hp_addr(soc->hal_soc, hal_ring_hdl);
781 tp_addr = hal_srng_get_tp_addr(soc->hal_soc, hal_ring_hdl);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700782
783 switch (hal_ring_type) {
784 case RXDMA_BUF:
Dhanashri Atre7351d172016-10-12 13:08:09 -0700785#ifdef QCA_HOST2FW_RXBUF_RING
Manoj Ekbote46c03162017-02-16 21:32:00 -0800786 if (srng_params.ring_id ==
Yun Parkfde6b9e2017-06-26 17:13:11 -0700787 (HAL_SRNG_WMAC1_SW2RXDMA0_BUF0)) {
Dhanashri Atre7351d172016-10-12 13:08:09 -0700788 htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
789 htt_ring_type = HTT_SW_TO_SW_RING;
Yun Parkfde6b9e2017-06-26 17:13:11 -0700790#ifdef IPA_OFFLOAD
791 } else if (srng_params.ring_id ==
792 (HAL_SRNG_WMAC1_SW2RXDMA0_BUF2)) {
793 htt_ring_id = HTT_HOST2_TO_FW_RXBUF_RING;
794 htt_ring_type = HTT_SW_TO_SW_RING;
795#endif
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700796#else
Manoj Ekbote46c03162017-02-16 21:32:00 -0800797 if (srng_params.ring_id ==
Yun Parkfde6b9e2017-06-26 17:13:11 -0700798 (HAL_SRNG_WMAC1_SW2RXDMA0_BUF0 +
Manoj Ekbote46c03162017-02-16 21:32:00 -0800799 (mac_id * HAL_MAX_RINGS_PER_LMAC))) {
Dhanashri Atre7351d172016-10-12 13:08:09 -0700800 htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
801 htt_ring_type = HTT_SW_TO_HW_RING;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700802#endif
Dhanashri Atred4032ab2017-01-17 15:05:41 -0800803 } else if (srng_params.ring_id ==
Yun Parkfde6b9e2017-06-26 17:13:11 -0700804#ifdef IPA_OFFLOAD
805 (HAL_SRNG_WMAC1_SW2RXDMA0_BUF1 +
806#else
Dhanashri Atred4032ab2017-01-17 15:05:41 -0800807 (HAL_SRNG_WMAC1_SW2RXDMA1_BUF +
Yun Parkfde6b9e2017-06-26 17:13:11 -0700808#endif
Dhanashri Atred4032ab2017-01-17 15:05:41 -0800809 (mac_id * HAL_MAX_RINGS_PER_LMAC))) {
Dhanashri Atre7351d172016-10-12 13:08:09 -0700810 htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
811 htt_ring_type = HTT_SW_TO_HW_RING;
Dhanashri Atred4032ab2017-01-17 15:05:41 -0800812 } else {
Dhanashri Atre7351d172016-10-12 13:08:09 -0700813 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Aditya Sathishded018e2018-07-02 16:25:21 +0530814 "%s: Ring %d currently not supported",
Yun Parkfde6b9e2017-06-26 17:13:11 -0700815 __func__, srng_params.ring_id);
Dhanashri Atre7351d172016-10-12 13:08:09 -0700816 goto fail1;
817 }
Dhanashri Atred4032ab2017-01-17 15:05:41 -0800818
Mohit Khanna81179cb2018-08-16 20:50:43 -0700819 dp_info("ring_type %d ring_id %d htt_ring_id %d hp_addr 0x%llx tp_addr 0x%llx",
820 hal_ring_type, srng_params.ring_id, htt_ring_id,
821 (uint64_t)hp_addr,
822 (uint64_t)tp_addr);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700823 break;
824 case RXDMA_MONITOR_BUF:
825 htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
826 htt_ring_type = HTT_SW_TO_HW_RING;
827 break;
828 case RXDMA_MONITOR_STATUS:
829 htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
830 htt_ring_type = HTT_SW_TO_HW_RING;
831 break;
832 case RXDMA_MONITOR_DST:
833 htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
834 htt_ring_type = HTT_HW_TO_SW_RING;
835 break;
Kai Chen6eca1a62017-01-12 10:17:53 -0800836 case RXDMA_MONITOR_DESC:
837 htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
838 htt_ring_type = HTT_SW_TO_HW_RING;
839 break;
Pramod Simhae382ff82017-06-05 18:09:26 -0700840 case RXDMA_DST:
841 htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
842 htt_ring_type = HTT_HW_TO_SW_RING;
843 break;
Kai Chen6eca1a62017-01-12 10:17:53 -0800844
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700845 default:
846 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Aditya Sathishded018e2018-07-02 16:25:21 +0530847 "%s: Ring currently not supported", __func__);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700848 goto fail1;
849 }
850
851 /*
852 * Set the length of the message.
853 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
854 * separately during the below call to qdf_nbuf_push_head.
855 * The contribution from the HTC header is added separately inside HTC.
856 */
857 if (qdf_nbuf_put_tail(htt_msg, HTT_SRING_SETUP_SZ) == NULL) {
858 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Aditya Sathishded018e2018-07-02 16:25:21 +0530859 "%s: Failed to expand head for SRING_SETUP msg",
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700860 __func__);
861 return QDF_STATUS_E_FAILURE;
862 }
863
864 msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
865
866 /* rewind beyond alignment pad to get to the HTC header reserved area */
867 qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
868
869 /* word 0 */
870 *msg_word = 0;
Ankit Kumar0ead45c2019-04-29 15:32:49 +0530871 htt_logger_bufp = (uint8_t *)msg_word;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700872 HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_SRING_SETUP);
Dhanashri Atre2ea8c0f2017-02-08 11:09:56 -0800873
Kai Chencbe4c342017-06-12 20:06:35 -0700874 if ((htt_ring_type == HTT_SW_TO_HW_RING) ||
Ravi Joshi8851f4e2017-06-07 21:22:08 -0700875 (htt_ring_type == HTT_HW_TO_SW_RING))
Dhanashri Atre2ea8c0f2017-02-08 11:09:56 -0800876 HTT_SRING_SETUP_PDEV_ID_SET(*msg_word,
877 DP_SW2HW_MACID(mac_id));
878 else
879 HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, mac_id);
880
Rakesh Pillai51264a62019-05-08 19:15:56 +0530881 dp_info("%s: mac_id %d", __func__, mac_id);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700882 HTT_SRING_SETUP_RING_TYPE_SET(*msg_word, htt_ring_type);
883 /* TODO: Discuss with FW on changing this to unique ID and using
884 * htt_ring_type to send the type of ring
885 */
886 HTT_SRING_SETUP_RING_ID_SET(*msg_word, htt_ring_id);
887
888 /* word 1 */
889 msg_word++;
890 *msg_word = 0;
891 HTT_SRING_SETUP_RING_BASE_ADDR_LO_SET(*msg_word,
892 srng_params.ring_base_paddr & 0xffffffff);
893
894 /* word 2 */
895 msg_word++;
896 *msg_word = 0;
897 HTT_SRING_SETUP_RING_BASE_ADDR_HI_SET(*msg_word,
898 (uint64_t)srng_params.ring_base_paddr >> 32);
899
900 /* word 3 */
901 msg_word++;
902 *msg_word = 0;
903 HTT_SRING_SETUP_ENTRY_SIZE_SET(*msg_word, ring_entry_size);
904 HTT_SRING_SETUP_RING_SIZE_SET(*msg_word,
905 (ring_entry_size * srng_params.num_entries));
Rakesh Pillai51264a62019-05-08 19:15:56 +0530906 dp_info("%s: entry_size %d", __func__, ring_entry_size);
907 dp_info("%s: num_entries %d", __func__, srng_params.num_entries);
908 dp_info("%s: ring_size %d", __func__,
909 (ring_entry_size * srng_params.num_entries));
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700910 if (htt_ring_type == HTT_SW_TO_HW_RING)
Leo Chang5ea93a42016-11-03 12:39:49 -0700911 HTT_SRING_SETUP_RING_MISC_CFG_FLAG_LOOPCOUNT_DISABLE_SET(
912 *msg_word, 1);
913 HTT_SRING_SETUP_RING_MISC_CFG_FLAG_MSI_SWAP_SET(*msg_word,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700914 !!(srng_params.flags & HAL_SRNG_MSI_SWAP));
Leo Chang5ea93a42016-11-03 12:39:49 -0700915 HTT_SRING_SETUP_RING_MISC_CFG_FLAG_TLV_SWAP_SET(*msg_word,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700916 !!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP));
Leo Chang5ea93a42016-11-03 12:39:49 -0700917 HTT_SRING_SETUP_RING_MISC_CFG_FLAG_HOST_FW_SWAP_SET(*msg_word,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700918 !!(srng_params.flags & HAL_SRNG_RING_PTR_SWAP));
919
920 /* word 4 */
921 msg_word++;
922 *msg_word = 0;
923 HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
924 hp_addr & 0xffffffff);
925
926 /* word 5 */
927 msg_word++;
928 *msg_word = 0;
929 HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
930 (uint64_t)hp_addr >> 32);
931
932 /* word 6 */
933 msg_word++;
934 *msg_word = 0;
935 HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
936 tp_addr & 0xffffffff);
937
938 /* word 7 */
939 msg_word++;
940 *msg_word = 0;
941 HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
942 (uint64_t)tp_addr >> 32);
943
944 /* word 8 */
945 msg_word++;
946 *msg_word = 0;
947 HTT_SRING_SETUP_RING_MSI_ADDR_LO_SET(*msg_word,
948 srng_params.msi_addr & 0xffffffff);
949
950 /* word 9 */
951 msg_word++;
952 *msg_word = 0;
953 HTT_SRING_SETUP_RING_MSI_ADDR_HI_SET(*msg_word,
954 (uint64_t)(srng_params.msi_addr) >> 32);
955
956 /* word 10 */
957 msg_word++;
958 *msg_word = 0;
959 HTT_SRING_SETUP_RING_MSI_DATA_SET(*msg_word,
960 srng_params.msi_data);
961
962 /* word 11 */
963 msg_word++;
964 *msg_word = 0;
965 HTT_SRING_SETUP_INTR_BATCH_COUNTER_TH_SET(*msg_word,
966 srng_params.intr_batch_cntr_thres_entries *
967 ring_entry_size);
968 HTT_SRING_SETUP_INTR_TIMER_TH_SET(*msg_word,
969 srng_params.intr_timer_thres_us >> 3);
970
971 /* word 12 */
972 msg_word++;
973 *msg_word = 0;
974 if (srng_params.flags & HAL_SRNG_LOW_THRES_INTR_ENABLE) {
975 /* TODO: Setting low threshold to 1/8th of ring size - see
976 * if this needs to be configurable
977 */
978 HTT_SRING_SETUP_INTR_LOW_TH_SET(*msg_word,
979 srng_params.low_threshold);
980 }
981 /* "response_required" field should be set if a HTT response message is
982 * required after setting up the ring.
983 */
984 pkt = htt_htc_pkt_alloc(soc);
985 if (!pkt)
986 goto fail1;
987
988 pkt->soc_ctxt = NULL; /* not used during send-done callback */
989
990 SET_HTC_PACKET_INFO_TX(
991 &pkt->htc_pkt,
992 dp_htt_h2t_send_complete_free_netbuf,
993 qdf_nbuf_data(htt_msg),
994 qdf_nbuf_len(htt_msg),
995 soc->htc_endpoint,
Yue Ma245b47b2017-02-21 16:35:31 -0800996 HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700997
998 SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
Ankit Kumar0ead45c2019-04-29 15:32:49 +0530999 DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_SRING_SETUP,
1000 htt_logger_bufp);
Kai Chen6eca1a62017-01-12 10:17:53 -08001001
1002 return QDF_STATUS_SUCCESS;
1003
1004fail1:
1005 qdf_nbuf_free(htt_msg);
1006fail0:
1007 return QDF_STATUS_E_FAILURE;
1008}
1009
1010/*
1011 * htt_h2t_rx_ring_cfg() - Send SRNG packet and TLV filter
1012 * config message to target
1013 * @htt_soc: HTT SOC handle
1014 * @pdev_id: PDEV Id
1015 * @hal_srng: Opaque HAL SRNG pointer
1016 * @hal_ring_type: SRNG ring type
1017 * @ring_buf_size: SRNG buffer size
1018 * @htt_tlv_filter: Rx SRNG TLV and filter setting
1019 * Return: 0 on success; error code on failure
1020 */
Akshay Kosigia5c46a42019-06-27 12:43:01 +05301021int htt_h2t_rx_ring_cfg(struct htt_soc *htt_soc, int pdev_id,
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301022 hal_ring_handle_t hal_ring_hdl,
1023 int hal_ring_type, int ring_buf_size,
1024 struct htt_rx_ring_tlv_filter *htt_tlv_filter)
Kai Chen6eca1a62017-01-12 10:17:53 -08001025{
1026 struct htt_soc *soc = (struct htt_soc *)htt_soc;
1027 struct dp_htt_htc_pkt *pkt;
1028 qdf_nbuf_t htt_msg;
1029 uint32_t *msg_word;
1030 struct hal_srng_params srng_params;
1031 uint32_t htt_ring_type, htt_ring_id;
1032 uint32_t tlv_filter;
Ankit Kumar0ead45c2019-04-29 15:32:49 +05301033 uint8_t *htt_logger_bufp;
Ruben Columbusb7a1c572019-08-12 11:11:29 -07001034 struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = soc->dp_soc->wlan_cfg_ctx;
1035 uint32_t mon_drop_th = wlan_cfg_get_mon_drop_thresh(wlan_cfg_ctx);
Kai Chen6eca1a62017-01-12 10:17:53 -08001036
1037 htt_msg = qdf_nbuf_alloc(soc->osdev,
1038 HTT_MSG_BUF_SIZE(HTT_RX_RING_SELECTION_CFG_SZ),
1039 /* reserve room for the HTC header */
1040 HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
1041 if (!htt_msg)
1042 goto fail0;
1043
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301044 hal_get_srng_params(soc->hal_soc, hal_ring_hdl, &srng_params);
Kai Chen6eca1a62017-01-12 10:17:53 -08001045
1046 switch (hal_ring_type) {
1047 case RXDMA_BUF:
Kai Chen6eca1a62017-01-12 10:17:53 -08001048 htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
1049 htt_ring_type = HTT_SW_TO_HW_RING;
Kai Chen6eca1a62017-01-12 10:17:53 -08001050 break;
1051 case RXDMA_MONITOR_BUF:
1052 htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
1053 htt_ring_type = HTT_SW_TO_HW_RING;
1054 break;
1055 case RXDMA_MONITOR_STATUS:
1056 htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
1057 htt_ring_type = HTT_SW_TO_HW_RING;
1058 break;
1059 case RXDMA_MONITOR_DST:
1060 htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
1061 htt_ring_type = HTT_HW_TO_SW_RING;
1062 break;
1063 case RXDMA_MONITOR_DESC:
1064 htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
1065 htt_ring_type = HTT_SW_TO_HW_RING;
1066 break;
Pramod Simhae382ff82017-06-05 18:09:26 -07001067 case RXDMA_DST:
1068 htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
1069 htt_ring_type = HTT_HW_TO_SW_RING;
1070 break;
Kai Chen6eca1a62017-01-12 10:17:53 -08001071
1072 default:
1073 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Aditya Sathishded018e2018-07-02 16:25:21 +05301074 "%s: Ring currently not supported", __func__);
Kai Chen6eca1a62017-01-12 10:17:53 -08001075 goto fail1;
1076 }
1077
1078 /*
1079 * Set the length of the message.
1080 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
1081 * separately during the below call to qdf_nbuf_push_head.
1082 * The contribution from the HTC header is added separately inside HTC.
1083 */
1084 if (qdf_nbuf_put_tail(htt_msg, HTT_RX_RING_SELECTION_CFG_SZ) == NULL) {
1085 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Aditya Sathishded018e2018-07-02 16:25:21 +05301086 "%s: Failed to expand head for RX Ring Cfg msg",
Kai Chen6eca1a62017-01-12 10:17:53 -08001087 __func__);
1088 goto fail1; /* failure */
1089 }
1090
1091 msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
1092
1093 /* rewind beyond alignment pad to get to the HTC header reserved area */
1094 qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
1095
1096 /* word 0 */
Ankit Kumar0ead45c2019-04-29 15:32:49 +05301097 htt_logger_bufp = (uint8_t *)msg_word;
Kai Chen6eca1a62017-01-12 10:17:53 -08001098 *msg_word = 0;
1099 HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG);
Ravi Joshi8851f4e2017-06-07 21:22:08 -07001100
1101 /*
1102 * pdev_id is indexed from 0 whereas mac_id is indexed from 1
1103 * SW_TO_SW and SW_TO_HW rings are unaffected by this
1104 */
1105 if (htt_ring_type == HTT_SW_TO_SW_RING ||
1106 htt_ring_type == HTT_SW_TO_HW_RING)
1107 HTT_RX_RING_SELECTION_CFG_PDEV_ID_SET(*msg_word,
1108 DP_SW2HW_MACID(pdev_id));
1109
Kai Chen6eca1a62017-01-12 10:17:53 -08001110 /* TODO: Discuss with FW on changing this to unique ID and using
1111 * htt_ring_type to send the type of ring
1112 */
1113 HTT_RX_RING_SELECTION_CFG_RING_ID_SET(*msg_word, htt_ring_id);
1114
1115 HTT_RX_RING_SELECTION_CFG_STATUS_TLV_SET(*msg_word,
1116 !!(srng_params.flags & HAL_SRNG_MSI_SWAP));
1117
1118 HTT_RX_RING_SELECTION_CFG_PKT_TLV_SET(*msg_word,
1119 !!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP));
1120
Kiran Venkatappa07921612019-03-02 23:14:12 +05301121 HTT_RX_RING_SELECTION_CFG_RX_OFFSETS_VALID_SET(*msg_word,
1122 htt_tlv_filter->offset_valid);
1123
Ruben Columbusb7a1c572019-08-12 11:11:29 -07001124 if (mon_drop_th > 0)
1125 HTT_RX_RING_SELECTION_CFG_DROP_THRESHOLD_VALID_SET(*msg_word,
1126 1);
1127 else
1128 HTT_RX_RING_SELECTION_CFG_DROP_THRESHOLD_VALID_SET(*msg_word,
1129 0);
1130
Kai Chen6eca1a62017-01-12 10:17:53 -08001131 /* word 1 */
1132 msg_word++;
1133 *msg_word = 0;
1134 HTT_RX_RING_SELECTION_CFG_RING_BUFFER_SIZE_SET(*msg_word,
1135 ring_buf_size);
1136
1137 /* word 2 */
1138 msg_word++;
1139 *msg_word = 0;
1140
1141 if (htt_tlv_filter->enable_fp) {
nobeljd124b742017-10-16 11:59:12 -07001142 /* TYPE: MGMT */
1143 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1144 FP, MGMT, 0000,
1145 (htt_tlv_filter->fp_mgmt_filter &
1146 FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
1147 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1148 FP, MGMT, 0001,
1149 (htt_tlv_filter->fp_mgmt_filter &
1150 FILTER_MGMT_ASSOC_RES) ? 1 : 0);
1151 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1152 FP, MGMT, 0010,
1153 (htt_tlv_filter->fp_mgmt_filter &
1154 FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
1155 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1156 FP, MGMT, 0011,
1157 (htt_tlv_filter->fp_mgmt_filter &
1158 FILTER_MGMT_REASSOC_RES) ? 1 : 0);
1159 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1160 FP, MGMT, 0100,
1161 (htt_tlv_filter->fp_mgmt_filter &
1162 FILTER_MGMT_PROBE_REQ) ? 1 : 0);
1163 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1164 FP, MGMT, 0101,
1165 (htt_tlv_filter->fp_mgmt_filter &
1166 FILTER_MGMT_PROBE_RES) ? 1 : 0);
1167 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1168 FP, MGMT, 0110,
1169 (htt_tlv_filter->fp_mgmt_filter &
1170 FILTER_MGMT_TIM_ADVT) ? 1 : 0);
1171 /* reserved */
Kai Chen6eca1a62017-01-12 10:17:53 -08001172 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, FP,
nobeljd124b742017-10-16 11:59:12 -07001173 MGMT, 0111,
1174 (htt_tlv_filter->fp_mgmt_filter &
1175 FILTER_MGMT_RESERVED_7) ? 1 : 0);
1176 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1177 FP, MGMT, 1000,
1178 (htt_tlv_filter->fp_mgmt_filter &
1179 FILTER_MGMT_BEACON) ? 1 : 0);
1180 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1181 FP, MGMT, 1001,
1182 (htt_tlv_filter->fp_mgmt_filter &
1183 FILTER_MGMT_ATIM) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001184 }
1185
1186 if (htt_tlv_filter->enable_md) {
sumedh baikady59a2d332018-05-22 01:50:38 -07001187 /* TYPE: MGMT */
1188 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1189 MD, MGMT, 0000,
1190 (htt_tlv_filter->md_mgmt_filter &
1191 FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
1192 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1193 MD, MGMT, 0001,
1194 (htt_tlv_filter->md_mgmt_filter &
1195 FILTER_MGMT_ASSOC_RES) ? 1 : 0);
1196 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1197 MD, MGMT, 0010,
1198 (htt_tlv_filter->md_mgmt_filter &
1199 FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
1200 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1201 MD, MGMT, 0011,
1202 (htt_tlv_filter->md_mgmt_filter &
1203 FILTER_MGMT_REASSOC_RES) ? 1 : 0);
1204 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1205 MD, MGMT, 0100,
1206 (htt_tlv_filter->md_mgmt_filter &
1207 FILTER_MGMT_PROBE_REQ) ? 1 : 0);
1208 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1209 MD, MGMT, 0101,
1210 (htt_tlv_filter->md_mgmt_filter &
1211 FILTER_MGMT_PROBE_RES) ? 1 : 0);
1212 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1213 MD, MGMT, 0110,
1214 (htt_tlv_filter->md_mgmt_filter &
1215 FILTER_MGMT_TIM_ADVT) ? 1 : 0);
1216 /* reserved */
Kai Chen6eca1a62017-01-12 10:17:53 -08001217 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
sumedh baikady59a2d332018-05-22 01:50:38 -07001218 MGMT, 0111,
1219 (htt_tlv_filter->md_mgmt_filter &
1220 FILTER_MGMT_RESERVED_7) ? 1 : 0);
1221 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1222 MD, MGMT, 1000,
1223 (htt_tlv_filter->md_mgmt_filter &
1224 FILTER_MGMT_BEACON) ? 1 : 0);
1225 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1226 MD, MGMT, 1001,
1227 (htt_tlv_filter->md_mgmt_filter &
1228 FILTER_MGMT_ATIM) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001229 }
1230
1231 if (htt_tlv_filter->enable_mo) {
nobeljd124b742017-10-16 11:59:12 -07001232 /* TYPE: MGMT */
1233 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1234 MO, MGMT, 0000,
1235 (htt_tlv_filter->mo_mgmt_filter &
1236 FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
1237 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1238 MO, MGMT, 0001,
1239 (htt_tlv_filter->mo_mgmt_filter &
1240 FILTER_MGMT_ASSOC_RES) ? 1 : 0);
1241 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1242 MO, MGMT, 0010,
1243 (htt_tlv_filter->mo_mgmt_filter &
1244 FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
1245 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1246 MO, MGMT, 0011,
1247 (htt_tlv_filter->mo_mgmt_filter &
1248 FILTER_MGMT_REASSOC_RES) ? 1 : 0);
1249 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1250 MO, MGMT, 0100,
1251 (htt_tlv_filter->mo_mgmt_filter &
1252 FILTER_MGMT_PROBE_REQ) ? 1 : 0);
1253 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1254 MO, MGMT, 0101,
1255 (htt_tlv_filter->mo_mgmt_filter &
1256 FILTER_MGMT_PROBE_RES) ? 1 : 0);
1257 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1258 MO, MGMT, 0110,
1259 (htt_tlv_filter->mo_mgmt_filter &
1260 FILTER_MGMT_TIM_ADVT) ? 1 : 0);
1261 /* reserved */
Kai Chen6eca1a62017-01-12 10:17:53 -08001262 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MO,
nobeljd124b742017-10-16 11:59:12 -07001263 MGMT, 0111,
1264 (htt_tlv_filter->mo_mgmt_filter &
1265 FILTER_MGMT_RESERVED_7) ? 1 : 0);
1266 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1267 MO, MGMT, 1000,
1268 (htt_tlv_filter->mo_mgmt_filter &
1269 FILTER_MGMT_BEACON) ? 1 : 0);
1270 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1271 MO, MGMT, 1001,
1272 (htt_tlv_filter->mo_mgmt_filter &
1273 FILTER_MGMT_ATIM) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001274 }
Ravi Joshi2320b6f2017-05-24 15:43:04 -07001275
Kai Chen6eca1a62017-01-12 10:17:53 -08001276 /* word 3 */
1277 msg_word++;
1278 *msg_word = 0;
1279
1280 if (htt_tlv_filter->enable_fp) {
nobeljd124b742017-10-16 11:59:12 -07001281 /* TYPE: MGMT */
1282 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1283 FP, MGMT, 1010,
1284 (htt_tlv_filter->fp_mgmt_filter &
1285 FILTER_MGMT_DISASSOC) ? 1 : 0);
1286 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1287 FP, MGMT, 1011,
1288 (htt_tlv_filter->fp_mgmt_filter &
1289 FILTER_MGMT_AUTH) ? 1 : 0);
1290 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1291 FP, MGMT, 1100,
1292 (htt_tlv_filter->fp_mgmt_filter &
1293 FILTER_MGMT_DEAUTH) ? 1 : 0);
1294 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1295 FP, MGMT, 1101,
1296 (htt_tlv_filter->fp_mgmt_filter &
1297 FILTER_MGMT_ACTION) ? 1 : 0);
1298 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1299 FP, MGMT, 1110,
1300 (htt_tlv_filter->fp_mgmt_filter &
1301 FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1302 /* reserved*/
Kai Chen6eca1a62017-01-12 10:17:53 -08001303 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, FP,
nobeljd124b742017-10-16 11:59:12 -07001304 MGMT, 1111,
1305 (htt_tlv_filter->fp_mgmt_filter &
1306 FILTER_MGMT_RESERVED_15) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001307 }
1308
1309 if (htt_tlv_filter->enable_md) {
sumedh baikady59a2d332018-05-22 01:50:38 -07001310 /* TYPE: MGMT */
1311 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1312 MD, MGMT, 1010,
1313 (htt_tlv_filter->md_mgmt_filter &
1314 FILTER_MGMT_DISASSOC) ? 1 : 0);
1315 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1316 MD, MGMT, 1011,
1317 (htt_tlv_filter->md_mgmt_filter &
1318 FILTER_MGMT_AUTH) ? 1 : 0);
1319 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1320 MD, MGMT, 1100,
1321 (htt_tlv_filter->md_mgmt_filter &
1322 FILTER_MGMT_DEAUTH) ? 1 : 0);
1323 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1324 MD, MGMT, 1101,
1325 (htt_tlv_filter->md_mgmt_filter &
1326 FILTER_MGMT_ACTION) ? 1 : 0);
1327 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1328 MD, MGMT, 1110,
1329 (htt_tlv_filter->md_mgmt_filter &
1330 FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001331 }
1332
1333 if (htt_tlv_filter->enable_mo) {
nobeljd124b742017-10-16 11:59:12 -07001334 /* TYPE: MGMT */
1335 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1336 MO, MGMT, 1010,
1337 (htt_tlv_filter->mo_mgmt_filter &
1338 FILTER_MGMT_DISASSOC) ? 1 : 0);
1339 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1340 MO, MGMT, 1011,
1341 (htt_tlv_filter->mo_mgmt_filter &
1342 FILTER_MGMT_AUTH) ? 1 : 0);
1343 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1344 MO, MGMT, 1100,
1345 (htt_tlv_filter->mo_mgmt_filter &
1346 FILTER_MGMT_DEAUTH) ? 1 : 0);
1347 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1348 MO, MGMT, 1101,
1349 (htt_tlv_filter->mo_mgmt_filter &
1350 FILTER_MGMT_ACTION) ? 1 : 0);
1351 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1352 MO, MGMT, 1110,
1353 (htt_tlv_filter->mo_mgmt_filter &
1354 FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1355 /* reserved*/
Kai Chen6eca1a62017-01-12 10:17:53 -08001356 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MO,
nobeljd124b742017-10-16 11:59:12 -07001357 MGMT, 1111,
1358 (htt_tlv_filter->mo_mgmt_filter &
1359 FILTER_MGMT_RESERVED_15) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001360 }
1361
1362 /* word 4 */
1363 msg_word++;
1364 *msg_word = 0;
1365
1366 if (htt_tlv_filter->enable_fp) {
nobeljd124b742017-10-16 11:59:12 -07001367 /* TYPE: CTRL */
1368 /* reserved */
Keyur Parekhdb0fa142017-07-13 19:40:22 -07001369 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
nobeljd124b742017-10-16 11:59:12 -07001370 CTRL, 0000,
1371 (htt_tlv_filter->fp_ctrl_filter &
1372 FILTER_CTRL_RESERVED_1) ? 1 : 0);
1373 /* reserved */
Keyur Parekhdb0fa142017-07-13 19:40:22 -07001374 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
nobeljd124b742017-10-16 11:59:12 -07001375 CTRL, 0001,
1376 (htt_tlv_filter->fp_ctrl_filter &
1377 FILTER_CTRL_RESERVED_2) ? 1 : 0);
Keyur Parekhdb0fa142017-07-13 19:40:22 -07001378 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
nobeljd124b742017-10-16 11:59:12 -07001379 CTRL, 0010,
1380 (htt_tlv_filter->fp_ctrl_filter &
1381 FILTER_CTRL_TRIGGER) ? 1 : 0);
1382 /* reserved */
Keyur Parekhdb0fa142017-07-13 19:40:22 -07001383 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
nobeljd124b742017-10-16 11:59:12 -07001384 CTRL, 0011,
1385 (htt_tlv_filter->fp_ctrl_filter &
1386 FILTER_CTRL_RESERVED_4) ? 1 : 0);
Keyur Parekhdb0fa142017-07-13 19:40:22 -07001387 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
nobeljd124b742017-10-16 11:59:12 -07001388 CTRL, 0100,
1389 (htt_tlv_filter->fp_ctrl_filter &
1390 FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
Keyur Parekhdb0fa142017-07-13 19:40:22 -07001391 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
nobeljd124b742017-10-16 11:59:12 -07001392 CTRL, 0101,
1393 (htt_tlv_filter->fp_ctrl_filter &
1394 FILTER_CTRL_VHT_NDP) ? 1 : 0);
Keyur Parekhdb0fa142017-07-13 19:40:22 -07001395 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
nobeljd124b742017-10-16 11:59:12 -07001396 CTRL, 0110,
1397 (htt_tlv_filter->fp_ctrl_filter &
1398 FILTER_CTRL_FRAME_EXT) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001399 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
nobeljd124b742017-10-16 11:59:12 -07001400 CTRL, 0111,
1401 (htt_tlv_filter->fp_ctrl_filter &
1402 FILTER_CTRL_CTRLWRAP) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001403 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
nobeljd124b742017-10-16 11:59:12 -07001404 CTRL, 1000,
1405 (htt_tlv_filter->fp_ctrl_filter &
1406 FILTER_CTRL_BA_REQ) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001407 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
nobeljd124b742017-10-16 11:59:12 -07001408 CTRL, 1001,
1409 (htt_tlv_filter->fp_ctrl_filter &
1410 FILTER_CTRL_BA) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001411 }
1412
1413 if (htt_tlv_filter->enable_md) {
sumedh baikady59a2d332018-05-22 01:50:38 -07001414 /* TYPE: CTRL */
1415 /* reserved */
Kai Chen6eca1a62017-01-12 10:17:53 -08001416 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
sumedh baikady59a2d332018-05-22 01:50:38 -07001417 CTRL, 0000,
1418 (htt_tlv_filter->md_ctrl_filter &
1419 FILTER_CTRL_RESERVED_1) ? 1 : 0);
1420 /* reserved */
Kai Chen6eca1a62017-01-12 10:17:53 -08001421 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
sumedh baikady59a2d332018-05-22 01:50:38 -07001422 CTRL, 0001,
1423 (htt_tlv_filter->md_ctrl_filter &
1424 FILTER_CTRL_RESERVED_2) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001425 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
sumedh baikady59a2d332018-05-22 01:50:38 -07001426 CTRL, 0010,
1427 (htt_tlv_filter->md_ctrl_filter &
1428 FILTER_CTRL_TRIGGER) ? 1 : 0);
1429 /* reserved */
1430 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1431 CTRL, 0011,
1432 (htt_tlv_filter->md_ctrl_filter &
1433 FILTER_CTRL_RESERVED_4) ? 1 : 0);
1434 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1435 CTRL, 0100,
1436 (htt_tlv_filter->md_ctrl_filter &
1437 FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1438 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1439 CTRL, 0101,
1440 (htt_tlv_filter->md_ctrl_filter &
1441 FILTER_CTRL_VHT_NDP) ? 1 : 0);
1442 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1443 CTRL, 0110,
1444 (htt_tlv_filter->md_ctrl_filter &
1445 FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1446 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1447 CTRL, 0111,
1448 (htt_tlv_filter->md_ctrl_filter &
1449 FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1450 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1451 CTRL, 1000,
1452 (htt_tlv_filter->md_ctrl_filter &
1453 FILTER_CTRL_BA_REQ) ? 1 : 0);
1454 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1455 CTRL, 1001,
1456 (htt_tlv_filter->md_ctrl_filter &
1457 FILTER_CTRL_BA) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001458 }
1459
1460 if (htt_tlv_filter->enable_mo) {
nobeljd124b742017-10-16 11:59:12 -07001461 /* TYPE: CTRL */
1462 /* reserved */
Kai Chen6eca1a62017-01-12 10:17:53 -08001463 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
nobeljd124b742017-10-16 11:59:12 -07001464 CTRL, 0000,
1465 (htt_tlv_filter->mo_ctrl_filter &
1466 FILTER_CTRL_RESERVED_1) ? 1 : 0);
1467 /* reserved */
Kai Chen6eca1a62017-01-12 10:17:53 -08001468 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
nobeljd124b742017-10-16 11:59:12 -07001469 CTRL, 0001,
1470 (htt_tlv_filter->mo_ctrl_filter &
1471 FILTER_CTRL_RESERVED_2) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001472 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
nobeljd124b742017-10-16 11:59:12 -07001473 CTRL, 0010,
1474 (htt_tlv_filter->mo_ctrl_filter &
1475 FILTER_CTRL_TRIGGER) ? 1 : 0);
1476 /* reserved */
Ravi Joshi2320b6f2017-05-24 15:43:04 -07001477 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
nobeljd124b742017-10-16 11:59:12 -07001478 CTRL, 0011,
1479 (htt_tlv_filter->mo_ctrl_filter &
1480 FILTER_CTRL_RESERVED_4) ? 1 : 0);
Ravi Joshi2320b6f2017-05-24 15:43:04 -07001481 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
nobeljd124b742017-10-16 11:59:12 -07001482 CTRL, 0100,
1483 (htt_tlv_filter->mo_ctrl_filter &
1484 FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
Ravi Joshi2320b6f2017-05-24 15:43:04 -07001485 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
nobeljd124b742017-10-16 11:59:12 -07001486 CTRL, 0101,
1487 (htt_tlv_filter->mo_ctrl_filter &
1488 FILTER_CTRL_VHT_NDP) ? 1 : 0);
Ravi Joshi2320b6f2017-05-24 15:43:04 -07001489 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
nobeljd124b742017-10-16 11:59:12 -07001490 CTRL, 0110,
1491 (htt_tlv_filter->mo_ctrl_filter &
1492 FILTER_CTRL_FRAME_EXT) ? 1 : 0);
Ravi Joshi2320b6f2017-05-24 15:43:04 -07001493 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
nobeljd124b742017-10-16 11:59:12 -07001494 CTRL, 0111,
1495 (htt_tlv_filter->mo_ctrl_filter &
1496 FILTER_CTRL_CTRLWRAP) ? 1 : 0);
Ravi Joshi2320b6f2017-05-24 15:43:04 -07001497 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
nobeljd124b742017-10-16 11:59:12 -07001498 CTRL, 1000,
1499 (htt_tlv_filter->mo_ctrl_filter &
1500 FILTER_CTRL_BA_REQ) ? 1 : 0);
Ravi Joshi2320b6f2017-05-24 15:43:04 -07001501 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
nobeljd124b742017-10-16 11:59:12 -07001502 CTRL, 1001,
1503 (htt_tlv_filter->mo_ctrl_filter &
1504 FILTER_CTRL_BA) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001505 }
1506
1507 /* word 5 */
1508 msg_word++;
1509 *msg_word = 0;
1510 if (htt_tlv_filter->enable_fp) {
nobeljd124b742017-10-16 11:59:12 -07001511 /* TYPE: CTRL */
Kai Chen6eca1a62017-01-12 10:17:53 -08001512 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
nobeljd124b742017-10-16 11:59:12 -07001513 CTRL, 1010,
1514 (htt_tlv_filter->fp_ctrl_filter &
1515 FILTER_CTRL_PSPOLL) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001516 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
nobeljd124b742017-10-16 11:59:12 -07001517 CTRL, 1011,
1518 (htt_tlv_filter->fp_ctrl_filter &
1519 FILTER_CTRL_RTS) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001520 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
nobeljd124b742017-10-16 11:59:12 -07001521 CTRL, 1100,
1522 (htt_tlv_filter->fp_ctrl_filter &
1523 FILTER_CTRL_CTS) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001524 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
nobeljd124b742017-10-16 11:59:12 -07001525 CTRL, 1101,
1526 (htt_tlv_filter->fp_ctrl_filter &
1527 FILTER_CTRL_ACK) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001528 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
nobeljd124b742017-10-16 11:59:12 -07001529 CTRL, 1110,
1530 (htt_tlv_filter->fp_ctrl_filter &
1531 FILTER_CTRL_CFEND) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001532 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
nobeljd124b742017-10-16 11:59:12 -07001533 CTRL, 1111,
1534 (htt_tlv_filter->fp_ctrl_filter &
1535 FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1536 /* TYPE: DATA */
Kai Chen6eca1a62017-01-12 10:17:53 -08001537 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
nobeljd124b742017-10-16 11:59:12 -07001538 DATA, MCAST,
1539 (htt_tlv_filter->fp_data_filter &
1540 FILTER_DATA_MCAST) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001541 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
nobeljd124b742017-10-16 11:59:12 -07001542 DATA, UCAST,
1543 (htt_tlv_filter->fp_data_filter &
1544 FILTER_DATA_UCAST) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001545 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
nobeljd124b742017-10-16 11:59:12 -07001546 DATA, NULL,
1547 (htt_tlv_filter->fp_data_filter &
1548 FILTER_DATA_NULL) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001549 }
1550
1551 if (htt_tlv_filter->enable_md) {
sumedh baikady59a2d332018-05-22 01:50:38 -07001552 /* TYPE: CTRL */
Kai Chen6eca1a62017-01-12 10:17:53 -08001553 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
sumedh baikady59a2d332018-05-22 01:50:38 -07001554 CTRL, 1010,
1555 (htt_tlv_filter->md_ctrl_filter &
1556 FILTER_CTRL_PSPOLL) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001557 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
sumedh baikady59a2d332018-05-22 01:50:38 -07001558 CTRL, 1011,
1559 (htt_tlv_filter->md_ctrl_filter &
1560 FILTER_CTRL_RTS) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001561 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
sumedh baikady59a2d332018-05-22 01:50:38 -07001562 CTRL, 1100,
1563 (htt_tlv_filter->md_ctrl_filter &
1564 FILTER_CTRL_CTS) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001565 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
sumedh baikady59a2d332018-05-22 01:50:38 -07001566 CTRL, 1101,
1567 (htt_tlv_filter->md_ctrl_filter &
1568 FILTER_CTRL_ACK) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001569 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
sumedh baikady59a2d332018-05-22 01:50:38 -07001570 CTRL, 1110,
1571 (htt_tlv_filter->md_ctrl_filter &
1572 FILTER_CTRL_CFEND) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001573 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
sumedh baikady59a2d332018-05-22 01:50:38 -07001574 CTRL, 1111,
1575 (htt_tlv_filter->md_ctrl_filter &
1576 FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1577 /* TYPE: DATA */
Kai Chen6eca1a62017-01-12 10:17:53 -08001578 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
sumedh baikady59a2d332018-05-22 01:50:38 -07001579 DATA, MCAST,
1580 (htt_tlv_filter->md_data_filter &
1581 FILTER_DATA_MCAST) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001582 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
sumedh baikady59a2d332018-05-22 01:50:38 -07001583 DATA, UCAST,
1584 (htt_tlv_filter->md_data_filter &
1585 FILTER_DATA_UCAST) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001586 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
sumedh baikady59a2d332018-05-22 01:50:38 -07001587 DATA, NULL,
1588 (htt_tlv_filter->md_data_filter &
1589 FILTER_DATA_NULL) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001590 }
Ravi Joshi2320b6f2017-05-24 15:43:04 -07001591
Kai Chen6eca1a62017-01-12 10:17:53 -08001592 if (htt_tlv_filter->enable_mo) {
nobeljd124b742017-10-16 11:59:12 -07001593 /* TYPE: CTRL */
Kai Chen6eca1a62017-01-12 10:17:53 -08001594 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
nobeljd124b742017-10-16 11:59:12 -07001595 CTRL, 1010,
1596 (htt_tlv_filter->mo_ctrl_filter &
1597 FILTER_CTRL_PSPOLL) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001598 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
nobeljd124b742017-10-16 11:59:12 -07001599 CTRL, 1011,
1600 (htt_tlv_filter->mo_ctrl_filter &
1601 FILTER_CTRL_RTS) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001602 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
nobeljd124b742017-10-16 11:59:12 -07001603 CTRL, 1100,
1604 (htt_tlv_filter->mo_ctrl_filter &
1605 FILTER_CTRL_CTS) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001606 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
nobeljd124b742017-10-16 11:59:12 -07001607 CTRL, 1101,
1608 (htt_tlv_filter->mo_ctrl_filter &
1609 FILTER_CTRL_ACK) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001610 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
nobeljd124b742017-10-16 11:59:12 -07001611 CTRL, 1110,
1612 (htt_tlv_filter->mo_ctrl_filter &
1613 FILTER_CTRL_CFEND) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001614 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
nobeljd124b742017-10-16 11:59:12 -07001615 CTRL, 1111,
1616 (htt_tlv_filter->mo_ctrl_filter &
1617 FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1618 /* TYPE: DATA */
Kai Chen6eca1a62017-01-12 10:17:53 -08001619 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
nobeljd124b742017-10-16 11:59:12 -07001620 DATA, MCAST,
1621 (htt_tlv_filter->mo_data_filter &
1622 FILTER_DATA_MCAST) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001623 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
nobeljd124b742017-10-16 11:59:12 -07001624 DATA, UCAST,
1625 (htt_tlv_filter->mo_data_filter &
1626 FILTER_DATA_UCAST) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001627 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
nobeljd124b742017-10-16 11:59:12 -07001628 DATA, NULL,
1629 (htt_tlv_filter->mo_data_filter &
1630 FILTER_DATA_NULL) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001631 }
1632
1633 /* word 6 */
1634 msg_word++;
1635 *msg_word = 0;
1636 tlv_filter = 0;
1637 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_START,
1638 htt_tlv_filter->mpdu_start);
1639 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_START,
1640 htt_tlv_filter->msdu_start);
1641 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET,
1642 htt_tlv_filter->packet);
1643 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_END,
1644 htt_tlv_filter->msdu_end);
1645 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_END,
1646 htt_tlv_filter->mpdu_end);
1647 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET_HEADER,
1648 htt_tlv_filter->packet_header);
1649 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, ATTENTION,
Karunakar Dasineni40555682017-03-26 22:44:39 -07001650 htt_tlv_filter->attention);
Kai Chen6eca1a62017-01-12 10:17:53 -08001651 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_START,
1652 htt_tlv_filter->ppdu_start);
1653 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END,
1654 htt_tlv_filter->ppdu_end);
1655 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_USER_STATS,
1656 htt_tlv_filter->ppdu_end_user_stats);
1657 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter,
1658 PPDU_END_USER_STATS_EXT,
1659 htt_tlv_filter->ppdu_end_user_stats_ext);
1660 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_STATUS_DONE,
1661 htt_tlv_filter->ppdu_end_status_done);
sumedh baikady308ff002017-09-18 16:24:36 -07001662 /* RESERVED bit maps to header_per_msdu in htt_tlv_filter*/
1663 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, RESERVED,
1664 htt_tlv_filter->header_per_msdu);
Kai Chen6eca1a62017-01-12 10:17:53 -08001665
1666 HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_SET(*msg_word, tlv_filter);
1667
Kiran Venkatappa07921612019-03-02 23:14:12 +05301668 msg_word++;
1669 *msg_word = 0;
1670 if (htt_tlv_filter->offset_valid) {
1671 HTT_RX_RING_SELECTION_CFG_RX_PACKET_OFFSET_SET(*msg_word,
1672 htt_tlv_filter->rx_packet_offset);
1673 HTT_RX_RING_SELECTION_CFG_RX_HEADER_OFFSET_SET(*msg_word,
1674 htt_tlv_filter->rx_header_offset);
1675
1676 msg_word++;
1677 *msg_word = 0;
1678 HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_OFFSET_SET(*msg_word,
1679 htt_tlv_filter->rx_mpdu_end_offset);
1680 HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_OFFSET_SET(*msg_word,
1681 htt_tlv_filter->rx_mpdu_start_offset);
1682
1683 msg_word++;
1684 *msg_word = 0;
1685 HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_OFFSET_SET(*msg_word,
1686 htt_tlv_filter->rx_msdu_end_offset);
1687 HTT_RX_RING_SELECTION_CFG_RX_MSDU_START_OFFSET_SET(*msg_word,
1688 htt_tlv_filter->rx_msdu_start_offset);
1689
1690 msg_word++;
1691 *msg_word = 0;
1692 HTT_RX_RING_SELECTION_CFG_RX_ATTENTION_OFFSET_SET(*msg_word,
1693 htt_tlv_filter->rx_attn_offset);
Ruben Columbusb7a1c572019-08-12 11:11:29 -07001694 msg_word++;
1695 *msg_word = 0;
1696 } else {
1697 msg_word += 4;
1698 *msg_word = 0;
Kiran Venkatappa07921612019-03-02 23:14:12 +05301699 }
1700
Ruben Columbusb7a1c572019-08-12 11:11:29 -07001701 if (mon_drop_th > 0)
1702 HTT_RX_RING_SELECTION_CFG_RX_DROP_THRESHOLD_SET(*msg_word,
1703 mon_drop_th);
1704
Kai Chen6eca1a62017-01-12 10:17:53 -08001705 /* "response_required" field should be set if a HTT response message is
1706 * required after setting up the ring.
1707 */
1708 pkt = htt_htc_pkt_alloc(soc);
1709 if (!pkt)
1710 goto fail1;
1711
1712 pkt->soc_ctxt = NULL; /* not used during send-done callback */
1713
1714 SET_HTC_PACKET_INFO_TX(
1715 &pkt->htc_pkt,
1716 dp_htt_h2t_send_complete_free_netbuf,
1717 qdf_nbuf_data(htt_msg),
1718 qdf_nbuf_len(htt_msg),
1719 soc->htc_endpoint,
1720 1); /* tag - not relevant here */
1721
1722 SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
Ankit Kumar0ead45c2019-04-29 15:32:49 +05301723 DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG,
1724 htt_logger_bufp);
Kai Chen6eca1a62017-01-12 10:17:53 -08001725 return QDF_STATUS_SUCCESS;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001726
1727fail1:
1728 qdf_nbuf_free(htt_msg);
1729fail0:
1730 return QDF_STATUS_E_FAILURE;
1731}
1732
#if defined(HTT_STATS_ENABLE)
/**
 * dp_send_htt_stat_resp() - forward one HTT EXT stats buffer to WDI listeners
 * @htt_stats: accumulated stats context; msg_len is decremented here by the
 *             chunk just delivered
 * @soc: DP soc handle, used as the WDI event context
 * @htt_msg: network buffer holding the T2H stats message; consumed (freed)
 *           on success
 *
 * Extracts the pdev id from the cookie word (word 2) of the message and
 * delivers the raw message to dp_wdi_event_handler() as a
 * WDI_EVENT_HTT_STATS event.  The WDI handler makes its own copy of the
 * data, so the nbuf is freed before returning.
 *
 * Return: QDF_STATUS_SUCCESS - the message has been consumed and freed
 */
static inline QDF_STATUS dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
			struct dp_soc *soc, qdf_nbuf_t htt_msg)

{
	uint32_t pdev_id;
	uint32_t *msg_word = NULL;
	uint32_t msg_remain_len = 0;

	msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);

	/* COOKIE MSB: low bits of word 2 carry the pdev id */
	pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;

	/* stats message length + 16 size of HTT header, capped at one
	 * extended-message chunk (DP_EXT_MSG_LENGTH)
	 */
	msg_remain_len = qdf_min(htt_stats->msg_len + 16,
			(uint32_t)DP_EXT_MSG_LENGTH);

	dp_wdi_event_handler(WDI_EVENT_HTT_STATS, soc,
			msg_word, msg_remain_len,
			WDI_NO_VAL, pdev_id);

	/* account for the chunk just handed to WDI */
	if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
		htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
	}
	/* Need to be freed here as WDI handler will
	 * make a copy of pkt to send data to application
	 */
	qdf_nbuf_free(htt_msg);
	return QDF_STATUS_SUCCESS;
}
#else
/* HTT stats delivery compiled out: signal no-support so the caller falls
 * through to normal TLV processing.  NOTE: the nbuf is NOT freed here; the
 * caller retains ownership on this path.
 */
static inline QDF_STATUS
dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
		      struct dp_soc *soc, qdf_nbuf_t htt_msg)
{
	return QDF_STATUS_E_NOSUPPORT;
}
#endif
/**
 * dp_process_htt_stat_msg(): Process the list of buffers of HTT EXT stats
 * @htt_stats: htt stats info (queue of T2H nbufs plus running msg_len)
 * @soc: DP soc handle
 *
 * The FW sends the HTT EXT STATS as a stream of T2H messages. Each T2H message
 * contains sub messages which are identified by a TLV header.
 * In this function we will process the stream of T2H messages and read all the
 * TLV contained in the message.
 *
 * The following cases have been taken care of
 * Case 1: When the tlv_remain_length <= msg_remain_length of HTT MSG buffer
 *		In this case the buffer will contain multiple tlvs.
 * Case 2: When the tlv_remain_length > msg_remain_length of HTT MSG buffer.
 *		Only one tlv will be contained in the HTT message and this tag
 *		will extend onto the next buffer.
 * Case 3: When the buffer is the continuation of the previous message
 * Case 4: tlv length is 0. which will indicate the end of message
 *
 * return: void
 */
static inline void dp_process_htt_stat_msg(struct htt_stats_context *htt_stats,
					struct dp_soc *soc)
{
	htt_tlv_tag_t tlv_type = 0xff;
	qdf_nbuf_t htt_msg = NULL;
	uint32_t *msg_word;
	/* tlv_buf_head/tail form a reassembly buffer for a TLV that spans
	 * multiple nbufs (Case 2/3); NULL means no TLV is in flight.
	 */
	uint8_t *tlv_buf_head = NULL;
	uint8_t *tlv_buf_tail = NULL;
	uint32_t msg_remain_len = 0;
	uint32_t tlv_remain_len = 0;
	uint32_t *tlv_start;
	int cookie_val;
	int cookie_msb;
	int pdev_id;
	bool copy_stats = false;
	struct dp_pdev *pdev;

	/* Process node in the HTT message queue */
	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
		!= NULL) {
		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
		cookie_val = *(msg_word + 1);
		htt_stats->msg_len = HTT_T2H_EXT_STATS_CONF_TLV_LENGTH_GET(
					*(msg_word +
					HTT_T2H_EXT_STATS_TLV_START_OFFSET));

		/* Non-zero cookie LSB: deliver the raw buffer to WDI instead
		 * of parsing it here (only when HTT_STATS_ENABLE is built in;
		 * the stub returns E_NOSUPPORT and we fall through).
		 */
		if (cookie_val) {
			if (dp_send_htt_stat_resp(htt_stats, soc, htt_msg)
				== QDF_STATUS_SUCCESS) {
				continue;
			}
		}

		cookie_msb = *(msg_word + 2);
		pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
		pdev = soc->pdev_list[pdev_id];

		/* cookie MSB bits above bit 1 request copy (accumulate)
		 * rather than print of the parsed TLVs
		 */
		if (cookie_msb >> 2) {
			copy_stats = true;
		}

		/* read 5th word */
		msg_word = msg_word + 4;
		msg_remain_len = qdf_min(htt_stats->msg_len,
				(uint32_t) DP_EXT_MSG_LENGTH);
		/* Keep processing the node till node length is 0 */
		while (msg_remain_len) {
			/*
			 * if message is not a continuation of previous message
			 * read the tlv type and tlv length
			 */
			if (!tlv_buf_head) {
				tlv_type = HTT_STATS_TLV_TAG_GET(
						*msg_word);
				tlv_remain_len = HTT_STATS_TLV_LENGTH_GET(
						*msg_word);
			}

			/* Case 4: zero-length TLV terminates the stream */
			if (tlv_remain_len == 0) {
				msg_remain_len = 0;

				if (tlv_buf_head) {
					qdf_mem_free(tlv_buf_head);
					tlv_buf_head = NULL;
					tlv_buf_tail = NULL;
				}

				goto error;
			}

			/* TLV length field excludes its own header */
			if (!tlv_buf_head)
				tlv_remain_len += HTT_TLV_HDR_LEN;

			if ((tlv_remain_len <= msg_remain_len)) {
				/* Case 3 */
				if (tlv_buf_head) {
					/* final fragment completes the
					 * reassembled TLV
					 */
					qdf_mem_copy(tlv_buf_tail,
							(uint8_t *)msg_word,
							tlv_remain_len);
					tlv_start = (uint32_t *)tlv_buf_head;
				} else {
					/* Case 1 */
					tlv_start = msg_word;
				}

				if (copy_stats)
					dp_htt_stats_copy_tag(pdev,
							      tlv_type,
							      tlv_start);
				else
					dp_htt_stats_print_tag(pdev,
							       tlv_type,
							       tlv_start);

				if (tlv_type == HTT_STATS_PEER_DETAILS_TAG ||
				    tlv_type == HTT_STATS_PEER_STATS_CMN_TAG)
					dp_peer_update_inactive_time(pdev,
								     tlv_type,
								     tlv_start);

				msg_remain_len -= tlv_remain_len;

				msg_word = (uint32_t *)
					(((uint8_t *)msg_word) +
					tlv_remain_len);

				tlv_remain_len = 0;

				if (tlv_buf_head) {
					qdf_mem_free(tlv_buf_head);
					tlv_buf_head = NULL;
					tlv_buf_tail = NULL;
				}

			} else { /* tlv_remain_len > msg_remain_len */
				/* Case 2 & 3 */
				if (!tlv_buf_head) {
					tlv_buf_head = qdf_mem_malloc(
							tlv_remain_len);

					if (!tlv_buf_head) {
						QDF_TRACE(QDF_MODULE_ID_TXRX,
								QDF_TRACE_LEVEL_ERROR,
								"Alloc failed");
						goto error;
					}

					tlv_buf_tail = tlv_buf_head;
				}

				/* stash the partial TLV; remainder arrives in
				 * the next nbuf
				 */
				qdf_mem_copy(tlv_buf_tail, (uint8_t *)msg_word,
						msg_remain_len);
				tlv_remain_len -= msg_remain_len;
				tlv_buf_tail += msg_remain_len;
			}
		}

		if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
			htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
		}

		qdf_nbuf_free(htt_msg);
	}
	return;

error:
	/* drop the current buffer and drain any queued remainder */
	qdf_nbuf_free(htt_msg);
	while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
		!= NULL)
		qdf_nbuf_free(htt_msg);
}
1943
/**
 * htt_t2h_stats_handler() - work handler that drains one completed HTT EXT
 *			     stats response from the soc queue
 * @context: opaque work context, actually a struct dp_soc pointer
 *
 * Pulls nbufs from soc->htt_stats.msg (under soc->htt_stats.lock) up to and
 * including the buffer whose DONE bit is set, i.e. exactly one complete
 * stats response, then parses it via dp_process_htt_stat_msg().  If more
 * completed responses remain, the work is rescheduled first so it queues
 * with an earlier index.
 */
void htt_t2h_stats_handler(void *context)
{
	struct dp_soc *soc = (struct dp_soc *)context;
	struct htt_stats_context htt_stats;
	uint32_t *msg_word;
	qdf_nbuf_t htt_msg = NULL;
	uint8_t done;
	uint32_t rem_stats;

	/* bail out if the soc is gone or common init has not completed */
	if (!soc || !qdf_atomic_read(&soc->cmn_init_done)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "soc: 0x%pK, init_done: %d", soc,
			  qdf_atomic_read(&soc->cmn_init_done));
		return;
	}

	qdf_mem_zero(&htt_stats, sizeof(htt_stats));
	qdf_nbuf_queue_init(&htt_stats.msg);

	/* pull one completed stats from soc->htt_stats_msg and process */
	qdf_spin_lock_bh(&soc->htt_stats.lock);
	if (!soc->htt_stats.num_stats) {
		qdf_spin_unlock_bh(&soc->htt_stats.lock);
		return;
	}
	while ((htt_msg = qdf_nbuf_queue_remove(&soc->htt_stats.msg)) != NULL) {
		msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
		msg_word = msg_word + HTT_T2H_EXT_STATS_TLV_START_OFFSET;
		done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
		qdf_nbuf_queue_add(&htt_stats.msg, htt_msg);
		/*
		 * Done bit signifies that this is the last T2H buffer in the
		 * stream of HTT EXT STATS message
		 */
		if (done)
			break;
	}
	rem_stats = --soc->htt_stats.num_stats;
	qdf_spin_unlock_bh(&soc->htt_stats.lock);

	/* If there are more stats to process, schedule stats work again.
	 * Scheduling prior to processing ht_stats to queue with early
	 * index
	 */
	if (rem_stats)
		qdf_sched_work(0, &soc->htt_stats.work);

	dp_process_htt_stat_msg(&htt_stats, soc);
}
1993
Soumya Bhat539ecfa2017-09-08 12:50:30 +05301994/*
Anish Natarajb9e7d012018-02-16 00:38:10 +05301995 * dp_get_ppdu_info_user_index: Find and allocate a per-user descriptor for a PPDU,
1996 * if a new peer id arrives in a PPDU
Soumya Bhat539ecfa2017-09-08 12:50:30 +05301997 * pdev: DP pdev handle
Anish Natarajb9e7d012018-02-16 00:38:10 +05301998 * @peer_id : peer unique identifier
1999 * @ppdu_info: per ppdu tlv structure
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302000 *
2001 * return:user index to be populated
2002 */
2003#ifdef FEATURE_PERPKT_INFO
2004static uint8_t dp_get_ppdu_info_user_index(struct dp_pdev *pdev,
Anish Natarajb9e7d012018-02-16 00:38:10 +05302005 uint16_t peer_id,
2006 struct ppdu_info *ppdu_info)
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302007{
2008 uint8_t user_index = 0;
2009 struct cdp_tx_completion_ppdu *ppdu_desc;
2010 struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2011
Anish Natarajb9e7d012018-02-16 00:38:10 +05302012 ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302013
Anish Natarajb9e7d012018-02-16 00:38:10 +05302014 while ((user_index + 1) <= ppdu_info->last_user) {
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302015 ppdu_user_desc = &ppdu_desc->user[user_index];
2016 if (ppdu_user_desc->peer_id != peer_id) {
2017 user_index++;
2018 continue;
2019 } else {
Soumya Bhat835033e2017-10-04 22:21:46 +05302020 /* Max users possible is 8 so user array index should
2021 * not exceed 7
2022 */
2023 qdf_assert_always(user_index <= CDP_MU_MAX_USER_INDEX);
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302024 return user_index;
2025 }
2026 }
2027
Anish Natarajb9e7d012018-02-16 00:38:10 +05302028 ppdu_info->last_user++;
Soumya Bhat835033e2017-10-04 22:21:46 +05302029 /* Max users possible is 8 so last user should not exceed 8 */
Anish Natarajb9e7d012018-02-16 00:38:10 +05302030 qdf_assert_always(ppdu_info->last_user <= CDP_MU_MAX_USERS);
2031 return ppdu_info->last_user - 1;
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302032}
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302033
2034/*
2035 * dp_process_ppdu_stats_common_tlv: Process htt_ppdu_stats_common_tlv
2036 * pdev: DP pdev handle
2037 * @tag_buf: buffer containing the tlv htt_ppdu_stats_common_tlv
Anish Natarajb9e7d012018-02-16 00:38:10 +05302038 * @ppdu_info: per ppdu tlv structure
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302039 *
2040 * return:void
2041 */
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302042static void dp_process_ppdu_stats_common_tlv(struct dp_pdev *pdev,
Anish Natarajb9e7d012018-02-16 00:38:10 +05302043 uint32_t *tag_buf, struct ppdu_info *ppdu_info)
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302044{
Pamidipati, Vijayd7eb83e2017-09-20 21:19:56 +05302045 uint16_t frame_type;
nobelj57055e52019-07-11 00:38:49 -07002046 uint16_t frame_ctrl;
Venkateswara Swamy Bandaru2907bc52017-11-15 19:04:49 +05302047 uint16_t freq;
2048 struct dp_soc *soc = NULL;
Anish Natarajb9e7d012018-02-16 00:38:10 +05302049 struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
nobelj182938a2019-11-25 14:09:08 -08002050 uint64_t ppdu_start_timestamp;
2051 uint32_t *start_tag_buf;
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302052
nobelj182938a2019-11-25 14:09:08 -08002053 start_tag_buf = tag_buf;
Anish Natarajb9e7d012018-02-16 00:38:10 +05302054 ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302055
nobelj182938a2019-11-25 14:09:08 -08002056 tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(RING_ID_SCH_CMD_ID);
nobeljdebe2b32019-04-23 11:18:47 -07002057 ppdu_info->sched_cmdid =
2058 HTT_PPDU_STATS_COMMON_TLV_SCH_CMDID_GET(*tag_buf);
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302059 ppdu_desc->num_users =
2060 HTT_PPDU_STATS_COMMON_TLV_NUM_USERS_GET(*tag_buf);
nobelj182938a2019-11-25 14:09:08 -08002061
2062 tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(QTYPE_FRM_TYPE);
Pamidipati, Vijayd7eb83e2017-09-20 21:19:56 +05302063 frame_type = HTT_PPDU_STATS_COMMON_TLV_FRM_TYPE_GET(*tag_buf);
nobelj68930ca2019-10-03 17:22:47 -07002064 ppdu_desc->htt_frame_type = frame_type;
Pamidipati, Vijayd7eb83e2017-09-20 21:19:56 +05302065
nobelj57055e52019-07-11 00:38:49 -07002066 frame_ctrl = ppdu_desc->frame_ctrl;
2067
nobeljdebe2b32019-04-23 11:18:47 -07002068 switch (frame_type) {
2069 case HTT_STATS_FTYPE_TIDQ_DATA_SU:
2070 case HTT_STATS_FTYPE_TIDQ_DATA_MU:
nobelj219e7e52019-10-18 13:53:12 -07002071 case HTT_STATS_FTYPE_SGEN_QOS_NULL:
nobeljdebe2b32019-04-23 11:18:47 -07002072 /*
2073 * for management packet, frame type come as DATA_SU
2074 * need to check frame_ctrl before setting frame_type
2075 */
nobelj57055e52019-07-11 00:38:49 -07002076 if (HTT_GET_FRAME_CTRL_TYPE(frame_ctrl) <= FRAME_CTRL_TYPE_CTRL)
nobeljdebe2b32019-04-23 11:18:47 -07002077 ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL;
2078 else
2079 ppdu_desc->frame_type = CDP_PPDU_FTYPE_DATA;
2080 break;
2081 case HTT_STATS_FTYPE_SGEN_MU_BAR:
2082 case HTT_STATS_FTYPE_SGEN_BAR:
Chaithanya Garrepallibe9d5fc2018-07-26 19:21:24 +05302083 ppdu_desc->frame_type = CDP_PPDU_FTYPE_BAR;
nobelj68930ca2019-10-03 17:22:47 -07002084 ppdu_desc->bar_ppdu_id = ppdu_info->ppdu_id;
nobeljdebe2b32019-04-23 11:18:47 -07002085 break;
2086 default:
Pamidipati, Vijayd7eb83e2017-09-20 21:19:56 +05302087 ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL;
nobeljdebe2b32019-04-23 11:18:47 -07002088 break;
2089 }
Pamidipati, Vijayd7eb83e2017-09-20 21:19:56 +05302090
nobelj182938a2019-11-25 14:09:08 -08002091 tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(FES_DUR_US);
Pranita Solankea12b4b32017-11-20 23:04:14 +05302092 ppdu_desc->tx_duration = *tag_buf;
nobelj182938a2019-11-25 14:09:08 -08002093
2094 tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(START_TSTMP_L32_US);
Pranita Solankea12b4b32017-11-20 23:04:14 +05302095 ppdu_desc->ppdu_start_timestamp = *tag_buf;
nobelj8c07d612018-03-01 12:18:07 -08002096
nobelj182938a2019-11-25 14:09:08 -08002097 tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(CHAN_MHZ_PHY_MODE);
Venkateswara Swamy Bandaru2907bc52017-11-15 19:04:49 +05302098 freq = HTT_PPDU_STATS_COMMON_TLV_CHAN_MHZ_GET(*tag_buf);
2099 if (freq != ppdu_desc->channel) {
2100 soc = pdev->soc;
2101 ppdu_desc->channel = freq;
2102 if (soc && soc->cdp_soc.ol_ops->freq_to_channel)
2103 pdev->operating_channel =
Pavankumar Nandeshwar4c7b81b2019-09-27 11:27:12 +05302104 soc->cdp_soc.ol_ops->freq_to_channel(soc->ctrl_psoc,
2105 pdev->pdev_id, freq);
Venkateswara Swamy Bandaru2907bc52017-11-15 19:04:49 +05302106 }
Pranita Solankea12b4b32017-11-20 23:04:14 +05302107
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302108 ppdu_desc->phy_mode = HTT_PPDU_STATS_COMMON_TLV_PHY_MODE_GET(*tag_buf);
nobelj4771edb2019-12-09 12:47:41 -08002109
Ruben Columbus1bec34c2019-11-22 11:26:25 -08002110 tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(RESV_NUM_UL_BEAM);
2111 ppdu_desc->beam_change =
2112 HTT_PPDU_STATS_COMMON_TLV_BEAM_CHANGE_GET(*tag_buf);
2113
nobelj4771edb2019-12-09 12:47:41 -08002114 dp_tx_capture_htt_frame_counter(pdev, frame_type);
nobelj182938a2019-11-25 14:09:08 -08002115
2116 tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(START_TSTMP_U32_US);
2117 ppdu_start_timestamp = *tag_buf;
2118 ppdu_desc->ppdu_start_timestamp |= ((ppdu_start_timestamp <<
2119 HTT_SHIFT_UPPER_TIMESTAMP) &
2120 HTT_MASK_UPPER_TIMESTAMP);
2121
2122 ppdu_desc->ppdu_end_timestamp = ppdu_desc->ppdu_start_timestamp +
2123 ppdu_desc->tx_duration;
2124 /* Ack time stamp is same as end time stamp*/
2125 ppdu_desc->ack_timestamp = ppdu_desc->ppdu_end_timestamp;
2126
2127 ppdu_desc->ppdu_end_timestamp = ppdu_desc->ppdu_start_timestamp +
2128 ppdu_desc->tx_duration;
2129
2130 ppdu_desc->bar_ppdu_start_timestamp = ppdu_desc->ppdu_start_timestamp;
2131 ppdu_desc->bar_ppdu_end_timestamp = ppdu_desc->ppdu_end_timestamp;
2132 ppdu_desc->bar_tx_duration = ppdu_desc->tx_duration;
2133
2134 /* Ack time stamp is same as end time stamp*/
2135 ppdu_desc->ack_timestamp = ppdu_desc->ppdu_end_timestamp;
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302136}
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302137
/*
 * dp_process_ppdu_stats_user_common_tlv: Process ppdu_stats_user_common
 * @pdev: DP pdev handle
 * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_common_tlv
 * @ppdu_info: per ppdu tlv structure
 *
 * Looks up (or allocates) this peer's per-user slot in the completion
 * descriptor, records its MAC address (from the vdev for the scan peer,
 * from the peer table otherwise), and fills the per-user delayed-BA,
 * mcast/ucast MPDU-tried, qos/frame control and opaque-cookie fields by
 * walking tag_buf word by word.
 *
 * return:void
 */
static void dp_process_ppdu_stats_user_common_tlv(
		struct dp_pdev *pdev, uint32_t *tag_buf,
		struct ppdu_info *ppdu_info)
{
	uint16_t peer_id;
	struct cdp_tx_completion_ppdu *ppdu_desc;
	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
	uint8_t curr_user_index = 0;
	struct dp_peer *peer;
	struct dp_vdev *vdev;

	ppdu_desc =
		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);

	/* word 1: sw peer id */
	tag_buf++;
	peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf);

	curr_user_index =
		dp_get_ppdu_info_user_index(pdev,
					    peer_id, ppdu_info);
	ppdu_user_desc = &ppdu_desc->user[curr_user_index];

	if (peer_id == DP_SCAN_PEER_ID) {
		/* scan peer has no peer entry: take the MAC from the vdev */
		ppdu_desc->vdev_id =
			HTT_PPDU_STATS_USER_COMMON_TLV_VAP_ID_GET(*tag_buf);
		vdev =
		dp_get_vdev_from_soc_vdev_id_wifi3(pdev->soc,
						   ppdu_desc->vdev_id);
		if (!vdev)
			return;
		qdf_mem_copy(ppdu_user_desc->mac_addr, vdev->mac_addr.raw,
			     QDF_MAC_ADDR_SIZE);
	} else {
		/* ref-counted lookup; released right after the MAC copy */
		peer = dp_peer_find_by_id(pdev->soc, peer_id);
		if (!peer)
			return;
		qdf_mem_copy(ppdu_user_desc->mac_addr,
			     peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
		dp_peer_unref_del_find_by_id(peer);
	}

	ppdu_user_desc->peer_id = peer_id;

	/* word 2: delayed-BA flag, mcast flag, MPDUs tried */
	tag_buf++;

	if (HTT_PPDU_STATS_USER_COMMON_TLV_DELAYED_BA_GET(*tag_buf)) {
		ppdu_user_desc->delayed_ba = 1;
		ppdu_desc->delayed_ba = 1;
	}

	if (HTT_PPDU_STATS_USER_COMMON_TLV_MCAST_GET(*tag_buf)) {
		ppdu_user_desc->is_mcast = true;
		ppdu_user_desc->mpdu_tried_mcast =
		HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf);
		ppdu_user_desc->num_mpdu = ppdu_user_desc->mpdu_tried_mcast;
	} else {
		ppdu_user_desc->mpdu_tried_ucast =
		HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf);
	}

	/* word 3: qos ctrl and frame ctrl */
	tag_buf++;

	ppdu_user_desc->qos_ctrl =
		HTT_PPDU_STATS_USER_COMMON_TLV_QOS_CTRL_GET(*tag_buf);
	ppdu_user_desc->frame_ctrl =
		HTT_PPDU_STATS_USER_COMMON_TLV_FRAME_CTRL_GET(*tag_buf);
	ppdu_desc->frame_ctrl = ppdu_user_desc->frame_ctrl;

	/* success count is reported later via BA status for delayed BA */
	if (ppdu_user_desc->delayed_ba)
		ppdu_user_desc->mpdu_success = 0;

	/* word 6: host opaque cookie */
	tag_buf += 3;

	if (HTT_PPDU_STATS_IS_OPAQUE_VALID_GET(*tag_buf)) {
		ppdu_user_desc->ppdu_cookie =
			HTT_PPDU_STATS_HOST_OPAQUE_COOKIE_GET(*tag_buf);
		ppdu_user_desc->is_ppdu_cookie_valid = 1;
	}
}
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302224
2225
Ishank Jain6290a3c2017-03-21 10:49:39 +05302226/**
Pamidipati, Vijay038d0902017-07-17 09:53:31 +05302227 * dp_process_ppdu_stats_user_rate_tlv() - Process htt_ppdu_stats_user_rate_tlv
2228 * @pdev: DP pdev handle
2229 * @tag_buf: T2H message buffer carrying the user rate TLV
Anish Natarajb9e7d012018-02-16 00:38:10 +05302230 * @ppdu_info: per ppdu tlv structure
Pamidipati, Vijay038d0902017-07-17 09:53:31 +05302231 *
2232 * return:void
2233 */
2234static void dp_process_ppdu_stats_user_rate_tlv(struct dp_pdev *pdev,
Anish Natarajb9e7d012018-02-16 00:38:10 +05302235 uint32_t *tag_buf,
2236 struct ppdu_info *ppdu_info)
Pamidipati, Vijay038d0902017-07-17 09:53:31 +05302237{
Soumya Bhat835033e2017-10-04 22:21:46 +05302238 uint16_t peer_id;
Pamidipati, Vijay038d0902017-07-17 09:53:31 +05302239 struct dp_peer *peer;
2240 struct cdp_tx_completion_ppdu *ppdu_desc;
2241 struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302242 uint8_t curr_user_index = 0;
Anish Nataraj37b64952018-08-03 22:11:13 +05302243 struct dp_vdev *vdev;
Pamidipati, Vijay038d0902017-07-17 09:53:31 +05302244
Anish Natarajb9e7d012018-02-16 00:38:10 +05302245 ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
Pamidipati, Vijay038d0902017-07-17 09:53:31 +05302246
2247 tag_buf++;
2248 peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf);
Pamidipati, Vijay038d0902017-07-17 09:53:31 +05302249
Anish Nataraj37b64952018-08-03 22:11:13 +05302250 curr_user_index =
2251 dp_get_ppdu_info_user_index(pdev,
2252 peer_id, ppdu_info);
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302253 ppdu_user_desc = &ppdu_desc->user[curr_user_index];
Anish Nataraj37b64952018-08-03 22:11:13 +05302254 if (peer_id == DP_SCAN_PEER_ID) {
2255 vdev =
2256 dp_get_vdev_from_soc_vdev_id_wifi3(pdev->soc,
2257 ppdu_desc->vdev_id);
Chaithanya Garrepalli3ba616a2018-10-05 19:11:00 +05302258 if (!vdev)
2259 return;
Anish Nataraj37b64952018-08-03 22:11:13 +05302260 } else {
2261 peer = dp_peer_find_by_id(pdev->soc, peer_id);
2262 if (!peer)
2263 return;
Sravan Kumar Kairam26d471e2018-08-14 23:51:58 +05302264 dp_peer_unref_del_find_by_id(peer);
Anish Nataraj37b64952018-08-03 22:11:13 +05302265 }
2266
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302267 ppdu_user_desc->peer_id = peer_id;
Pamidipati, Vijay038d0902017-07-17 09:53:31 +05302268
2269 ppdu_user_desc->tid =
2270 HTT_PPDU_STATS_USER_RATE_TLV_TID_NUM_GET(*tag_buf);
Pamidipati, Vijay038d0902017-07-17 09:53:31 +05302271
nobelj4e9d51f2018-08-07 19:36:47 -07002272 tag_buf += 1;
Soumya Bhat28541112017-11-22 16:58:29 +05302273
nobelj4e9d51f2018-08-07 19:36:47 -07002274 ppdu_user_desc->user_pos =
2275 HTT_PPDU_STATS_USER_RATE_TLV_USER_POS_GET(*tag_buf);
2276 ppdu_user_desc->mu_group_id =
2277 HTT_PPDU_STATS_USER_RATE_TLV_MU_GROUPID_GET(*tag_buf);
2278
2279 tag_buf += 1;
2280
2281 ppdu_user_desc->ru_start =
2282 HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf);
Anish Nataraj37b64952018-08-03 22:11:13 +05302283 ppdu_user_desc->ru_tones =
2284 (HTT_PPDU_STATS_USER_RATE_TLV_RU_END_GET(*tag_buf) -
2285 HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf)) + 1;
Soumya Bhat28541112017-11-22 16:58:29 +05302286
2287 tag_buf += 2;
Soumya Bhat606fb392017-10-27 12:42:45 +05302288
2289 ppdu_user_desc->ppdu_type =
2290 HTT_PPDU_STATS_USER_RATE_TLV_PPDU_TYPE_GET(*tag_buf);
2291
2292 tag_buf++;
Pranita Solankea12b4b32017-11-20 23:04:14 +05302293 ppdu_user_desc->tx_rate = *tag_buf;
Pamidipati, Vijay038d0902017-07-17 09:53:31 +05302294
2295 ppdu_user_desc->ltf_size =
2296 HTT_PPDU_STATS_USER_RATE_TLV_LTF_SIZE_GET(*tag_buf);
2297 ppdu_user_desc->stbc =
2298 HTT_PPDU_STATS_USER_RATE_TLV_STBC_GET(*tag_buf);
2299 ppdu_user_desc->he_re =
2300 HTT_PPDU_STATS_USER_RATE_TLV_HE_RE_GET(*tag_buf);
2301 ppdu_user_desc->txbf =
2302 HTT_PPDU_STATS_USER_RATE_TLV_TXBF_GET(*tag_buf);
2303 ppdu_user_desc->bw =
Keyur Parekhd005ca22018-06-26 11:26:03 -07002304 HTT_PPDU_STATS_USER_RATE_TLV_BW_GET(*tag_buf) - 2;
Pamidipati, Vijay038d0902017-07-17 09:53:31 +05302305 ppdu_user_desc->nss = HTT_PPDU_STATS_USER_RATE_TLV_NSS_GET(*tag_buf);
2306 ppdu_user_desc->mcs = HTT_PPDU_STATS_USER_RATE_TLV_MCS_GET(*tag_buf);
2307 ppdu_user_desc->preamble =
2308 HTT_PPDU_STATS_USER_RATE_TLV_PREAMBLE_GET(*tag_buf);
2309 ppdu_user_desc->gi = HTT_PPDU_STATS_USER_RATE_TLV_GI_GET(*tag_buf);
2310 ppdu_user_desc->dcm = HTT_PPDU_STATS_USER_RATE_TLV_DCM_GET(*tag_buf);
2311 ppdu_user_desc->ldpc = HTT_PPDU_STATS_USER_RATE_TLV_LDPC_GET(*tag_buf);
Pamidipati, Vijay038d0902017-07-17 09:53:31 +05302312}
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302313
2314/*
2315 * dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv: Process
2316 * htt_ppdu_stats_enq_mpdu_bitmap_64_tlv
Soumya Bhat1c73aa62017-09-20 22:18:22 +05302317 * pdev: DP PDEV handle
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302318 * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_64_tlv
Anish Natarajb9e7d012018-02-16 00:38:10 +05302319 * @ppdu_info: per ppdu tlv structure
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302320 *
2321 * return:void
2322 */
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302323static void dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv(
Anish Natarajb9e7d012018-02-16 00:38:10 +05302324 struct dp_pdev *pdev, uint32_t *tag_buf,
2325 struct ppdu_info *ppdu_info)
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302326{
2327 htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *dp_stats_buf =
2328 (htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *)tag_buf;
2329
2330 struct cdp_tx_completion_ppdu *ppdu_desc;
2331 struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2332 uint8_t curr_user_index = 0;
Soumya Bhat835033e2017-10-04 22:21:46 +05302333 uint16_t peer_id;
nobeljdebe2b32019-04-23 11:18:47 -07002334 uint32_t size = CDP_BA_64_BIT_MAP_SIZE_DWORDS;
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302335
Anish Natarajb9e7d012018-02-16 00:38:10 +05302336 ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302337
2338 tag_buf++;
2339
2340 peer_id =
Soumya Bhat835033e2017-10-04 22:21:46 +05302341 HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302342
Sravan Kumar Kairam26d471e2018-08-14 23:51:58 +05302343 if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302344 return;
2345
Anish Natarajb9e7d012018-02-16 00:38:10 +05302346 curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302347
2348 ppdu_user_desc = &ppdu_desc->user[curr_user_index];
Soumya Bhat835033e2017-10-04 22:21:46 +05302349 ppdu_user_desc->peer_id = peer_id;
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302350
2351 ppdu_user_desc->start_seq = dp_stats_buf->start_seq;
2352 qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap,
nobeljdebe2b32019-04-23 11:18:47 -07002353 sizeof(uint32_t) * CDP_BA_64_BIT_MAP_SIZE_DWORDS);
2354
2355 dp_process_ppdu_stats_update_failed_bitmap(pdev,
2356 (void *)ppdu_user_desc,
2357 ppdu_info->ppdu_id,
2358 size);
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302359}
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302360
2361/*
2362 * dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv: Process
2363 * htt_ppdu_stats_enq_mpdu_bitmap_256_tlv
2364 * soc: DP SOC handle
2365 * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_256_tlv
Anish Natarajb9e7d012018-02-16 00:38:10 +05302366 * @ppdu_info: per ppdu tlv structure
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302367 *
2368 * return:void
2369 */
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302370static void dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv(
Anish Natarajb9e7d012018-02-16 00:38:10 +05302371 struct dp_pdev *pdev, uint32_t *tag_buf,
2372 struct ppdu_info *ppdu_info)
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302373{
2374 htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *dp_stats_buf =
2375 (htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *)tag_buf;
2376
2377 struct cdp_tx_completion_ppdu *ppdu_desc;
2378 struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2379 uint8_t curr_user_index = 0;
Soumya Bhat835033e2017-10-04 22:21:46 +05302380 uint16_t peer_id;
nobeljdebe2b32019-04-23 11:18:47 -07002381 uint32_t size = CDP_BA_256_BIT_MAP_SIZE_DWORDS;
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302382
Anish Natarajb9e7d012018-02-16 00:38:10 +05302383 ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302384
2385 tag_buf++;
2386
2387 peer_id =
Soumya Bhat835033e2017-10-04 22:21:46 +05302388 HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302389
Sravan Kumar Kairam26d471e2018-08-14 23:51:58 +05302390 if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302391 return;
2392
Anish Natarajb9e7d012018-02-16 00:38:10 +05302393 curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302394
2395 ppdu_user_desc = &ppdu_desc->user[curr_user_index];
Soumya Bhat835033e2017-10-04 22:21:46 +05302396 ppdu_user_desc->peer_id = peer_id;
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302397
2398 ppdu_user_desc->start_seq = dp_stats_buf->start_seq;
2399 qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap,
nobeljdebe2b32019-04-23 11:18:47 -07002400 sizeof(uint32_t) * CDP_BA_256_BIT_MAP_SIZE_DWORDS);
2401
2402 dp_process_ppdu_stats_update_failed_bitmap(pdev,
2403 (void *)ppdu_user_desc,
2404 ppdu_info->ppdu_id,
2405 size);
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302406}
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302407
/*
 * dp_process_ppdu_stats_user_cmpltn_common_tlv: Process
 * htt_ppdu_stats_user_cmpltn_common_tlv
 * @pdev: DP PDEV handle
 * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_cmpltn_common_tlv
 * @ppdu_info: per ppdu tlv structure
 *
 * Parses the per-user completion-common TLV word by word (tag_buf is
 * advanced through the TLV): completion status/tid, ack rssi, mpdu
 * success/failure, retries, ampdu/resp/protection flags, per-chain RSSI
 * and the smart-antenna training fields.
 *
 * return:void
 */
static void dp_process_ppdu_stats_user_cmpltn_common_tlv(
		struct dp_pdev *pdev, uint32_t *tag_buf,
		struct ppdu_info *ppdu_info)
{
	uint16_t peer_id;
	struct cdp_tx_completion_ppdu *ppdu_desc;
	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
	uint8_t curr_user_index = 0;
	uint8_t bw_iter;
	/* keep a typed view of the whole TLV for direct field access */
	htt_ppdu_stats_user_cmpltn_common_tlv *dp_stats_buf =
		(htt_ppdu_stats_user_cmpltn_common_tlv *)tag_buf;

	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);

	/* word 1: sw peer id, completion status, tid */
	tag_buf++;
	peer_id =
	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SW_PEER_ID_GET(*tag_buf);

	/* drop the TLV for peers not known to the host */
	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
		return;

	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
	ppdu_user_desc->peer_id = peer_id;
	ppdu_desc->last_usr_index = curr_user_index;

	ppdu_user_desc->completion_status =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_COMPLETION_STATUS_GET(
				*tag_buf);

	ppdu_user_desc->tid =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TID_NUM_GET(*tag_buf);


	/* ack RSSI is only trustworthy when the user completed OK */
	tag_buf++;
	if (qdf_likely(ppdu_user_desc->completion_status ==
			HTT_PPDU_STATS_USER_STATUS_OK)) {
		ppdu_desc->ack_rssi = dp_stats_buf->ack_rssi;
		ppdu_user_desc->ack_rssi_valid = 1;
	} else {
		ppdu_user_desc->ack_rssi_valid = 0;
	}

	/* next word: mpdu success/tried counts */
	tag_buf++;

	ppdu_user_desc->mpdu_success =
	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_SUCCESS_GET(*tag_buf);

	/* failed = tried - success (derived, not reported directly) */
	ppdu_user_desc->mpdu_failed =
	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_TRIED_GET(*tag_buf) -
						ppdu_user_desc->mpdu_success;

	/* next word: retries, ampdu flag, response/protection info */
	tag_buf++;

	ppdu_user_desc->long_retries =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_LONG_RETRY_GET(*tag_buf);

	ppdu_user_desc->short_retries =
	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SHORT_RETRY_GET(*tag_buf);
	ppdu_user_desc->retry_msdus =
		ppdu_user_desc->long_retries + ppdu_user_desc->short_retries;

	ppdu_user_desc->is_ampdu =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_AMPDU_GET(*tag_buf);
	ppdu_info->is_ampdu = ppdu_user_desc->is_ampdu;

	ppdu_desc->resp_type =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RESP_TYPE_GET(*tag_buf);
	ppdu_desc->mprot_type =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPROT_TYPE_GET(*tag_buf);
	ppdu_desc->rts_success =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RTS_SUCCESS_GET(*tag_buf);
	ppdu_desc->rts_failure =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RTS_FAILURE_GET(*tag_buf);

	/*
	 * increase successful mpdu counter from
	 * htt_ppdu_stats_user_cmpltn_common_tlv
	 */
	ppdu_info->mpdu_compltn_common_tlv += ppdu_user_desc->mpdu_success;

	/*
	 * MU BAR may send request to n users but we may received ack only from
	 * m users. To have count of number of users respond back, we have a
	 * separate counter bar_num_users per PPDU that get increment for every
	 * htt_ppdu_stats_user_cmpltn_common_tlv
	 */
	ppdu_desc->bar_num_users++;

	/* one word of chain RSSI per iteration */
	tag_buf++;
	for (bw_iter = 0; bw_iter < CDP_RSSI_CHAIN_LEN; bw_iter++) {
		ppdu_user_desc->rssi_chain[bw_iter] =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_CHAIN_RSSI_GET(*tag_buf);
		tag_buf++;
	}

	/* smart-antenna fields: antenna mask, training state, max rates */
	ppdu_user_desc->sa_tx_antenna =
	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TX_ANTENNA_MASK_GET(*tag_buf);

	tag_buf++;
	ppdu_user_desc->sa_is_training =
	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_TRAINING_GET(*tag_buf);
	if (ppdu_user_desc->sa_is_training) {
		ppdu_user_desc->sa_goodput =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_PENDING_TRAINING_PKTS_GET(*tag_buf);
	}

	/* one max-rate word per supported bandwidth */
	tag_buf++;
	for (bw_iter = 0; bw_iter < CDP_NUM_SA_BW; bw_iter++) {
		ppdu_user_desc->sa_max_rates[bw_iter] =
		HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MAX_RATES_GET(tag_buf[bw_iter]);
	}

	tag_buf += CDP_NUM_SA_BW;
	ppdu_user_desc->current_rate_per =
	HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_CURRENT_RATE_PER_GET(*tag_buf);
}
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302534
2535/*
2536 * dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv: Process
2537 * htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv
Soumya Bhat1c73aa62017-09-20 22:18:22 +05302538 * pdev: DP PDEV handle
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302539 * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv
Anish Natarajb9e7d012018-02-16 00:38:10 +05302540 * @ppdu_info: per ppdu tlv structure
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302541 *
2542 * return:void
2543 */
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302544static void dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv(
Anish Natarajb9e7d012018-02-16 00:38:10 +05302545 struct dp_pdev *pdev, uint32_t *tag_buf,
2546 struct ppdu_info *ppdu_info)
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302547{
2548 htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *dp_stats_buf =
2549 (htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *)tag_buf;
2550 struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2551 struct cdp_tx_completion_ppdu *ppdu_desc;
2552 uint8_t curr_user_index = 0;
Soumya Bhat835033e2017-10-04 22:21:46 +05302553 uint16_t peer_id;
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302554
Anish Natarajb9e7d012018-02-16 00:38:10 +05302555 ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302556
2557 tag_buf++;
2558
2559 peer_id =
2560 HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2561
Sravan Kumar Kairam26d471e2018-08-14 23:51:58 +05302562 if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302563 return;
2564
Anish Natarajb9e7d012018-02-16 00:38:10 +05302565 curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302566
2567 ppdu_user_desc = &ppdu_desc->user[curr_user_index];
Soumya Bhat835033e2017-10-04 22:21:46 +05302568 ppdu_user_desc->peer_id = peer_id;
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302569
2570 ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no;
2571 qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap,
nobeljdebe2b32019-04-23 11:18:47 -07002572 sizeof(uint32_t) * CDP_BA_64_BIT_MAP_SIZE_DWORDS);
Karunakar Dasineni63429332019-10-15 18:49:33 -07002573 ppdu_user_desc->ba_size = CDP_BA_64_BIT_MAP_SIZE_DWORDS * 32;
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302574}
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302575
2576/*
2577 * dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv: Process
2578 * htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv
Soumya Bhat1c73aa62017-09-20 22:18:22 +05302579 * pdev: DP PDEV handle
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302580 * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv
Anish Natarajb9e7d012018-02-16 00:38:10 +05302581 * @ppdu_info: per ppdu tlv structure
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302582 *
2583 * return:void
2584 */
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302585static void dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv(
Anish Natarajb9e7d012018-02-16 00:38:10 +05302586 struct dp_pdev *pdev, uint32_t *tag_buf,
2587 struct ppdu_info *ppdu_info)
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302588{
2589 htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *dp_stats_buf =
2590 (htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *)tag_buf;
2591 struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2592 struct cdp_tx_completion_ppdu *ppdu_desc;
2593 uint8_t curr_user_index = 0;
Soumya Bhat835033e2017-10-04 22:21:46 +05302594 uint16_t peer_id;
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302595
Anish Natarajb9e7d012018-02-16 00:38:10 +05302596 ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302597
2598 tag_buf++;
2599
2600 peer_id =
2601 HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2602
Sravan Kumar Kairam26d471e2018-08-14 23:51:58 +05302603 if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302604 return;
2605
Anish Natarajb9e7d012018-02-16 00:38:10 +05302606 curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302607
2608 ppdu_user_desc = &ppdu_desc->user[curr_user_index];
Soumya Bhat835033e2017-10-04 22:21:46 +05302609 ppdu_user_desc->peer_id = peer_id;
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302610
2611 ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no;
2612 qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap,
nobeljdebe2b32019-04-23 11:18:47 -07002613 sizeof(uint32_t) * CDP_BA_256_BIT_MAP_SIZE_DWORDS);
Karunakar Dasineni63429332019-10-15 18:49:33 -07002614 ppdu_user_desc->ba_size = CDP_BA_256_BIT_MAP_SIZE_DWORDS * 32;
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302615}
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302616
/*
 * dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv: Process
 * htt_ppdu_stats_user_compltn_ack_ba_status_tlv
 * @pdev: DP PDEV handle
 * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ack_ba_status_tlv
 * @ppdu_info: per ppdu tlv structure
 *
 * Extracts the acked mpdu/msdu counts, BA start sequence and success
 * bytes for one user and accumulates the per-ppdu acked-mpdu counter.
 *
 * return:void
 */
static void dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv(
		struct dp_pdev *pdev, uint32_t *tag_buf,
		struct ppdu_info *ppdu_info)
{
	uint16_t peer_id;
	struct cdp_tx_completion_ppdu *ppdu_desc;
	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
	uint8_t curr_user_index = 0;

	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);

	/* sw peer id lives in the third word of this TLV */
	tag_buf += 2;
	peer_id =
	HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_SW_PEER_ID_GET(*tag_buf);

	/* drop the TLV for peers not known to the host */
	if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
		return;

	curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);

	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
	ppdu_user_desc->peer_id = peer_id;

	tag_buf++;
	/* not to update ppdu_desc->tid from this TLV */
	ppdu_user_desc->num_mpdu =
	HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MPDU_GET(*tag_buf);

	ppdu_user_desc->num_msdu =
	HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MSDU_GET(*tag_buf);

	/* all msdus in this TLV are acked, so success == num */
	ppdu_user_desc->success_msdus = ppdu_user_desc->num_msdu;

	tag_buf++;
	ppdu_user_desc->start_seq =
		HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_START_SEQ_GET(
			*tag_buf);

	tag_buf++;
	ppdu_user_desc->success_bytes = *tag_buf;

	/* increase successful mpdu counter */
	ppdu_info->mpdu_ack_ba_tlv += ppdu_user_desc->num_mpdu;
}
Soumya Bhat1c73aa62017-09-20 22:18:22 +05302670
2671/*
2672 * dp_process_ppdu_stats_user_common_array_tlv: Process
2673 * htt_ppdu_stats_user_common_array_tlv
2674 * pdev: DP PDEV handle
2675 * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ack_ba_status_tlv
Anish Natarajb9e7d012018-02-16 00:38:10 +05302676 * @ppdu_info: per ppdu tlv structure
Soumya Bhat1c73aa62017-09-20 22:18:22 +05302677 *
2678 * return:void
2679 */
Anish Natarajb9e7d012018-02-16 00:38:10 +05302680static void dp_process_ppdu_stats_user_common_array_tlv(
2681 struct dp_pdev *pdev, uint32_t *tag_buf,
2682 struct ppdu_info *ppdu_info)
Soumya Bhat1c73aa62017-09-20 22:18:22 +05302683{
2684 uint32_t peer_id;
Soumya Bhat1c73aa62017-09-20 22:18:22 +05302685 struct cdp_tx_completion_ppdu *ppdu_desc;
2686 struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2687 uint8_t curr_user_index = 0;
2688 struct htt_tx_ppdu_stats_info *dp_stats_buf;
2689
Anish Natarajb9e7d012018-02-16 00:38:10 +05302690 ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
Soumya Bhat1c73aa62017-09-20 22:18:22 +05302691
Pranita Solankea12b4b32017-11-20 23:04:14 +05302692 tag_buf++;
Soumya Bhat1c73aa62017-09-20 22:18:22 +05302693 dp_stats_buf = (struct htt_tx_ppdu_stats_info *)tag_buf;
Pranita Solankea12b4b32017-11-20 23:04:14 +05302694 tag_buf += 3;
Soumya Bhat1c73aa62017-09-20 22:18:22 +05302695 peer_id =
2696 HTT_PPDU_STATS_ARRAY_ITEM_TLV_PEERID_GET(*tag_buf);
2697
Sravan Kumar Kairam26d471e2018-08-14 23:51:58 +05302698 if (!dp_peer_find_by_id_valid(pdev->soc, peer_id)) {
Soumya Bhat1c73aa62017-09-20 22:18:22 +05302699 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2700 "Invalid peer");
2701 return;
2702 }
2703
Anish Natarajb9e7d012018-02-16 00:38:10 +05302704 curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
Soumya Bhat1c73aa62017-09-20 22:18:22 +05302705
2706 ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2707
Soumya Bhat1c73aa62017-09-20 22:18:22 +05302708 ppdu_user_desc->retry_bytes = dp_stats_buf->tx_retry_bytes;
2709 ppdu_user_desc->failed_bytes = dp_stats_buf->tx_failed_bytes;
2710
2711 tag_buf++;
Pranita Solankea12b4b32017-11-20 23:04:14 +05302712
Soumya Bhat1c73aa62017-09-20 22:18:22 +05302713 ppdu_user_desc->success_msdus =
2714 HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_SUCC_MSDUS_GET(*tag_buf);
2715 ppdu_user_desc->retry_bytes =
2716 HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RETRY_MSDUS_GET(*tag_buf);
2717 tag_buf++;
2718 ppdu_user_desc->failed_msdus =
2719 HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_FAILED_MSDUS_GET(*tag_buf);
Pranita Solankea12b4b32017-11-20 23:04:14 +05302720}
2721
2722/*
2723 * dp_process_ppdu_stats_flush_tlv: Process
2724 * htt_ppdu_stats_flush_tlv
2725 * @pdev: DP PDEV handle
2726 * @tag_buf: buffer containing the htt_ppdu_stats_flush_tlv
nobelj0e007762019-12-13 12:59:14 -08002727 * @ppdu_info: per ppdu tlv structure
Pranita Solankea12b4b32017-11-20 23:04:14 +05302728 *
2729 * return:void
2730 */
nobelj0e007762019-12-13 12:59:14 -08002731static void
2732dp_process_ppdu_stats_user_compltn_flush_tlv(struct dp_pdev *pdev,
2733 uint32_t *tag_buf,
2734 struct ppdu_info *ppdu_info)
Pranita Solankea12b4b32017-11-20 23:04:14 +05302735{
nobelj0e007762019-12-13 12:59:14 -08002736 struct cdp_tx_completion_ppdu *ppdu_desc;
Pranita Solankea12b4b32017-11-20 23:04:14 +05302737 uint32_t peer_id;
Pranita Solankea12b4b32017-11-20 23:04:14 +05302738 uint8_t tid;
Pranita Solankea12b4b32017-11-20 23:04:14 +05302739 struct dp_peer *peer;
2740
nobelj0e007762019-12-13 12:59:14 -08002741 ppdu_desc = (struct cdp_tx_completion_ppdu *)
2742 qdf_nbuf_data(ppdu_info->nbuf);
2743 ppdu_desc->is_flush = 1;
Pranita Solankea12b4b32017-11-20 23:04:14 +05302744
2745 tag_buf++;
nobelj0e007762019-12-13 12:59:14 -08002746 ppdu_desc->drop_reason = *tag_buf;
Pranita Solankea12b4b32017-11-20 23:04:14 +05302747
2748 tag_buf++;
nobelj0e007762019-12-13 12:59:14 -08002749 ppdu_desc->num_msdu = HTT_PPDU_STATS_FLUSH_TLV_NUM_MSDU_GET(*tag_buf);
2750 ppdu_desc->num_mpdu = HTT_PPDU_STATS_FLUSH_TLV_NUM_MPDU_GET(*tag_buf);
2751 ppdu_desc->flow_type = HTT_PPDU_STATS_FLUSH_TLV_FLOW_TYPE_GET(*tag_buf);
2752
2753 tag_buf++;
2754 peer_id = HTT_PPDU_STATS_FLUSH_TLV_SW_PEER_ID_GET(*tag_buf);
2755 tid = HTT_PPDU_STATS_FLUSH_TLV_TID_NUM_GET(*tag_buf);
2756
2757 ppdu_desc->user[0].peer_id = peer_id;
2758 ppdu_desc->user[0].tid = tid;
2759
2760 ppdu_desc->queue_type =
2761 HTT_PPDU_STATS_FLUSH_TLV_QUEUE_TYPE_GET(*tag_buf);
Pranita Solankea12b4b32017-11-20 23:04:14 +05302762
2763 peer = dp_peer_find_by_id(pdev->soc, peer_id);
2764 if (!peer)
2765 return;
2766
nobelj0e007762019-12-13 12:59:14 -08002767 if (ppdu_desc->drop_reason == HTT_FLUSH_EXCESS_RETRIES) {
2768 DP_STATS_INC(peer,
2769 tx.excess_retries_per_ac[TID_TO_WME_AC(tid)],
2770 ppdu_desc->num_msdu);
Pranita Solankea12b4b32017-11-20 23:04:14 +05302771 }
Sravan Kumar Kairam26d471e2018-08-14 23:51:58 +05302772
2773 dp_peer_unref_del_find_by_id(peer);
Soumya Bhat1c73aa62017-09-20 22:18:22 +05302774}
Pamidipati, Vijay038d0902017-07-17 09:53:31 +05302775
nobeljdebe2b32019-04-23 11:18:47 -07002776#ifndef WLAN_TX_PKT_CAPTURE_ENH
2777/*
2778 * dp_deliver_mgmt_frm: Process
2779 * @pdev: DP PDEV handle
2780 * @nbuf: buffer containing the htt_ppdu_stats_tx_mgmtctrl_payload_tlv
2781 *
2782 * return: void
2783 */
Karunakar Dasineni13abde92019-09-10 12:40:41 -07002784void dp_deliver_mgmt_frm(struct dp_pdev *pdev, qdf_nbuf_t nbuf)
nobeljdebe2b32019-04-23 11:18:47 -07002785{
2786 if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
2787 dp_wdi_event_handler(WDI_EVENT_TX_MGMT_CTRL, pdev->soc,
2788 nbuf, HTT_INVALID_PEER,
2789 WDI_NO_VAL, pdev->pdev_id);
2790 }
2791}
2792#endif
2793
/*
 * dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv: Process
 * htt_ppdu_stats_tx_mgmtctrl_payload_tlv
 * @pdev: DP PDEV handle
 * @tag_buf: nbuf carrying the htt_ppdu_stats_tx_mgmtctrl_payload_tlv
 * @ppdu_id: ppdu id of the mgmt frame this payload belongs to
 *
 * Trims the nbuf down to the mgmt frame payload, prepends either a
 * cdp_tx_mgmt_comp_info header (tx-capture mode) or the bare ppdu_id,
 * and hands the frame to the WDI/mgmt delivery path.
 *
 * return:QDF_STATUS_SUCCESS if nbuf as to be freed in caller
 */
static QDF_STATUS
dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv(struct dp_pdev *pdev,
					      qdf_nbuf_t tag_buf,
					      uint32_t ppdu_id)
{
	uint32_t *nbuf_ptr;
	uint8_t trim_size;
	size_t head_size;
	struct cdp_tx_mgmt_comp_info *ptr_mgmt_comp_info;
	uint32_t *msg_word;
	uint32_t tsf_hdr;

	/* nothing to do unless some consumer of mgmt frames is enabled */
	if ((!pdev->tx_sniffer_enable) && (!pdev->mcopy_mode) &&
	    (!pdev->bpr_enable) && (!pdev->tx_capture_enabled))
		return QDF_STATUS_SUCCESS;

	/*
	 * get timestamp from htt_t2h_ppdu_stats_ind_hdr_t
	 */
	msg_word = (uint32_t *)qdf_nbuf_data(tag_buf);
	msg_word = msg_word + 2;
	tsf_hdr = *msg_word;

	/* strip the HTT/TLV header so the nbuf starts at the mgmt frame */
	trim_size = ((pdev->mgmtctrl_frm_info.mgmt_buf +
		      HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN) -
		      qdf_nbuf_data(tag_buf));

	if (!qdf_nbuf_pull_head(tag_buf, trim_size))
		return QDF_STATUS_SUCCESS;

	/* drop any trailing bytes beyond the recorded mgmt frame length */
	qdf_nbuf_trim_tail(tag_buf, qdf_nbuf_len(tag_buf) -
			    pdev->mgmtctrl_frm_info.mgmt_buf_len);

	if (pdev->tx_capture_enabled) {
		/* tx-capture needs a full completion-info header in front */
		head_size = sizeof(struct cdp_tx_mgmt_comp_info);
		if (qdf_unlikely(qdf_nbuf_headroom(tag_buf) < head_size)) {
			qdf_err("Fail to get headroom h_sz %d h_avail %d\n",
				head_size, qdf_nbuf_headroom(tag_buf));
			qdf_assert_always(0);
			return QDF_STATUS_E_NOMEM;
		}
		ptr_mgmt_comp_info = (struct cdp_tx_mgmt_comp_info *)
					qdf_nbuf_push_head(tag_buf, head_size);
		qdf_assert_always(ptr_mgmt_comp_info);
		ptr_mgmt_comp_info->ppdu_id = ppdu_id;
		ptr_mgmt_comp_info->is_sgen_pkt = true;
		ptr_mgmt_comp_info->tx_tsf = tsf_hdr;
	} else {
		/* otherwise just prepend the 32-bit ppdu id */
		head_size = sizeof(ppdu_id);
		nbuf_ptr = (uint32_t *)qdf_nbuf_push_head(tag_buf, head_size);
		*nbuf_ptr = ppdu_id;
	}

	if (pdev->bpr_enable) {
		dp_wdi_event_handler(WDI_EVENT_TX_BEACON, pdev->soc,
				     tag_buf, HTT_INVALID_PEER,
				     WDI_NO_VAL, pdev->pdev_id);
	}

	dp_deliver_mgmt_frm(pdev, tag_buf);

	/* E_ALREADY tells the caller the nbuf has been consumed here */
	return QDF_STATUS_E_ALREADY;
}
2866
Pamidipati, Vijay038d0902017-07-17 09:53:31 +05302867/**
Tallapragada Kalyanf7b0f742019-05-14 16:59:42 +05302868 * dp_validate_fix_ppdu_tlv(): Function to validate the length of PPDU
2869 *
2870 * If the TLV length sent as part of PPDU TLV is less that expected size i.e
2871 * size of corresponding data structure, pad the remaining bytes with zeros
2872 * and continue processing the TLVs
2873 *
2874 * @pdev: DP pdev handle
2875 * @tag_buf: TLV buffer
2876 * @tlv_expected_size: Expected size of Tag
2877 * @tlv_len: TLV length received from FW
2878 *
2879 * Return: Pointer to updated TLV
2880 */
2881static inline uint32_t *dp_validate_fix_ppdu_tlv(struct dp_pdev *pdev,
2882 uint32_t *tag_buf,
2883 uint16_t tlv_expected_size,
2884 uint16_t tlv_len)
2885{
2886 uint32_t *tlv_desc = tag_buf;
2887
2888 qdf_assert_always(tlv_len != 0);
2889
2890 if (tlv_len < tlv_expected_size) {
2891 qdf_mem_zero(pdev->ppdu_tlv_buf, tlv_expected_size);
2892 qdf_mem_copy(pdev->ppdu_tlv_buf, tag_buf, tlv_len);
2893 tlv_desc = pdev->ppdu_tlv_buf;
2894 }
2895
2896 return tlv_desc;
2897}
2898
2899/**
Pamidipati, Vijay038d0902017-07-17 09:53:31 +05302900 * dp_process_ppdu_tag(): Function to process the PPDU TLVs
Anish Natarajb9e7d012018-02-16 00:38:10 +05302901 * @pdev: DP pdev handle
Pamidipati, Vijay038d0902017-07-17 09:53:31 +05302902 * @tag_buf: TLV buffer
Anish Natarajb9e7d012018-02-16 00:38:10 +05302903 * @tlv_len: length of tlv
2904 * @ppdu_info: per ppdu tlv structure
Pamidipati, Vijay038d0902017-07-17 09:53:31 +05302905 *
2906 * return: void
2907 */
2908static void dp_process_ppdu_tag(struct dp_pdev *pdev, uint32_t *tag_buf,
Anish Natarajb9e7d012018-02-16 00:38:10 +05302909 uint32_t tlv_len, struct ppdu_info *ppdu_info)
Pamidipati, Vijay038d0902017-07-17 09:53:31 +05302910{
2911 uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
Tallapragada Kalyanf7b0f742019-05-14 16:59:42 +05302912 uint16_t tlv_expected_size;
2913 uint32_t *tlv_desc;
Anish Natarajb9e7d012018-02-16 00:38:10 +05302914
Pamidipati, Vijay038d0902017-07-17 09:53:31 +05302915 switch (tlv_type) {
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302916 case HTT_PPDU_STATS_COMMON_TLV:
Tallapragada Kalyanf7b0f742019-05-14 16:59:42 +05302917 tlv_expected_size = sizeof(htt_ppdu_stats_common_tlv);
2918 tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
2919 tlv_expected_size, tlv_len);
2920 dp_process_ppdu_stats_common_tlv(pdev, tlv_desc, ppdu_info);
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302921 break;
2922 case HTT_PPDU_STATS_USR_COMMON_TLV:
Tallapragada Kalyanf7b0f742019-05-14 16:59:42 +05302923 tlv_expected_size = sizeof(htt_ppdu_stats_user_common_tlv);
2924 tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
2925 tlv_expected_size, tlv_len);
2926 dp_process_ppdu_stats_user_common_tlv(pdev, tlv_desc,
2927 ppdu_info);
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302928 break;
Pamidipati, Vijay038d0902017-07-17 09:53:31 +05302929 case HTT_PPDU_STATS_USR_RATE_TLV:
Tallapragada Kalyanf7b0f742019-05-14 16:59:42 +05302930 tlv_expected_size = sizeof(htt_ppdu_stats_user_rate_tlv);
2931 tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
2932 tlv_expected_size, tlv_len);
2933 dp_process_ppdu_stats_user_rate_tlv(pdev, tlv_desc,
2934 ppdu_info);
Pamidipati, Vijay038d0902017-07-17 09:53:31 +05302935 break;
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302936 case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV:
Tallapragada Kalyanf7b0f742019-05-14 16:59:42 +05302937 tlv_expected_size =
2938 sizeof(htt_ppdu_stats_enq_mpdu_bitmap_64_tlv);
2939 tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
2940 tlv_expected_size, tlv_len);
Anish Natarajb9e7d012018-02-16 00:38:10 +05302941 dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv(
Tallapragada Kalyanf7b0f742019-05-14 16:59:42 +05302942 pdev, tlv_desc, ppdu_info);
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302943 break;
2944 case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV:
Tallapragada Kalyanf7b0f742019-05-14 16:59:42 +05302945 tlv_expected_size =
2946 sizeof(htt_ppdu_stats_enq_mpdu_bitmap_256_tlv);
2947 tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
2948 tlv_expected_size, tlv_len);
Anish Natarajb9e7d012018-02-16 00:38:10 +05302949 dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv(
Tallapragada Kalyanf7b0f742019-05-14 16:59:42 +05302950 pdev, tlv_desc, ppdu_info);
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302951 break;
2952 case HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV:
Tallapragada Kalyanf7b0f742019-05-14 16:59:42 +05302953 tlv_expected_size =
2954 sizeof(htt_ppdu_stats_user_cmpltn_common_tlv);
2955 tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
2956 tlv_expected_size, tlv_len);
Anish Natarajb9e7d012018-02-16 00:38:10 +05302957 dp_process_ppdu_stats_user_cmpltn_common_tlv(
Tallapragada Kalyanf7b0f742019-05-14 16:59:42 +05302958 pdev, tlv_desc, ppdu_info);
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302959 break;
2960 case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV:
Tallapragada Kalyanf7b0f742019-05-14 16:59:42 +05302961 tlv_expected_size =
2962 sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv);
2963 tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
2964 tlv_expected_size, tlv_len);
Anish Natarajb9e7d012018-02-16 00:38:10 +05302965 dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv(
Tallapragada Kalyanf7b0f742019-05-14 16:59:42 +05302966 pdev, tlv_desc, ppdu_info);
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302967 break;
2968 case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV:
Tallapragada Kalyanf7b0f742019-05-14 16:59:42 +05302969 tlv_expected_size =
2970 sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv);
2971 tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
2972 tlv_expected_size, tlv_len);
Anish Natarajb9e7d012018-02-16 00:38:10 +05302973 dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv(
Tallapragada Kalyanf7b0f742019-05-14 16:59:42 +05302974 pdev, tlv_desc, ppdu_info);
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302975 break;
2976 case HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV:
Tallapragada Kalyanf7b0f742019-05-14 16:59:42 +05302977 tlv_expected_size =
2978 sizeof(htt_ppdu_stats_user_compltn_ack_ba_status_tlv);
2979 tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
2980 tlv_expected_size, tlv_len);
Anish Natarajb9e7d012018-02-16 00:38:10 +05302981 dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv(
Tallapragada Kalyanf7b0f742019-05-14 16:59:42 +05302982 pdev, tlv_desc, ppdu_info);
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302983 break;
Soumya Bhat1c73aa62017-09-20 22:18:22 +05302984 case HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV:
Tallapragada Kalyanf7b0f742019-05-14 16:59:42 +05302985 tlv_expected_size =
2986 sizeof(htt_ppdu_stats_usr_common_array_tlv_v);
2987 tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
2988 tlv_expected_size, tlv_len);
Anish Natarajb9e7d012018-02-16 00:38:10 +05302989 dp_process_ppdu_stats_user_common_array_tlv(
Tallapragada Kalyanf7b0f742019-05-14 16:59:42 +05302990 pdev, tlv_desc, ppdu_info);
Soumya Bhat1c73aa62017-09-20 22:18:22 +05302991 break;
Pranita Solankea12b4b32017-11-20 23:04:14 +05302992 case HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV:
Tallapragada Kalyanf7b0f742019-05-14 16:59:42 +05302993 tlv_expected_size = sizeof(htt_ppdu_stats_flush_tlv);
2994 tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
2995 tlv_expected_size, tlv_len);
nobelj0e007762019-12-13 12:59:14 -08002996 dp_process_ppdu_stats_user_compltn_flush_tlv(pdev, tlv_desc,
2997 ppdu_info);
Pranita Solankea12b4b32017-11-20 23:04:14 +05302998 break;
Pamidipati, Vijay038d0902017-07-17 09:53:31 +05302999 default:
3000 break;
3001 }
3002}
3003
/**
 * dp_ppdu_desc_user_stats_update(): Function to update TX user stats
 * @pdev: DP pdev handle
 * @ppdu_info: per PPDU TLV descriptor
 *
 * Walks every user in the completed PPDU descriptor, accumulates per-PPDU
 * MPDU/MSDU counts, and pushes per-peer TX/rate stats for data-type frames
 * whose required TLV set was fully received.
 *
 * return: void
 */
void
dp_ppdu_desc_user_stats_update(struct dp_pdev *pdev,
			       struct ppdu_info *ppdu_info)
{
	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
	struct dp_peer *peer = NULL;
	uint32_t tlv_bitmap_expected;
	uint32_t tlv_bitmap_default;
	uint16_t i;
	uint32_t num_users;

	/* PPDU descriptor lives in the data area of the ppdu_info nbuf */
	ppdu_desc = (struct cdp_tx_completion_ppdu *)
		qdf_nbuf_data(ppdu_info->nbuf);

	ppdu_desc->num_users = ppdu_info->last_user;
	ppdu_desc->ppdu_id = ppdu_info->ppdu_id;

	tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP;
	if (pdev->tx_sniffer_enable || pdev->mcopy_mode ||
	    pdev->tx_capture_enabled) {
		/* sniffer/capture modes expect extra TLVs for A-MPDU */
		if (ppdu_info->is_ampdu)
			tlv_bitmap_expected =
				dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(
					ppdu_info->tlv_bitmap);
	}

	tlv_bitmap_default = tlv_bitmap_expected;

	/* BAR PPDUs carry their user count in bar_num_users */
	if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) {
		num_users = ppdu_desc->bar_num_users;
		ppdu_desc->num_users = ppdu_desc->bar_num_users;
	} else {
		num_users = ppdu_desc->num_users;
	}

	for (i = 0; i < num_users; i++) {
		ppdu_desc->num_mpdu += ppdu_desc->user[i].num_mpdu;
		ppdu_desc->num_msdu += ppdu_desc->user[i].num_msdu;

		/* takes a peer reference; must be released before continue */
		peer = dp_peer_find_by_id(pdev->soc,
					  ppdu_desc->user[i].peer_id);
		/**
		 * This check is to make sure peer is not deleted
		 * after processing the TLVs.
		 */
		if (!peer)
			continue;

		ppdu_desc->user[i].cookie = (void *)peer->wlanstats_ctx;

		/*
		 * different frame like DATA, BAR or CTRL has different
		 * tlv bitmap expected. Apart from ACK_BA_STATUS TLV, we
		 * receive other tlv in-order/sequential from fw.
		 * Since ACK_BA_STATUS TLV come from Hardware it is
		 * asynchronous So we need to depend on some tlv to confirm
		 * all tlv is received for a ppdu.
		 * So we depend on both HTT_PPDU_STATS_COMMON_TLV and
		 * ACK_BA_STATUS_TLV. for failure packet we won't get
		 * ACK_BA_STATUS_TLV.
		 */
		if (!(ppdu_info->tlv_bitmap &
		      (1 << HTT_PPDU_STATS_COMMON_TLV)) ||
		    (!(ppdu_info->tlv_bitmap &
		       (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV)) &&
		     (ppdu_desc->user[i].completion_status ==
		      HTT_PPDU_STATS_USER_STATUS_OK))) {
			dp_peer_unref_del_find_by_id(peer);
			continue;
		}

		/**
		 * Update tx stats for data frames having Qos as well as
		 * non-Qos data tid
		 */

		if ((ppdu_desc->user[i].tid < CDP_DATA_TID_MAX ||
		     (ppdu_desc->user[i].tid == CDP_DATA_NON_QOS_TID) ||
		     (ppdu_desc->htt_frame_type ==
		      HTT_STATS_FTYPE_SGEN_QOS_NULL)) &&
		    (ppdu_desc->frame_type != CDP_PPDU_FTYPE_CTRL)) {

			dp_tx_stats_update(pdev, peer,
					   &ppdu_desc->user[i],
					   ppdu_desc->ack_rssi);
			dp_tx_rate_stats_update(peer, &ppdu_desc->user[i]);
		}

		dp_peer_unref_del_find_by_id(peer);
		tlv_bitmap_expected = tlv_bitmap_default;
	}
}
3103
3104#ifndef WLAN_TX_PKT_CAPTURE_ENH
3105
3106/**
3107 * dp_ppdu_desc_deliver(): Function to deliver Tx PPDU status descriptor
3108 * to upper layer
3109 * @pdev: DP pdev handle
3110 * @ppdu_info: per PPDU TLV descriptor
3111 *
3112 * return: void
3113 */
3114static
3115void dp_ppdu_desc_deliver(struct dp_pdev *pdev,
3116 struct ppdu_info *ppdu_info)
3117{
3118 struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
3119 qdf_nbuf_t nbuf;
3120
3121 ppdu_desc = (struct cdp_tx_completion_ppdu *)
3122 qdf_nbuf_data(ppdu_info->nbuf);
3123
3124 dp_ppdu_desc_user_stats_update(pdev, ppdu_info);
Anish Natarajb9e7d012018-02-16 00:38:10 +05303125
3126 /*
3127 * Remove from the list
3128 */
3129 TAILQ_REMOVE(&pdev->ppdu_info_list, ppdu_info, ppdu_info_list_elem);
3130 nbuf = ppdu_info->nbuf;
3131 pdev->list_depth--;
3132 qdf_mem_free(ppdu_info);
3133
3134 qdf_assert_always(nbuf);
3135
3136 ppdu_desc = (struct cdp_tx_completion_ppdu *)
3137 qdf_nbuf_data(nbuf);
3138
3139 /**
3140 * Deliver PPDU stats only for valid (acked) data frames if
3141 * sniffer mode is not enabled.
3142 * If sniffer mode is enabled, PPDU stats for all frames
3143 * including mgmt/control frames should be delivered to upper layer
3144 */
3145 if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
3146 dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC, pdev->soc,
3147 nbuf, HTT_INVALID_PEER,
3148 WDI_NO_VAL, pdev->pdev_id);
3149 } else {
3150 if (ppdu_desc->num_mpdu != 0 && ppdu_desc->num_users != 0 &&
3151 ppdu_desc->frame_ctrl & HTT_FRAMECTRL_DATATYPE) {
3152
3153 dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC,
3154 pdev->soc, nbuf, HTT_INVALID_PEER,
3155 WDI_NO_VAL, pdev->pdev_id);
3156 } else
3157 qdf_nbuf_free(nbuf);
3158 }
3159 return;
3160}
3161
nobeljdebe2b32019-04-23 11:18:47 -07003162#endif
3163
/**
 * dp_get_ppdu_desc(): Function to allocate new PPDU status
 * desc for new ppdu id
 * @pdev: DP pdev handle
 * @ppdu_id: PPDU unique identifier
 * @tlv_type: TLV type received
 *
 * Looks up an in-flight ppdu_info node for @ppdu_id; if a duplicate TLV
 * type indicates a new PPDU reusing the same id, the stale node is
 * delivered first. Allocates a fresh node (with an nbuf-backed descriptor)
 * when none is reusable.
 *
 * return: ppdu_info per ppdu tlv structure, or NULL on allocation failure
 */
static
struct ppdu_info *dp_get_ppdu_desc(struct dp_pdev *pdev, uint32_t ppdu_id,
			uint8_t tlv_type)
{
	struct ppdu_info *ppdu_info = NULL;

	/*
	 * Find ppdu_id node exists or not
	 */
	TAILQ_FOREACH(ppdu_info, &pdev->ppdu_info_list, ppdu_info_list_elem) {

		if (ppdu_info && (ppdu_info->ppdu_id == ppdu_id)) {
			break;
		}
	}

	if (ppdu_info) {
		if (ppdu_info->tlv_bitmap & (1 << tlv_type)) {
			/**
			 * if we get tlv_type that is already been processed
			 * for ppdu, that means we got a new ppdu with same
			 * ppdu id. Hence Flush the older ppdu
			 * for MUMIMO and OFDMA, In a PPDU we have
			 * multiple user with same tlv types. tlv bitmap is
			 * used to check whether SU or MU_MIMO/OFDMA
			 */
			if (!(ppdu_info->tlv_bitmap &
			      (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV)))
				return ppdu_info;

			/**
			 * apart from ACK BA STATUS TLV rest all comes in order
			 * so if tlv type not ACK BA STATUS TLV we can deliver
			 * ppdu_info
			 */
			if (tlv_type ==
			    HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV)
				return ppdu_info;

			/* stale PPDU with the same id: flush it upward */
			dp_ppdu_desc_deliver(pdev, ppdu_info);
		} else {
			/* new TLV type for an existing PPDU: keep filling */
			return ppdu_info;
		}
	}

	/**
	 * Flush the head ppdu descriptor if ppdu desc list reaches max
	 * threshold
	 */
	if (pdev->list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
		ppdu_info = TAILQ_FIRST(&pdev->ppdu_info_list);
		dp_ppdu_desc_deliver(pdev, ppdu_info);
	}

	/*
	 * Allocate new ppdu_info node
	 */
	ppdu_info = qdf_mem_malloc(sizeof(struct ppdu_info));
	if (!ppdu_info)
		return NULL;

	ppdu_info->nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
			sizeof(struct cdp_tx_completion_ppdu), 0, 4,
			TRUE);
	if (!ppdu_info->nbuf) {
		qdf_mem_free(ppdu_info);
		return NULL;
	}

	/* descriptor lives directly in the nbuf data area */
	ppdu_info->ppdu_desc =
		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
	qdf_mem_zero(qdf_nbuf_data(ppdu_info->nbuf),
		     sizeof(struct cdp_tx_completion_ppdu));

	if (qdf_nbuf_put_tail(ppdu_info->nbuf,
			      sizeof(struct cdp_tx_completion_ppdu)) == NULL) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "No tailroom for HTT PPDU");
		qdf_nbuf_free(ppdu_info->nbuf);
		ppdu_info->nbuf = NULL;
		ppdu_info->last_user = 0;
		qdf_mem_free(ppdu_info);
		return NULL;
	}

	/**
	 * No lock is needed because all PPDU TLVs are processed in
	 * same context and this list is updated in same context
	 */
	TAILQ_INSERT_TAIL(&pdev->ppdu_info_list, ppdu_info,
			  ppdu_info_list_elem);
	pdev->list_depth++;
	return ppdu_info;
}
3267
/**
 * dp_htt_process_tlv(): Function to process each PPDU TLVs
 * @pdev: DP pdev handle
 * @htt_t2h_msg: HTT target to host message
 *
 * Walks the TLV stream in the T2H PPDU-stats message, dispatching each TLV
 * into a per-PPDU descriptor, handling delayed-BA bookkeeping, and deciding
 * whether the PPDU is complete enough to deliver.
 *
 * return: ppdu_info per ppdu tlv structure when the PPDU is ready to be
 *	   delivered to the upper layer, NULL otherwise
 */

static struct ppdu_info *dp_htt_process_tlv(struct dp_pdev *pdev,
		qdf_nbuf_t htt_t2h_msg)
{
	uint32_t length;
	uint32_t ppdu_id;
	uint8_t tlv_type;
	uint32_t tlv_length, tlv_bitmap_expected;
	uint8_t *tlv_buf;
	struct ppdu_info *ppdu_info = NULL;
	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
	struct dp_peer *peer;
	uint32_t i = 0;

	uint32_t *msg_word = (uint32_t *) qdf_nbuf_data(htt_t2h_msg);

	/* word 0: payload size; word 1: ppdu id; TLVs start at word 4 */
	length = HTT_T2H_PPDU_STATS_PAYLOAD_SIZE_GET(*msg_word);

	msg_word = msg_word + 1;
	ppdu_id = HTT_T2H_PPDU_STATS_PPDU_ID_GET(*msg_word);


	msg_word = msg_word + 3;
	while (length > 0) {
		tlv_buf = (uint8_t *)msg_word;
		tlv_type = HTT_STATS_TLV_TAG_GET(*msg_word);
		tlv_length = HTT_STATS_TLV_LENGTH_GET(*msg_word);
		if (qdf_likely(tlv_type < CDP_PPDU_STATS_MAX_TAG))
			pdev->stats.ppdu_stats_counter[tlv_type]++;

		/* zero-length TLV terminates the stream */
		if (tlv_length == 0)
			break;

		tlv_length += HTT_TLV_HDR_LEN;

		/**
		 * Not allocating separate ppdu descriptor for MGMT Payload
		 * TLV as this is sent as separate WDI indication and it
		 * doesn't contain any ppdu information
		 */
		if (tlv_type == HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV) {
			pdev->mgmtctrl_frm_info.mgmt_buf = tlv_buf;
			pdev->mgmtctrl_frm_info.ppdu_id = ppdu_id;
			pdev->mgmtctrl_frm_info.mgmt_buf_len =
				HTT_PPDU_STATS_TX_MGMTCTRL_TLV_FRAME_LENGTH_GET
				(*(msg_word + 1));
			msg_word =
				(uint32_t *)((uint8_t *)tlv_buf + tlv_length);
			length -= (tlv_length);
			continue;
		}

		ppdu_info = dp_get_ppdu_desc(pdev, ppdu_id, tlv_type);
		if (!ppdu_info)
			return NULL;
		ppdu_info->ppdu_desc->bss_color =
			pdev->rx_mon_recv_status.bsscolor;

		ppdu_info->ppdu_id = ppdu_id;
		ppdu_info->tlv_bitmap |= (1 << tlv_type);

		dp_process_ppdu_tag(pdev, msg_word, tlv_length, ppdu_info);

		/**
		 * Increment pdev level tlv count to monitor
		 * missing TLVs
		 */
		pdev->tlv_count++;
		ppdu_info->last_tlv_cnt = pdev->tlv_count;
		msg_word = (uint32_t *)((uint8_t *)tlv_buf + tlv_length);
		length -= (tlv_length);
	}

	/* message carried only a mgmt-payload TLV (or nothing) */
	if (!ppdu_info)
		return NULL;

	pdev->last_ppdu_id = ppdu_id;

	tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP;

	if (pdev->tx_sniffer_enable || pdev->mcopy_mode ||
	    pdev->tx_capture_enabled) {
		if (ppdu_info->is_ampdu)
			tlv_bitmap_expected =
				dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(
					ppdu_info->tlv_bitmap);
	}

	ppdu_desc = ppdu_info->ppdu_desc;

	if (!ppdu_desc)
		return NULL;

	/* failed PPDUs only provide the low-numbered TLVs */
	if (ppdu_desc->user[ppdu_desc->last_usr_index].completion_status !=
	    HTT_PPDU_STATS_USER_STATUS_OK) {
		tlv_bitmap_expected = tlv_bitmap_expected & 0xFF;
	}

	if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_DATA &&
	    (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_COMMON_TLV)) &&
	    ppdu_desc->delayed_ba) {
		for (i = 0; i < ppdu_desc->num_users; i++) {
			uint32_t ppdu_id;

			ppdu_id = ppdu_desc->ppdu_id;
			peer = dp_peer_find_by_id(pdev->soc,
						  ppdu_desc->user[i].peer_id);
			/**
			 * This check is to make sure peer is not deleted
			 * after processing the TLVs.
			 */
			if (!peer)
				continue;

			/**
			 * save delayed ba user info
			 */
			if (ppdu_desc->user[i].delayed_ba) {
				dp_peer_copy_delay_stats(peer,
							 &ppdu_desc->user[i]);
				peer->last_delayed_ba_ppduid = ppdu_id;
			}
			dp_peer_unref_del_find_by_id(peer);
		}
	}

	/*
	 * when frame type is BAR and STATS_COMMON_TLV is set
	 * copy the store peer delayed info to BAR status
	 */
	if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR &&
	    (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_COMMON_TLV))) {
		for (i = 0; i < ppdu_desc->bar_num_users; i++) {
			peer = dp_peer_find_by_id(pdev->soc,
						  ppdu_desc->user[i].peer_id);
			/**
			 * This check is to make sure peer is not deleted
			 * after processing the TLVs.
			 */
			if (!peer)
				continue;

			if (peer->last_delayed_ba) {
				dp_peer_copy_stats_to_bar(peer,
							  &ppdu_desc->user[i]);
				ppdu_desc->bar_ppdu_id = ppdu_desc->ppdu_id;
				ppdu_desc->ppdu_id =
					peer->last_delayed_ba_ppduid;
			}
			dp_peer_unref_del_find_by_id(peer);
		}
	}

	/*
	 * for frame type DATA and BAR, we update stats based on MSDU,
	 * successful msdu and mpdu are populate from ACK BA STATUS TLV
	 * which comes out of order. successful mpdu also populated from
	 * COMPLTN COMMON TLV which comes in order. for every ppdu_info
	 * we store successful mpdu from both tlv and compare before delivering
	 * to make sure we received ACK BA STATUS TLV. For some self generated
	 * frame we won't get ack ba status tlv so no need to wait for
	 * ack ba status tlv.
	 */
	if (ppdu_desc->frame_type != CDP_PPDU_FTYPE_CTRL &&
	    ppdu_desc->htt_frame_type != HTT_STATS_FTYPE_SGEN_QOS_NULL) {
		/*
		 * successful mpdu count should match with both tlv
		 */
		if (ppdu_info->mpdu_compltn_common_tlv !=
		    ppdu_info->mpdu_ack_ba_tlv)
			return NULL;
	}

	/**
	 * Once all the TLVs for a given PPDU has been processed,
	 * return PPDU status to be delivered to higher layer.
	 * tlv_bitmap_expected can't be available for different frame type.
	 * But STATS COMMON TLV is the last TLV from the FW for a ppdu.
	 * apart from ACK BA TLV, FW sends other TLV in sequential order.
	 * flush tlv comes separate.
	 */
	if ((ppdu_info->tlv_bitmap != 0 &&
	     (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_COMMON_TLV))) ||
	    (ppdu_info->tlv_bitmap &
	     (1 << HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV)))
		return ppdu_info;

	return NULL;
}
Soumya Bhat1c73aa62017-09-20 22:18:22 +05303464#endif /* FEATURE_PERPKT_INFO */
Pamidipati, Vijay038d0902017-07-17 09:53:31 +05303465
3466/**
3467 * dp_txrx_ppdu_stats_handler() - Function to process HTT PPDU stats from FW
3468 * @soc: DP SOC handle
3469 * @pdev_id: pdev id
3470 * @htt_t2h_msg: HTT message nbuf
3471 *
3472 * return:void
3473 */
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07003474#if defined(WDI_EVENT_ENABLE)
Pamidipati, Vijay038d0902017-07-17 09:53:31 +05303475#ifdef FEATURE_PERPKT_INFO
Soumya Bhat402fe1a2018-03-09 13:04:57 +05303476static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
3477 uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
Pamidipati, Vijay038d0902017-07-17 09:53:31 +05303478{
3479 struct dp_pdev *pdev = soc->pdev_list[pdev_id];
Anish Natarajb9e7d012018-02-16 00:38:10 +05303480 struct ppdu_info *ppdu_info = NULL;
Soumya Bhat402fe1a2018-03-09 13:04:57 +05303481 bool free_buf = true;
Pamidipati, Vijay038d0902017-07-17 09:53:31 +05303482
Nandha Kishore Easwarandd685082018-06-22 18:54:08 +05303483 if (!pdev)
3484 return true;
3485
Soumya Bhat89647ef2017-11-16 17:23:48 +05303486 if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable &&
Alok Singh40a622b2018-06-28 10:47:26 +05303487 !pdev->mcopy_mode && !pdev->bpr_enable)
Soumya Bhat402fe1a2018-03-09 13:04:57 +05303488 return free_buf;
Pamidipati, Vijay038d0902017-07-17 09:53:31 +05303489
Soumya Bhat51240dc2018-05-24 18:00:57 +05303490 ppdu_info = dp_htt_process_tlv(pdev, htt_t2h_msg);
3491
3492 if (pdev->mgmtctrl_frm_info.mgmt_buf) {
3493 if (dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv
3494 (pdev, htt_t2h_msg, pdev->mgmtctrl_frm_info.ppdu_id) !=
3495 QDF_STATUS_SUCCESS)
3496 free_buf = false;
3497 }
3498
Anish Natarajb9e7d012018-02-16 00:38:10 +05303499 if (ppdu_info)
3500 dp_ppdu_desc_deliver(pdev, ppdu_info);
Soumya Bhat402fe1a2018-03-09 13:04:57 +05303501
nobelj2a1312c2019-06-20 23:45:43 -07003502 pdev->mgmtctrl_frm_info.mgmt_buf = NULL;
3503 pdev->mgmtctrl_frm_info.mgmt_buf_len = 0;
3504 pdev->mgmtctrl_frm_info.ppdu_id = 0;
3505
Soumya Bhat402fe1a2018-03-09 13:04:57 +05303506 return free_buf;
Pamidipati, Vijay038d0902017-07-17 09:53:31 +05303507}
3508#else
/*
 * Stub used when FEATURE_PERPKT_INFO is not defined: no PPDU stats
 * processing is done and the caller always frees the HTT message buffer.
 */
static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
		uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
{
	return true;
}
3514#endif
3515#endif
3516
3517/**
3518 * dp_txrx_fw_stats_handler() - Function to process HTT EXT stats
Ishank Jain6290a3c2017-03-21 10:49:39 +05303519 * @soc: DP SOC handle
3520 * @htt_t2h_msg: HTT message nbuf
3521 *
3522 * return:void
3523 */
3524static inline void dp_txrx_fw_stats_handler(struct dp_soc *soc,
3525 qdf_nbuf_t htt_t2h_msg)
3526{
Ishank Jain6290a3c2017-03-21 10:49:39 +05303527 uint8_t done;
3528 qdf_nbuf_t msg_copy;
3529 uint32_t *msg_word;
3530
3531 msg_word = (uint32_t *) qdf_nbuf_data(htt_t2h_msg);
3532 msg_word = msg_word + 3;
Ishank Jain6290a3c2017-03-21 10:49:39 +05303533 done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
3534
3535 /*
3536 * HTT EXT stats response comes as stream of TLVs which span over
3537 * multiple T2H messages.
3538 * The first message will carry length of the response.
3539 * For rest of the messages length will be zero.
Om Prakash Tripathi12126822017-08-03 10:21:24 +05303540 *
Ishank Jain6290a3c2017-03-21 10:49:39 +05303541 * Clone the T2H message buffer and store it in a list to process
3542 * it later.
3543 *
3544 * The original T2H message buffers gets freed in the T2H HTT event
3545 * handler
3546 */
3547 msg_copy = qdf_nbuf_clone(htt_t2h_msg);
3548
3549 if (!msg_copy) {
3550 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
3551 "T2H messge clone failed for HTT EXT STATS");
Ishank Jain6290a3c2017-03-21 10:49:39 +05303552 goto error;
3553 }
3554
Om Prakash Tripathi12126822017-08-03 10:21:24 +05303555 qdf_spin_lock_bh(&soc->htt_stats.lock);
3556 qdf_nbuf_queue_add(&soc->htt_stats.msg, msg_copy);
Ishank Jain6290a3c2017-03-21 10:49:39 +05303557 /*
3558 * Done bit signifies that this is the last T2H buffer in the stream of
3559 * HTT EXT STATS message
3560 */
Om Prakash Tripathi12126822017-08-03 10:21:24 +05303561 if (done) {
3562 soc->htt_stats.num_stats++;
3563 qdf_sched_work(0, &soc->htt_stats.work);
3564 }
3565 qdf_spin_unlock_bh(&soc->htt_stats.lock);
Ishank Jain6290a3c2017-03-21 10:49:39 +05303566
3567 return;
3568
3569error:
Om Prakash Tripathi12126822017-08-03 10:21:24 +05303570 qdf_spin_lock_bh(&soc->htt_stats.lock);
3571 while ((msg_copy = qdf_nbuf_queue_remove(&soc->htt_stats.msg))
Ishank Jain6290a3c2017-03-21 10:49:39 +05303572 != NULL) {
3573 qdf_nbuf_free(msg_copy);
3574 }
Om Prakash Tripathi12126822017-08-03 10:21:24 +05303575 soc->htt_stats.num_stats = 0;
3576 qdf_spin_unlock_bh(&soc->htt_stats.lock);
Ishank Jain6290a3c2017-03-21 10:49:39 +05303577 return;
3578
3579}
3580
/*
 * htt_soc_attach_target() - SOC level HTT setup
 * @htt_soc: HTT SOC handle
 *
 * Kicks off the H2T version request handshake with the target.
 *
 * Return: 0 on success; error code on failure
 */
int htt_soc_attach_target(struct htt_soc *htt_soc)
{
	return htt_h2t_ver_req_msg(htt_soc);
}
3593
/* Store the HTC layer handle used for all H2T message transmission. */
void htt_set_htc_handle(struct htt_soc *htt_soc, HTC_HANDLE htc_soc)
{
	htt_soc->htc_soc = htc_soc;
}
3598
/*
 * htt_get_htc_handle() - retrieve the HTC handle from the HTT SOC context
 * @htt_soc: HTT SOC handle
 *
 * Return: the HTC handle previously stored via htt_set_htc_handle()
 *         or htt_soc_attach()
 */
HTC_HANDLE htt_get_htc_handle(struct htt_soc *htt_soc)
{
	return htt_soc->htc_soc;
}
3603
3604struct htt_soc *htt_soc_attach(struct dp_soc *soc, HTC_HANDLE htc_handle)
3605{
Ruben Columbus814e6cb2019-09-10 15:49:11 -07003606 int i;
3607 int j;
3608 int alloc_size = HTT_SW_UMAC_RING_IDX_MAX * sizeof(unsigned long);
Akshay Kosigi383b6d52019-07-12 12:24:30 +05303609 struct htt_soc *htt_soc = NULL;
3610
3611 htt_soc = qdf_mem_malloc(sizeof(*htt_soc));
3612 if (!htt_soc) {
3613 dp_err("HTT attach failed");
3614 return NULL;
3615 }
Ruben Columbus814e6cb2019-09-10 15:49:11 -07003616
3617 for (i = 0; i < MAX_PDEV_CNT; i++) {
3618 htt_soc->pdevid_tt[i].umac_ttt = qdf_mem_malloc(alloc_size);
3619 if (!htt_soc->pdevid_tt[i].umac_ttt)
3620 break;
3621 qdf_mem_set(htt_soc->pdevid_tt[i].umac_ttt, alloc_size, -1);
3622 htt_soc->pdevid_tt[i].lmac_ttt = qdf_mem_malloc(alloc_size);
3623 if (!htt_soc->pdevid_tt[i].lmac_ttt) {
3624 qdf_mem_free(htt_soc->pdevid_tt[i].umac_ttt);
3625 break;
3626 }
3627 qdf_mem_set(htt_soc->pdevid_tt[i].lmac_ttt, alloc_size, -1);
3628 }
3629 if (i != MAX_PDEV_CNT) {
3630 for (j = 0; j < i; j++) {
3631 qdf_mem_free(htt_soc->pdevid_tt[i].umac_ttt);
3632 qdf_mem_free(htt_soc->pdevid_tt[i].lmac_ttt);
3633 }
3634 return NULL;
3635 }
3636
Akshay Kosigi383b6d52019-07-12 12:24:30 +05303637 htt_soc->dp_soc = soc;
3638 htt_soc->htc_soc = htc_handle;
3639 HTT_TX_MUTEX_INIT(&htt_soc->htt_tx_mutex);
3640
3641 return htt_soc;
3642}
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003643
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07003644#if defined(WDI_EVENT_ENABLE) && !defined(REMOVE_PKT_LOG)
3645/*
3646 * dp_ppdu_stats_ind_handler() - PPDU stats msg handler
3647 * @htt_soc: HTT SOC handle
3648 * @msg_word: Pointer to payload
3649 * @htt_t2h_msg: HTT msg nbuf
3650 *
Kiran Venkatappad1a16872018-04-24 18:39:09 +05303651 * Return: True if buffer should be freed by caller.
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07003652 */
Soumya Bhat402fe1a2018-03-09 13:04:57 +05303653static bool
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07003654dp_ppdu_stats_ind_handler(struct htt_soc *soc,
3655 uint32_t *msg_word,
3656 qdf_nbuf_t htt_t2h_msg)
3657{
3658 u_int8_t pdev_id;
Soumya Bhat402fe1a2018-03-09 13:04:57 +05303659 bool free_buf;
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07003660 qdf_nbuf_set_pktlen(htt_t2h_msg, HTT_T2H_MAX_MSG_SIZE);
Keyur Parekh73554f92018-01-05 12:01:10 -08003661 pdev_id = HTT_T2H_PPDU_STATS_PDEV_ID_GET(*msg_word);
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07003662 pdev_id = DP_HW2SW_MACID(pdev_id);
Soumya Bhat402fe1a2018-03-09 13:04:57 +05303663 free_buf = dp_txrx_ppdu_stats_handler(soc->dp_soc, pdev_id,
3664 htt_t2h_msg);
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07003665 dp_wdi_event_handler(WDI_EVENT_LITE_T2H, soc->dp_soc,
3666 htt_t2h_msg, HTT_INVALID_PEER, WDI_NO_VAL,
3667 pdev_id);
Soumya Bhat402fe1a2018-03-09 13:04:57 +05303668 return free_buf;
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07003669}
3670#else
Kiran Venkatappad1a16872018-04-24 18:39:09 +05303671static bool
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07003672dp_ppdu_stats_ind_handler(struct htt_soc *soc,
Kiran Venkatappad1a16872018-04-24 18:39:09 +05303673 uint32_t *msg_word,
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07003674 qdf_nbuf_t htt_t2h_msg)
3675{
Kiran Venkatappad1a16872018-04-24 18:39:09 +05303676 return true;
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07003677}
3678#endif
3679
3680#if defined(WDI_EVENT_ENABLE) && \
Venkata Sharath Chandra Manchala0ce469e2018-10-28 21:01:23 -07003681 !defined(REMOVE_PKT_LOG)
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07003682/*
3683 * dp_pktlog_msg_handler() - Pktlog msg handler
3684 * @htt_soc: HTT SOC handle
3685 * @msg_word: Pointer to payload
3686 *
3687 * Return: None
3688 */
3689static void
3690dp_pktlog_msg_handler(struct htt_soc *soc,
Venkata Sharath Chandra Manchala0ce469e2018-10-28 21:01:23 -07003691 uint32_t *msg_word)
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07003692{
3693 uint8_t pdev_id;
3694 uint32_t *pl_hdr;
Venkata Sharath Chandra Manchala96e36332018-12-10 16:42:31 -08003695
Keyur Parekh73554f92018-01-05 12:01:10 -08003696 pdev_id = HTT_T2H_PKTLOG_PDEV_ID_GET(*msg_word);
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07003697 pdev_id = DP_HW2SW_MACID(pdev_id);
3698 pl_hdr = (msg_word + 1);
3699 dp_wdi_event_handler(WDI_EVENT_OFFLOAD_ALL, soc->dp_soc,
3700 pl_hdr, HTT_INVALID_PEER, WDI_NO_VAL,
3701 pdev_id);
3702}
3703#else
3704static void
3705dp_pktlog_msg_handler(struct htt_soc *soc,
Venkata Sharath Chandra Manchala0ce469e2018-10-28 21:01:23 -07003706 uint32_t *msg_word)
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07003707{
3708}
3709#endif
Ruben Columbus43194932019-05-24 09:56:52 -07003710
Ruben Columbus814e6cb2019-09-10 15:49:11 -07003711/*
3712 * time_allow_print() - time allow print
3713 * @htt_ring_tt: ringi_id array of timestamps
3714 * @ring_id: ring_id (index)
3715 *
3716 * Return: 1 for successfully saving timestamp in array
3717 * and 0 for timestamp falling within 2 seconds after last one
3718 */
3719static bool time_allow_print(unsigned long *htt_ring_tt, u_int8_t ring_id)
3720{
3721 unsigned long tstamp;
3722 unsigned long delta;
3723
3724 tstamp = qdf_get_system_timestamp();
3725
3726 if (!htt_ring_tt)
3727 return 0; //unable to print backpressure messages
3728
3729 if (htt_ring_tt[ring_id] == -1) {
3730 htt_ring_tt[ring_id] = tstamp;
3731 return 1;
3732 }
3733 delta = tstamp - htt_ring_tt[ring_id];
3734 if (delta >= 2000) {
3735 htt_ring_tt[ring_id] = tstamp;
3736 return 1;
3737 }
3738
3739 return 0;
3740}
3741
/*
 * dp_htt_alert_print() - emit a two-line alert describing a ring
 * backpressure event
 * @msg_type: HTT T2H message type that carried the event
 * @pdev_id: SW pdev id of the affected radio
 * @ring_id: index of the backpressured ring
 * @hp_idx: ring head pointer index reported by the target
 * @tp_idx: ring tail pointer index reported by the target
 * @bkp_time: how long the ring has been backpressured, in ms
 * @ring_stype: printable ring-type name (e.g. "HTT_SW_RING_TYPE_UMAC")
 */
static void dp_htt_alert_print(enum htt_t2h_msg_type msg_type,
			       u_int8_t pdev_id, u_int8_t ring_id,
			       u_int16_t hp_idx, u_int16_t tp_idx,
			       u_int32_t bkp_time, char *ring_stype)
{
	dp_alert("msg_type: %d pdev_id: %d ring_type: %s ",
		 msg_type, pdev_id, ring_stype);
	dp_alert("ring_id: %d hp_idx: %d tp_idx: %d bkpressure_time_ms: %d ",
		 ring_id, hp_idx, tp_idx, bkp_time);
}
3752
Ruben Columbus814e6cb2019-09-10 15:49:11 -07003753/*
3754 * dp_htt_bkp_event_alert() - htt backpressure event alert
3755 * @msg_word: htt packet context
3756 * @htt_soc: HTT SOC handle
3757 *
3758 * Return: after attempting to print stats
3759 */
3760static void dp_htt_bkp_event_alert(u_int32_t *msg_word, struct htt_soc *soc)
Ruben Columbus43194932019-05-24 09:56:52 -07003761{
3762 u_int8_t ring_type;
3763 u_int8_t pdev_id;
3764 u_int8_t ring_id;
3765 u_int16_t hp_idx;
3766 u_int16_t tp_idx;
3767 u_int32_t bkp_time;
3768 enum htt_t2h_msg_type msg_type;
Ruben Columbus814e6cb2019-09-10 15:49:11 -07003769 struct dp_soc *dpsoc;
3770 struct dp_pdev *pdev;
3771 struct dp_htt_timestamp *radio_tt;
Ruben Columbus43194932019-05-24 09:56:52 -07003772
Ruben Columbus814e6cb2019-09-10 15:49:11 -07003773 if (!soc)
3774 return;
3775
3776 dpsoc = (struct dp_soc *)soc->dp_soc;
Ruben Columbus43194932019-05-24 09:56:52 -07003777 msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
3778 ring_type = HTT_T2H_RX_BKPRESSURE_RING_TYPE_GET(*msg_word);
3779 pdev_id = HTT_T2H_RX_BKPRESSURE_PDEV_ID_GET(*msg_word);
3780 pdev_id = DP_HW2SW_MACID(pdev_id);
Ruben Columbus814e6cb2019-09-10 15:49:11 -07003781 pdev = (struct dp_pdev *)dpsoc->pdev_list[pdev_id];
Ruben Columbus43194932019-05-24 09:56:52 -07003782 ring_id = HTT_T2H_RX_BKPRESSURE_RINGID_GET(*msg_word);
3783 hp_idx = HTT_T2H_RX_BKPRESSURE_HEAD_IDX_GET(*(msg_word + 1));
3784 tp_idx = HTT_T2H_RX_BKPRESSURE_TAIL_IDX_GET(*(msg_word + 1));
3785 bkp_time = HTT_T2H_RX_BKPRESSURE_TIME_MS_GET(*(msg_word + 2));
Ruben Columbus814e6cb2019-09-10 15:49:11 -07003786 radio_tt = &soc->pdevid_tt[pdev_id];
Ruben Columbus43194932019-05-24 09:56:52 -07003787
3788 switch (ring_type) {
3789 case HTT_SW_RING_TYPE_UMAC:
Ruben Columbus814e6cb2019-09-10 15:49:11 -07003790 if (!time_allow_print(radio_tt->umac_ttt, ring_id))
3791 return;
Ruben Columbus43194932019-05-24 09:56:52 -07003792 dp_htt_alert_print(msg_type, pdev_id, ring_id, hp_idx, tp_idx,
Jeffin Mammen084f9362019-11-08 10:07:27 +05303793 bkp_time, "HTT_SW_RING_TYPE_UMAC");
Ruben Columbus43194932019-05-24 09:56:52 -07003794 break;
3795 case HTT_SW_RING_TYPE_LMAC:
Ruben Columbus814e6cb2019-09-10 15:49:11 -07003796 if (!time_allow_print(radio_tt->lmac_ttt, ring_id))
3797 return;
Ruben Columbus43194932019-05-24 09:56:52 -07003798 dp_htt_alert_print(msg_type, pdev_id, ring_id, hp_idx, tp_idx,
3799 bkp_time, "HTT_SW_RING_TYPE_LMAC");
3800 break;
Ruben Columbus43194932019-05-24 09:56:52 -07003801 default:
3802 dp_htt_alert_print(msg_type, pdev_id, ring_id, hp_idx, tp_idx,
3803 bkp_time, "UNKNOWN");
3804 break;
3805 }
Ruben Columbus814e6cb2019-09-10 15:49:11 -07003806
3807 dp_print_ring_stats(pdev);
3808 dp_print_napi_stats(pdev->soc);
Ruben Columbus43194932019-05-24 09:56:52 -07003809}
3810
/*
 * dp_htt_t2h_msg_handler() - Generic Target to host Msg/event handler
 * @context: Opaque context (HTT SOC handle)
 * @pkt: HTC packet
 *
 * Dispatches each T2H HTT message by type: peer map/unmap (v1 and v2),
 * security indication, PPDU stats, pktlog, version response, RX ADDBA,
 * EXT stats and backpressure events. Owns the message nbuf: it is freed
 * here unless a sub-handler (PPDU stats) takes ownership by returning
 * free_buf == false.
 */
static void dp_htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
{
	struct htt_soc *soc = (struct htt_soc *) context;
	qdf_nbuf_t htt_t2h_msg = (qdf_nbuf_t) pkt->pPktContext;
	u_int32_t *msg_word;
	enum htt_t2h_msg_type msg_type;
	bool free_buf = true;

	/* check for successful message reception */
	if (pkt->Status != QDF_STATUS_SUCCESS) {
		/* cancellation is not an error; anything else is counted */
		if (pkt->Status != QDF_STATUS_E_CANCELED)
			soc->stats.htc_err_cnt++;

		qdf_nbuf_free(htt_t2h_msg);
		return;
	}

	/* TODO: Check if we should pop the HTC/HTT header alignment padding */

	msg_word = (u_int32_t *) qdf_nbuf_data(htt_t2h_msg);
	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
	/* record the event for the HTT debug logger before dispatch */
	htt_event_record(soc->htt_logger_handle,
			 msg_type, (uint8_t *)msg_word);
	switch (msg_type) {
	case HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND:
	{
		/* target reports a backpressured SW ring */
		dp_htt_bkp_event_alert(msg_word, soc);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_MAP:
	{
		u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
		u_int8_t *peer_mac_addr;
		u_int16_t peer_id;
		u_int16_t hw_peer_id;
		u_int8_t vdev_id;
		u_int8_t is_wds;
		struct dp_soc *dpsoc = (struct dp_soc *)soc->dp_soc;

		peer_id = HTT_RX_PEER_MAP_PEER_ID_GET(*msg_word);
		hw_peer_id =
		HTT_RX_PEER_MAP_HW_PEER_ID_GET(*(msg_word+2));
		vdev_id = HTT_RX_PEER_MAP_VDEV_ID_GET(*msg_word);
		/* MAC address arrives byte-swizzled in words 1..2 */
		peer_mac_addr = htt_t2h_mac_addr_deswizzle(
			(u_int8_t *) (msg_word+1),
			&mac_addr_deswizzle_buf[0]);
		QDF_TRACE(QDF_MODULE_ID_TXRX,
			QDF_TRACE_LEVEL_INFO,
			"HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n",
			peer_id, vdev_id);

		/*
		 * check if peer already exists for this peer_id, if so
		 * this peer map event is in response for a wds peer add
		 * wmi command sent during wds source port learning.
		 * in this case just add the ast entry to the existing
		 * peer ast_list.
		 */
		is_wds = !!(dpsoc->peer_id_to_obj_map[peer_id]);
		dp_rx_peer_map_handler(soc->dp_soc, peer_id, hw_peer_id,
				       vdev_id, peer_mac_addr, 0,
				       is_wds);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
	{
		u_int16_t peer_id;
		u_int8_t vdev_id;
		/* v1 unmap carries no MAC address; pass an all-zero one */
		u_int8_t mac_addr[QDF_MAC_ADDR_SIZE] = {0};
		peer_id = HTT_RX_PEER_UNMAP_PEER_ID_GET(*msg_word);
		vdev_id = HTT_RX_PEER_UNMAP_VDEV_ID_GET(*msg_word);

		dp_rx_peer_unmap_handler(soc->dp_soc, peer_id,
					 vdev_id, mac_addr, 0);
		break;
	}
	case HTT_T2H_MSG_TYPE_SEC_IND:
	{
		u_int16_t peer_id;
		enum cdp_sec_type sec_type;
		int is_unicast;

		peer_id = HTT_SEC_IND_PEER_ID_GET(*msg_word);
		sec_type = HTT_SEC_IND_SEC_TYPE_GET(*msg_word);
		is_unicast = HTT_SEC_IND_UNICAST_GET(*msg_word);
		/* point to the first part of the Michael key */
		msg_word++;
		dp_rx_sec_ind_handler(
			soc->dp_soc, peer_id, sec_type, is_unicast,
			msg_word, msg_word + 2);
		break;
	}

	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
	{
		/* handler may keep the nbuf; honor its free_buf verdict */
		free_buf = dp_ppdu_stats_ind_handler(soc, msg_word,
						     htt_t2h_msg);
		break;
	}

	case HTT_T2H_MSG_TYPE_PKTLOG:
	{
		dp_pktlog_msg_handler(soc, msg_word);
		break;
	}

	case HTT_T2H_MSG_TYPE_VERSION_CONF:
	{
		/* response to the version request sent at attach time */
		htc_pm_runtime_put(soc->htc_soc);
		soc->tgt_ver.major = HTT_VER_CONF_MAJOR_GET(*msg_word);
		soc->tgt_ver.minor = HTT_VER_CONF_MINOR_GET(*msg_word);
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
			"target uses HTT version %d.%d; host uses %d.%d",
			soc->tgt_ver.major, soc->tgt_ver.minor,
			HTT_CURRENT_VERSION_MAJOR,
			HTT_CURRENT_VERSION_MINOR);
		if (soc->tgt_ver.major != HTT_CURRENT_VERSION_MAJOR) {
			QDF_TRACE(QDF_MODULE_ID_TXRX,
				QDF_TRACE_LEVEL_ERROR,
				"*** Incompatible host/target HTT versions!");
		}
		/* abort if the target is incompatible with the host */
		qdf_assert(soc->tgt_ver.major ==
			HTT_CURRENT_VERSION_MAJOR);
		if (soc->tgt_ver.minor != HTT_CURRENT_VERSION_MINOR) {
			QDF_TRACE(QDF_MODULE_ID_TXRX,
				QDF_TRACE_LEVEL_WARN,
				"*** Warning: host/target HTT versions"
				" are different, though compatible!");
		}
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_ADDBA:
	{
		uint16_t peer_id;
		uint8_t tid;
		uint8_t win_sz;
		uint16_t status;
		struct dp_peer *peer;

		/*
		 * Update REO Queue Desc with new values
		 */
		peer_id = HTT_RX_ADDBA_PEER_ID_GET(*msg_word);
		tid = HTT_RX_ADDBA_TID_GET(*msg_word);
		win_sz = HTT_RX_ADDBA_WIN_SIZE_GET(*msg_word);
		peer = dp_peer_find_by_id(soc->dp_soc, peer_id);

		/*
		 * Window size needs to be incremented by 1
		 * since fw needs to represent a value of 256
		 * using just 8 bits
		 */
		if (peer) {
			status = dp_addba_requestprocess_wifi3(
				(struct cdp_soc_t *)soc->dp_soc,
				peer->mac_addr.raw, peer->vdev->vdev_id,
				0, tid, 0, win_sz + 1, 0xffff);

			/*
			 * If PEER_LOCK_REF_PROTECT enabled dec ref
			 * which is inc by dp_peer_find_by_id
			 */
			dp_peer_unref_del_find_by_id(peer);

			QDF_TRACE(QDF_MODULE_ID_TXRX,
				QDF_TRACE_LEVEL_INFO,
				FL("PeerID %d BAW %d TID %d stat %d"),
				peer_id, win_sz, tid, status);

		} else {
			QDF_TRACE(QDF_MODULE_ID_TXRX,
				QDF_TRACE_LEVEL_ERROR,
				FL("Peer not found peer id %d"),
				peer_id);
		}
		break;
	}
	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
	{
		dp_txrx_fw_stats_handler(soc->dp_soc, htt_t2h_msg);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_MAP_V2:
	{
		u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
		u_int8_t *peer_mac_addr;
		u_int16_t peer_id;
		u_int16_t hw_peer_id;
		u_int8_t vdev_id;
		bool is_wds;
		u_int16_t ast_hash;

		peer_id = HTT_RX_PEER_MAP_V2_SW_PEER_ID_GET(*msg_word);
		hw_peer_id =
		HTT_RX_PEER_MAP_V2_HW_PEER_ID_GET(*(msg_word + 2));
		vdev_id = HTT_RX_PEER_MAP_V2_VDEV_ID_GET(*msg_word);
		peer_mac_addr =
		htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
					   &mac_addr_deswizzle_buf[0]);
		/* v2 map additionally carries next-hop flag and AST hash */
		is_wds =
		HTT_RX_PEER_MAP_V2_NEXT_HOP_GET(*(msg_word + 3));
		ast_hash =
		HTT_RX_PEER_MAP_V2_AST_HASH_VALUE_GET(*(msg_word + 3));
		QDF_TRACE(QDF_MODULE_ID_TXRX,
			  QDF_TRACE_LEVEL_INFO,
			  "HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n",
			  peer_id, vdev_id);

		dp_rx_peer_map_handler(soc->dp_soc, peer_id,
				       hw_peer_id, vdev_id,
				       peer_mac_addr, ast_hash,
				       is_wds);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_UNMAP_V2:
	{
		u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
		u_int8_t *mac_addr;
		u_int16_t peer_id;
		u_int8_t vdev_id;
		u_int8_t is_wds;

		peer_id =
		HTT_RX_PEER_UNMAP_V2_SW_PEER_ID_GET(*msg_word);
		vdev_id = HTT_RX_PEER_UNMAP_V2_VDEV_ID_GET(*msg_word);
		/* unlike v1 unmap, v2 carries the peer MAC address */
		mac_addr =
		htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
					   &mac_addr_deswizzle_buf[0]);
		is_wds =
		HTT_RX_PEER_UNMAP_V2_NEXT_HOP_GET(*(msg_word + 2));
		QDF_TRACE(QDF_MODULE_ID_TXRX,
			  QDF_TRACE_LEVEL_INFO,
			  "HTT_T2H_MSG_TYPE_PEER_UNMAP msg for peer id %d vdev id %d n",
			  peer_id, vdev_id);

		dp_rx_peer_unmap_handler(soc->dp_soc, peer_id,
					 vdev_id, mac_addr,
					 is_wds);
		break;
	}
	default:
		break;
	};

	/* Free the indication buffer */
	if (free_buf)
		qdf_nbuf_free(htt_t2h_msg);
}
4065
/*
 * dp_htt_h2t_full() - Send full handler (called from HTC)
 * @context: Opaque context (HTT SOC handle)
 * @pkt: HTC packet
 *
 * Invoked by HTC when the send queue exceeds MaxSendQueueDepth; HTT
 * keeps the packet queued rather than dropping it.
 *
 * Return: enum htc_send_full_action
 */
static enum htc_send_full_action
dp_htt_h2t_full(void *context, HTC_PACKET *pkt)
{
	return HTC_SEND_FULL_KEEP;
}
4078
4079/*
Karunakar Dasineniead27fb2017-09-28 14:28:48 -07004080 * dp_htt_hif_t2h_hp_callback() - HIF callback for high priority T2H messages
4081 * @context: Opaque context (HTT SOC handle)
4082 * @nbuf: nbuf containing T2H message
4083 * @pipe_id: HIF pipe ID
4084 *
4085 * Return: QDF_STATUS
4086 *
4087 * TODO: Temporary change to bypass HTC connection for this new HIF pipe, which
Jeff Johnsonff2dfb22018-05-12 10:27:57 -07004088 * will be used for packet log and other high-priority HTT messages. Proper
Karunakar Dasineniead27fb2017-09-28 14:28:48 -07004089 * HTC connection to be added later once required FW changes are available
4090 */
4091static QDF_STATUS
4092dp_htt_hif_t2h_hp_callback (void *context, qdf_nbuf_t nbuf, uint8_t pipe_id)
4093{
Venkata Sharath Chandra Manchalad18887e2018-10-02 18:18:52 -07004094 QDF_STATUS rc = QDF_STATUS_SUCCESS;
Karunakar Dasineniead27fb2017-09-28 14:28:48 -07004095 HTC_PACKET htc_pkt;
4096
4097 qdf_assert_always(pipe_id == DP_HTT_T2H_HP_PIPE);
4098 qdf_mem_zero(&htc_pkt, sizeof(htc_pkt));
4099 htc_pkt.Status = QDF_STATUS_SUCCESS;
4100 htc_pkt.pPktContext = (void *)nbuf;
4101 dp_htt_t2h_msg_handler(context, &htc_pkt);
4102
4103 return rc;
4104}
4105
4106/*
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004107 * htt_htc_soc_attach() - Register SOC level HTT instance with HTC
4108 * @htt_soc: HTT SOC handle
4109 *
Sravan Kumar Kairamdeb899b2019-01-23 14:47:07 +05304110 * Return: QDF_STATUS
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004111 */
Sravan Kumar Kairamdeb899b2019-01-23 14:47:07 +05304112static QDF_STATUS
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004113htt_htc_soc_attach(struct htt_soc *soc)
4114{
Manikandan Mohan50ec7042017-04-19 11:37:47 -07004115 struct htc_service_connect_req connect;
4116 struct htc_service_connect_resp response;
Sravan Kumar Kairamdeb899b2019-01-23 14:47:07 +05304117 QDF_STATUS status;
Kiran Venkatappa96c42b62018-01-08 12:33:07 +05304118 struct dp_soc *dpsoc = soc->dp_soc;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004119
hangtianfe681a52019-01-16 17:16:28 +08004120 qdf_mem_zero(&connect, sizeof(connect));
4121 qdf_mem_zero(&response, sizeof(response));
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004122
4123 connect.pMetaData = NULL;
4124 connect.MetaDataLength = 0;
4125 connect.EpCallbacks.pContext = soc;
4126 connect.EpCallbacks.EpTxComplete = dp_htt_h2t_send_complete;
4127 connect.EpCallbacks.EpTxCompleteMultiple = NULL;
4128 connect.EpCallbacks.EpRecv = dp_htt_t2h_msg_handler;
4129
4130 /* rx buffers currently are provided by HIF, not by EpRecvRefill */
4131 connect.EpCallbacks.EpRecvRefill = NULL;
4132
4133 /* N/A, fill is done by HIF */
4134 connect.EpCallbacks.RecvRefillWaterMark = 1;
4135
4136 connect.EpCallbacks.EpSendFull = dp_htt_h2t_full;
4137 /*
4138 * Specify how deep to let a queue get before htc_send_pkt will
4139 * call the EpSendFull function due to excessive send queue depth.
4140 */
4141 connect.MaxSendQueueDepth = DP_HTT_MAX_SEND_QUEUE_DEPTH;
4142
4143 /* disable flow control for HTT data message service */
4144 connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
4145
4146 /* connect to control service */
4147 connect.service_id = HTT_DATA_MSG_SVC;
4148
4149 status = htc_connect_service(soc->htc_soc, &connect, &response);
4150
Sravan Kumar Kairamdeb899b2019-01-23 14:47:07 +05304151 if (status != QDF_STATUS_SUCCESS)
4152 return status;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004153
4154 soc->htc_endpoint = response.Endpoint;
4155
Kiran Venkatappa96c42b62018-01-08 12:33:07 +05304156 hif_save_htc_htt_config_endpoint(dpsoc->hif_handle, soc->htc_endpoint);
Ankit Kumar0ead45c2019-04-29 15:32:49 +05304157
4158 htt_interface_logging_init(&soc->htt_logger_handle);
Karunakar Dasineniead27fb2017-09-28 14:28:48 -07004159 dp_hif_update_pipe_callback(soc->dp_soc, (void *)soc,
4160 dp_htt_hif_t2h_hp_callback, DP_HTT_T2H_HP_PIPE);
4161
Sravan Kumar Kairamdeb899b2019-01-23 14:47:07 +05304162 return QDF_STATUS_SUCCESS; /* success */
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004163}
4164
4165/*
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05304166 * htt_soc_initialize() - SOC level HTT initialization
4167 * @htt_soc: Opaque htt SOC handle
4168 * @ctrl_psoc: Opaque ctrl SOC handle
4169 * @htc_soc: SOC level HTC handle
4170 * @hal_soc: Opaque HAL SOC handle
4171 * @osdev: QDF device
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004172 *
4173 * Return: HTT handle on success; NULL on failure
4174 */
4175void *
Akshay Kosigieec6db92019-07-02 14:25:54 +05304176htt_soc_initialize(struct htt_soc *htt_soc,
4177 struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
Akshay Kosigia5c46a42019-06-27 12:43:01 +05304178 HTC_HANDLE htc_soc,
Akshay Kosigia870c612019-07-08 23:10:30 +05304179 hal_soc_handle_t hal_soc_hdl, qdf_device_t osdev)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004180{
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05304181 struct htt_soc *soc = (struct htt_soc *)htt_soc;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004182
4183 soc->osdev = osdev;
Sathyanarayanan Esakkiappan38c6f982017-12-05 12:00:31 +05304184 soc->ctrl_psoc = ctrl_psoc;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004185 soc->htc_soc = htc_soc;
Akshay Kosigia870c612019-07-08 23:10:30 +05304186 soc->hal_soc = hal_soc_hdl;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004187
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004188 if (htt_htc_soc_attach(soc))
4189 goto fail2;
4190
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05304191 return soc;
4192
4193fail2:
4194 return NULL;
4195}
4196
/*
 * htt_soc_htc_dealloc() - release HTC-side resources of the HTT SOC
 * @htt_handle: HTT SOC handle
 *
 * Tears down the HTT event logger and frees both HTC packet pools
 * owned by this HTT SOC; counterpart of htt_soc_htc_prealloc().
 */
void htt_soc_htc_dealloc(struct htt_soc *htt_handle)
{
	htt_interface_logging_deinit(htt_handle->htt_logger_handle);
	htt_htc_misc_pkt_pool_free(htt_handle);
	htt_htc_pkt_pool_free(htt_handle);
}
4203
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05304204/*
4205 * htt_soc_htc_prealloc() - HTC memory prealloc
4206 * @htt_soc: SOC level HTT handle
4207 *
4208 * Return: QDF_STATUS_SUCCESS on Success or
4209 * QDF_STATUS_E_NOMEM on allocation failure
4210 */
Mohit Khanna40f76b52018-11-30 14:10:55 -08004211QDF_STATUS htt_soc_htc_prealloc(struct htt_soc *soc)
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05304212{
4213 int i;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004214
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05304215 soc->htt_htc_pkt_freelist = NULL;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004216 /* pre-allocate some HTC_PACKET objects */
4217 for (i = 0; i < HTT_HTC_PKT_POOL_INIT_SIZE; i++) {
4218 struct dp_htt_htc_pkt_union *pkt;
4219 pkt = qdf_mem_malloc(sizeof(*pkt));
4220 if (!pkt)
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05304221 return QDF_STATUS_E_NOMEM;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004222
4223 htt_htc_pkt_free(soc, &pkt->u.pkt);
4224 }
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05304225 return QDF_STATUS_SUCCESS;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004226}
4227
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004228/*
Mohit Khanna40f76b52018-11-30 14:10:55 -08004229 * htt_soc_detach() - Free SOC level HTT handle
4230 * @htt_hdl: HTT SOC handle
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004231 */
Akshay Kosigia5c46a42019-06-27 12:43:01 +05304232void htt_soc_detach(struct htt_soc *htt_hdl)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004233{
Ruben Columbus814e6cb2019-09-10 15:49:11 -07004234 int i;
Mohit Khanna40f76b52018-11-30 14:10:55 -08004235 struct htt_soc *htt_handle = (struct htt_soc *)htt_hdl;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004236
Ruben Columbus814e6cb2019-09-10 15:49:11 -07004237 for (i = 0; i < MAX_PDEV_CNT; i++) {
4238 qdf_mem_free(htt_handle->pdevid_tt[i].umac_ttt);
4239 qdf_mem_free(htt_handle->pdevid_tt[i].lmac_ttt);
4240 }
4241
Mohit Khanna40f76b52018-11-30 14:10:55 -08004242 HTT_TX_MUTEX_DESTROY(&htt_handle->htt_tx_mutex);
4243 qdf_mem_free(htt_handle);
Ruben Columbus814e6cb2019-09-10 15:49:11 -07004244
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004245}
4246
Ishank Jain6290a3c2017-03-21 10:49:39 +05304247/**
4248 * dp_h2t_ext_stats_msg_send(): function to contruct HTT message to pass to FW
4249 * @pdev: DP PDEV handle
4250 * @stats_type_upload_mask: stats type requested by user
4251 * @config_param_0: extra configuration parameters
4252 * @config_param_1: extra configuration parameters
4253 * @config_param_2: extra configuration parameters
4254 * @config_param_3: extra configuration parameters
Venkata Sharath Chandra Manchala4face242018-04-23 11:48:28 -07004255 * @mac_id: mac number
Ishank Jain6290a3c2017-03-21 10:49:39 +05304256 *
4257 * return: QDF STATUS
4258 */
4259QDF_STATUS dp_h2t_ext_stats_msg_send(struct dp_pdev *pdev,
4260 uint32_t stats_type_upload_mask, uint32_t config_param_0,
4261 uint32_t config_param_1, uint32_t config_param_2,
Venkata Sharath Chandra Manchalaac863c42018-02-19 22:47:07 -08004262 uint32_t config_param_3, int cookie_val, int cookie_msb,
Venkata Sharath Chandra Manchala4face242018-04-23 11:48:28 -07004263 uint8_t mac_id)
Ishank Jain6290a3c2017-03-21 10:49:39 +05304264{
4265 struct htt_soc *soc = pdev->soc->htt_handle;
4266 struct dp_htt_htc_pkt *pkt;
4267 qdf_nbuf_t msg;
4268 uint32_t *msg_word;
Venkata Sharath Chandra Manchalaac863c42018-02-19 22:47:07 -08004269 uint8_t pdev_mask = 0;
Ankit Kumar0ead45c2019-04-29 15:32:49 +05304270 uint8_t *htt_logger_bufp;
Ishank Jain6290a3c2017-03-21 10:49:39 +05304271
4272 msg = qdf_nbuf_alloc(
4273 soc->osdev,
4274 HTT_MSG_BUF_SIZE(HTT_H2T_EXT_STATS_REQ_MSG_SZ),
4275 HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
4276
4277 if (!msg)
4278 return QDF_STATUS_E_NOMEM;
4279
4280 /*TODO:Add support for SOC stats
4281 * Bit 0: SOC Stats
4282 * Bit 1: Pdev stats for pdev id 0
4283 * Bit 2: Pdev stats for pdev id 1
4284 * Bit 3: Pdev stats for pdev id 2
4285 */
Venkata Sharath Chandra Manchala4face242018-04-23 11:48:28 -07004286 mac_id = dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
Ishank Jain6290a3c2017-03-21 10:49:39 +05304287
Venkata Sharath Chandra Manchala4face242018-04-23 11:48:28 -07004288 pdev_mask = 1 << DP_SW2HW_MACID(mac_id);
Ishank Jain6290a3c2017-03-21 10:49:39 +05304289 /*
4290 * Set the length of the message.
4291 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
4292 * separately during the below call to qdf_nbuf_push_head.
4293 * The contribution from the HTC header is added separately inside HTC.
4294 */
4295 if (qdf_nbuf_put_tail(msg, HTT_H2T_EXT_STATS_REQ_MSG_SZ) == NULL) {
4296 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4297 "Failed to expand head for HTT_EXT_STATS");
4298 qdf_nbuf_free(msg);
4299 return QDF_STATUS_E_FAILURE;
4300 }
4301
Chaithanya Garrepalli30927c52017-11-22 14:31:47 +05304302 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4303 "-----%s:%d----\n cookie <-> %d\n config_param_0 %u\n"
4304 "config_param_1 %u\n config_param_2 %u\n"
Aditya Sathishded018e2018-07-02 16:25:21 +05304305 "config_param_4 %u\n -------------",
Chaithanya Garrepalli30927c52017-11-22 14:31:47 +05304306 __func__, __LINE__, cookie_val, config_param_0,
4307 config_param_1, config_param_2, config_param_3);
4308
Ishank Jain6290a3c2017-03-21 10:49:39 +05304309 msg_word = (uint32_t *) qdf_nbuf_data(msg);
4310
4311 qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
Ankit Kumar0ead45c2019-04-29 15:32:49 +05304312 htt_logger_bufp = (uint8_t *)msg_word;
Ishank Jain6290a3c2017-03-21 10:49:39 +05304313 *msg_word = 0;
4314 HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_EXT_STATS_REQ);
4315 HTT_H2T_EXT_STATS_REQ_PDEV_MASK_SET(*msg_word, pdev_mask);
4316 HTT_H2T_EXT_STATS_REQ_STATS_TYPE_SET(*msg_word, stats_type_upload_mask);
4317
4318 /* word 1 */
4319 msg_word++;
4320 *msg_word = 0;
4321 HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_0);
4322
4323 /* word 2 */
4324 msg_word++;
4325 *msg_word = 0;
4326 HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_1);
4327
4328 /* word 3 */
4329 msg_word++;
4330 *msg_word = 0;
4331 HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_2);
4332
4333 /* word 4 */
4334 msg_word++;
4335 *msg_word = 0;
4336 HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_3);
4337
4338 HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, 0);
Chaithanya Garrepalli30927c52017-11-22 14:31:47 +05304339
4340 /* word 5 */
4341 msg_word++;
4342
4343 /* word 6 */
4344 msg_word++;
4345 *msg_word = 0;
4346 HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_val);
4347
4348 /* word 7 */
4349 msg_word++;
4350 *msg_word = 0;
Prathyusha Guduri43bb0562018-02-12 18:30:54 +05304351 /*Using last 2 bits for pdev_id */
4352 cookie_msb = ((cookie_msb << 2) | pdev->pdev_id);
4353 HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_msb);
Chaithanya Garrepalli30927c52017-11-22 14:31:47 +05304354
Ishank Jain6290a3c2017-03-21 10:49:39 +05304355 pkt = htt_htc_pkt_alloc(soc);
4356 if (!pkt) {
4357 qdf_nbuf_free(msg);
4358 return QDF_STATUS_E_NOMEM;
4359 }
4360
4361 pkt->soc_ctxt = NULL; /* not used during send-done callback */
4362
4363 SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
4364 dp_htt_h2t_send_complete_free_netbuf,
4365 qdf_nbuf_data(msg), qdf_nbuf_len(msg),
4366 soc->htc_endpoint,
Yue Mae92fe022019-07-16 18:47:52 -07004367 /* tag for FW response msg not guaranteed */
4368 HTC_TX_PACKET_TAG_RUNTIME_PUT);
Ishank Jain6290a3c2017-03-21 10:49:39 +05304369
4370 SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
Ankit Kumar0ead45c2019-04-29 15:32:49 +05304371 DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_EXT_STATS_REQ,
4372 htt_logger_bufp);
Pramod Simhae0baa442017-06-27 15:21:39 -07004373 return 0;
Ishank Jain6290a3c2017-03-21 10:49:39 +05304374}
Keyur Parekhdb0fa142017-07-13 19:40:22 -07004375
/* This compile-time guard is temporary: it can be removed once htt.h
 * provides a proper definition for HTT_H2T_MSG_TYPE_PPDU_STATS_CFG.
 */
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07004379#if defined(WDI_EVENT_ENABLE)
Keyur Parekhdb0fa142017-07-13 19:40:22 -07004380/**
4381 * dp_h2t_cfg_stats_msg_send(): function to construct HTT message to pass to FW
4382 * @pdev: DP PDEV handle
4383 * @stats_type_upload_mask: stats type requested by user
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07004384 * @mac_id: Mac id number
Keyur Parekhdb0fa142017-07-13 19:40:22 -07004385 *
4386 * return: QDF STATUS
4387 */
4388QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev,
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07004389 uint32_t stats_type_upload_mask, uint8_t mac_id)
Keyur Parekhdb0fa142017-07-13 19:40:22 -07004390{
4391 struct htt_soc *soc = pdev->soc->htt_handle;
4392 struct dp_htt_htc_pkt *pkt;
4393 qdf_nbuf_t msg;
4394 uint32_t *msg_word;
4395 uint8_t pdev_mask;
4396
4397 msg = qdf_nbuf_alloc(
4398 soc->osdev,
4399 HTT_MSG_BUF_SIZE(HTT_H2T_PPDU_STATS_CFG_MSG_SZ),
4400 HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, true);
4401
4402 if (!msg) {
4403 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Aditya Sathishded018e2018-07-02 16:25:21 +05304404 "Fail to allocate HTT_H2T_PPDU_STATS_CFG_MSG_SZ msg buffer");
Keyur Parekhdb0fa142017-07-13 19:40:22 -07004405 qdf_assert(0);
4406 return QDF_STATUS_E_NOMEM;
4407 }
4408
4409 /*TODO:Add support for SOC stats
4410 * Bit 0: SOC Stats
4411 * Bit 1: Pdev stats for pdev id 0
4412 * Bit 2: Pdev stats for pdev id 1
4413 * Bit 3: Pdev stats for pdev id 2
4414 */
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07004415 pdev_mask = 1 << DP_SW2HW_MACID(mac_id);
Keyur Parekhdb0fa142017-07-13 19:40:22 -07004416
4417 /*
4418 * Set the length of the message.
4419 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
4420 * separately during the below call to qdf_nbuf_push_head.
4421 * The contribution from the HTC header is added separately inside HTC.
4422 */
4423 if (qdf_nbuf_put_tail(msg, HTT_H2T_PPDU_STATS_CFG_MSG_SZ) == NULL) {
4424 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Aditya Sathishded018e2018-07-02 16:25:21 +05304425 "Failed to expand head for HTT_CFG_STATS");
Keyur Parekhdb0fa142017-07-13 19:40:22 -07004426 qdf_nbuf_free(msg);
4427 return QDF_STATUS_E_FAILURE;
4428 }
4429
4430 msg_word = (uint32_t *) qdf_nbuf_data(msg);
4431
4432 qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
4433 *msg_word = 0;
4434 HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);
4435 HTT_H2T_PPDU_STATS_CFG_PDEV_MASK_SET(*msg_word, pdev_mask);
4436 HTT_H2T_PPDU_STATS_CFG_TLV_BITMASK_SET(*msg_word,
4437 stats_type_upload_mask);
4438
4439 pkt = htt_htc_pkt_alloc(soc);
4440 if (!pkt) {
4441 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
Aditya Sathishded018e2018-07-02 16:25:21 +05304442 "Fail to allocate dp_htt_htc_pkt buffer");
Keyur Parekhdb0fa142017-07-13 19:40:22 -07004443 qdf_assert(0);
4444 qdf_nbuf_free(msg);
4445 return QDF_STATUS_E_NOMEM;
4446 }
4447
4448 pkt->soc_ctxt = NULL; /* not used during send-done callback */
4449
4450 SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
4451 dp_htt_h2t_send_complete_free_netbuf,
4452 qdf_nbuf_data(msg), qdf_nbuf_len(msg),
4453 soc->htc_endpoint,
4454 1); /* tag - not relevant here */
4455
4456 SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
Ankit Kumar0ead45c2019-04-29 15:32:49 +05304457 DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG,
4458 (uint8_t *)msg_word);
Keyur Parekhdb0fa142017-07-13 19:40:22 -07004459 return 0;
4460}
4461#endif
Amir Patel1ea85d42019-01-09 15:19:10 +05304462
4463void
4464dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type,
4465 uint32_t *tag_buf)
4466{
4467 switch (tag_type) {
4468 case HTT_STATS_PEER_DETAILS_TAG:
4469 {
4470 htt_peer_details_tlv *dp_stats_buf =
4471 (htt_peer_details_tlv *)tag_buf;
4472
4473 pdev->fw_stats_peer_id = dp_stats_buf->sw_peer_id;
4474 }
4475 break;
4476 case HTT_STATS_PEER_STATS_CMN_TAG:
4477 {
4478 htt_peer_stats_cmn_tlv *dp_stats_buf =
4479 (htt_peer_stats_cmn_tlv *)tag_buf;
4480
4481 struct dp_peer *peer = dp_peer_find_by_id(pdev->soc,
4482 pdev->fw_stats_peer_id);
4483
4484 if (peer && !peer->bss_peer) {
4485 peer->stats.tx.inactive_time =
4486 dp_stats_buf->inactive_time;
4487 qdf_event_set(&pdev->fw_peer_stats_event);
4488 }
4489 if (peer)
4490 dp_peer_unref_del_find_by_id(peer);
4491 }
4492 break;
4493 default:
4494 qdf_err("Invalid tag_type");
4495 }
4496}
Sumeet Raoc4fa4df2019-07-05 02:11:19 -07004497
4498/**
4499 * dp_htt_rx_flow_fst_setup(): Send HTT Rx FST setup message to FW
4500 * @pdev: DP pdev handle
4501 * @fse_setup_info: FST setup parameters
4502 *
4503 * Return: Success when HTT message is sent, error on failure
4504 */
4505QDF_STATUS
4506dp_htt_rx_flow_fst_setup(struct dp_pdev *pdev,
4507 struct dp_htt_rx_flow_fst_setup *fse_setup_info)
4508{
4509 struct htt_soc *soc = pdev->soc->htt_handle;
4510 struct dp_htt_htc_pkt *pkt;
4511 qdf_nbuf_t msg;
4512 u_int32_t *msg_word;
4513 struct htt_h2t_msg_rx_fse_setup_t *fse_setup;
4514 uint8_t *htt_logger_bufp;
4515 u_int32_t *key;
4516
4517 msg = qdf_nbuf_alloc(
4518 soc->osdev,
4519 HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_rx_fse_setup_t)),
4520 /* reserve room for the HTC header */
4521 HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
4522
4523 if (!msg)
4524 return QDF_STATUS_E_NOMEM;
4525
4526 /*
4527 * Set the length of the message.
4528 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
4529 * separately during the below call to qdf_nbuf_push_head.
4530 * The contribution from the HTC header is added separately inside HTC.
4531 */
4532 if (!qdf_nbuf_put_tail(msg,
4533 sizeof(struct htt_h2t_msg_rx_fse_setup_t))) {
4534 qdf_err("Failed to expand head for HTT RX_FSE_SETUP msg");
4535 return QDF_STATUS_E_FAILURE;
4536 }
4537
4538 /* fill in the message contents */
4539 msg_word = (u_int32_t *)qdf_nbuf_data(msg);
4540
4541 memset(msg_word, 0, sizeof(struct htt_h2t_msg_rx_fse_setup_t));
4542 /* rewind beyond alignment pad to get to the HTC header reserved area */
4543 qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
4544 htt_logger_bufp = (uint8_t *)msg_word;
4545
4546 *msg_word = 0;
4547 HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FSE_SETUP_CFG);
4548
4549 fse_setup = (struct htt_h2t_msg_rx_fse_setup_t *)msg_word;
4550
4551 HTT_RX_FSE_SETUP_PDEV_ID_SET(*msg_word, fse_setup_info->pdev_id);
4552
4553 msg_word++;
4554 HTT_RX_FSE_SETUP_NUM_REC_SET(*msg_word, fse_setup_info->max_entries);
4555 HTT_RX_FSE_SETUP_MAX_SEARCH_SET(*msg_word, fse_setup_info->max_search);
4556 HTT_RX_FSE_SETUP_IP_DA_SA_PREFIX_SET(*msg_word,
4557 fse_setup_info->ip_da_sa_prefix);
4558
4559 msg_word++;
4560 HTT_RX_FSE_SETUP_BASE_ADDR_LO_SET(*msg_word,
4561 fse_setup_info->base_addr_lo);
4562 msg_word++;
4563 HTT_RX_FSE_SETUP_BASE_ADDR_HI_SET(*msg_word,
4564 fse_setup_info->base_addr_hi);
4565
4566 key = (u_int32_t *)fse_setup_info->hash_key;
4567 fse_setup->toeplitz31_0 = *key++;
4568 fse_setup->toeplitz63_32 = *key++;
4569 fse_setup->toeplitz95_64 = *key++;
4570 fse_setup->toeplitz127_96 = *key++;
4571 fse_setup->toeplitz159_128 = *key++;
4572 fse_setup->toeplitz191_160 = *key++;
4573 fse_setup->toeplitz223_192 = *key++;
4574 fse_setup->toeplitz255_224 = *key++;
4575 fse_setup->toeplitz287_256 = *key++;
4576 fse_setup->toeplitz314_288 = *key;
4577
4578 msg_word++;
4579 HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz31_0);
4580 msg_word++;
4581 HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz63_32);
4582 msg_word++;
4583 HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz95_64);
4584 msg_word++;
4585 HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz127_96);
4586 msg_word++;
4587 HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz159_128);
4588 msg_word++;
4589 HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz191_160);
4590 msg_word++;
4591 HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz223_192);
4592 msg_word++;
4593 HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz255_224);
4594 msg_word++;
4595 HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz287_256);
4596 msg_word++;
4597 HTT_RX_FSE_SETUP_HASH_314_288_SET(*msg_word,
4598 fse_setup->toeplitz314_288);
4599
4600 pkt = htt_htc_pkt_alloc(soc);
4601 if (!pkt) {
4602 qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
4603 qdf_assert(0);
4604 qdf_nbuf_free(msg);
4605 return QDF_STATUS_E_RESOURCES; /* failure */
4606 }
4607
4608 pkt->soc_ctxt = NULL; /* not used during send-done callback */
4609
4610 SET_HTC_PACKET_INFO_TX(
4611 &pkt->htc_pkt,
4612 dp_htt_h2t_send_complete_free_netbuf,
4613 qdf_nbuf_data(msg),
4614 qdf_nbuf_len(msg),
4615 soc->htc_endpoint,
4616 1); /* tag - not relevant here */
4617
4618 SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
4619
4620 DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_RX_FSE_SETUP_CFG,
4621 htt_logger_bufp);
4622
4623 qdf_info("HTT_H2T RX_FSE_SETUP sent to FW for pdev = %u",
4624 fse_setup_info->pdev_id);
4625 QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_ANY, QDF_TRACE_LEVEL_DEBUG,
4626 (void *)fse_setup_info->hash_key,
4627 fse_setup_info->hash_key_len);
4628
4629 return QDF_STATUS_SUCCESS;
4630}
4631
4632/**
4633 * dp_htt_rx_flow_fse_operation(): Send HTT Flow Search Entry msg to
4634 * add/del a flow in HW
4635 * @pdev: DP pdev handle
4636 * @fse_op_info: Flow entry parameters
4637 *
4638 * Return: Success when HTT message is sent, error on failure
4639 */
4640QDF_STATUS
4641dp_htt_rx_flow_fse_operation(struct dp_pdev *pdev,
4642 struct dp_htt_rx_flow_fst_operation *fse_op_info)
4643{
4644 struct htt_soc *soc = pdev->soc->htt_handle;
4645 struct dp_htt_htc_pkt *pkt;
4646 qdf_nbuf_t msg;
4647 u_int32_t *msg_word;
4648 struct htt_h2t_msg_rx_fse_operation_t *fse_operation;
4649 uint8_t *htt_logger_bufp;
4650
4651 msg = qdf_nbuf_alloc(
4652 soc->osdev,
4653 HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_rx_fse_operation_t)),
4654 /* reserve room for the HTC header */
4655 HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
4656 if (!msg)
4657 return QDF_STATUS_E_NOMEM;
4658
4659 /*
4660 * Set the length of the message.
4661 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
4662 * separately during the below call to qdf_nbuf_push_head.
4663 * The contribution from the HTC header is added separately inside HTC.
4664 */
4665 if (!qdf_nbuf_put_tail(msg,
4666 sizeof(struct htt_h2t_msg_rx_fse_operation_t))) {
4667 qdf_err("Failed to expand head for HTT_RX_FSE_OPERATION msg");
4668 return QDF_STATUS_E_FAILURE;
4669 }
4670
4671 /* fill in the message contents */
4672 msg_word = (u_int32_t *)qdf_nbuf_data(msg);
4673
4674 memset(msg_word, 0, sizeof(struct htt_h2t_msg_rx_fse_operation_t));
4675 /* rewind beyond alignment pad to get to the HTC header reserved area */
4676 qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
4677 htt_logger_bufp = (uint8_t *)msg_word;
4678
4679 *msg_word = 0;
4680 HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FSE_OPERATION_CFG);
4681
4682 fse_operation = (struct htt_h2t_msg_rx_fse_operation_t *)msg_word;
4683
4684 HTT_RX_FSE_OPERATION_PDEV_ID_SET(*msg_word, fse_op_info->pdev_id);
4685 msg_word++;
4686 HTT_RX_FSE_IPSEC_VALID_SET(*msg_word, false);
4687 if (fse_op_info->op_code == DP_HTT_FST_CACHE_INVALIDATE_ENTRY) {
4688 HTT_RX_FSE_OPERATION_SET(*msg_word,
4689 HTT_RX_FSE_CACHE_INVALIDATE_ENTRY);
4690 msg_word++;
4691 HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4692 *msg_word,
4693 qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_31_0));
4694 msg_word++;
4695 HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4696 *msg_word,
4697 qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_63_32));
4698 msg_word++;
4699 HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4700 *msg_word,
4701 qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_95_64));
4702 msg_word++;
4703 HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4704 *msg_word,
4705 qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_127_96));
4706 msg_word++;
4707 HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4708 *msg_word,
4709 qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_31_0));
4710 msg_word++;
4711 HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4712 *msg_word,
4713 qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_63_32));
4714 msg_word++;
4715 HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4716 *msg_word,
4717 qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_95_64));
4718 msg_word++;
4719 HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4720 *msg_word,
4721 qdf_htonl(
4722 fse_op_info->rx_flow->flow_tuple_info.dest_ip_127_96));
4723 msg_word++;
4724 HTT_RX_FSE_SOURCEPORT_SET(
4725 *msg_word,
4726 fse_op_info->rx_flow->flow_tuple_info.src_port);
4727 HTT_RX_FSE_DESTPORT_SET(
4728 *msg_word,
4729 fse_op_info->rx_flow->flow_tuple_info.dest_port);
4730 msg_word++;
4731 HTT_RX_FSE_L4_PROTO_SET(
4732 *msg_word,
4733 fse_op_info->rx_flow->flow_tuple_info.l4_protocol);
4734 } else if (fse_op_info->op_code == DP_HTT_FST_CACHE_INVALIDATE_FULL) {
4735 HTT_RX_FSE_OPERATION_SET(*msg_word,
4736 HTT_RX_FSE_CACHE_INVALIDATE_FULL);
4737 } else if (fse_op_info->op_code == DP_HTT_FST_DISABLE) {
4738 HTT_RX_FSE_OPERATION_SET(*msg_word, HTT_RX_FSE_DISABLE);
4739 } else if (fse_op_info->op_code == DP_HTT_FST_ENABLE) {
4740 HTT_RX_FSE_OPERATION_SET(*msg_word, HTT_RX_FSE_ENABLE);
4741 }
4742
4743 pkt = htt_htc_pkt_alloc(soc);
4744 if (!pkt) {
4745 qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
4746 qdf_assert(0);
4747 qdf_nbuf_free(msg);
4748 return QDF_STATUS_E_RESOURCES; /* failure */
4749 }
4750
4751 pkt->soc_ctxt = NULL; /* not used during send-done callback */
4752
4753 SET_HTC_PACKET_INFO_TX(
4754 &pkt->htc_pkt,
4755 dp_htt_h2t_send_complete_free_netbuf,
4756 qdf_nbuf_data(msg),
4757 qdf_nbuf_len(msg),
4758 soc->htc_endpoint,
4759 1); /* tag - not relevant here */
4760
4761 SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
4762
4763 DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_RX_FSE_OPERATION_CFG,
4764 htt_logger_bufp);
4765
4766 qdf_info("HTT_H2T RX_FSE_OPERATION_CFG sent to FW for pdev = %u",
4767 fse_op_info->pdev_id);
4768
4769 return QDF_STATUS_SUCCESS;
4770}