blob: 1f7d9ba536297f19e1b1d0abf3e02b9d33eeb60d [file] [log] [blame]
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001/*
Chaithanya Garrepalli9cc562c2018-11-16 18:30:41 +05302 * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003 *
4 * Permission to use, copy, modify, and/or distribute this software for
5 * any purpose with or without fee is hereby granted, provided that the
6 * above copyright notice and this permission notice appear in all
7 * copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16 * PERFORMANCE OF THIS SOFTWARE.
17 */
18
19#include <htt.h>
Balamurugan Mahalingamf72cb1f2018-06-25 12:18:34 +053020#include <hal_hw_headers.h>
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -070021#include <hal_api.h>
Jeff Johnson2cb8fc72016-12-17 10:45:08 -080022#include "dp_peer.h"
Dhanashri Atre2ea8c0f2017-02-08 11:09:56 -080023#include "dp_types.h"
Pramod Simhab17d0672017-03-06 17:20:13 -080024#include "dp_internal.h"
Kai Chen6eca1a62017-01-12 10:17:53 -080025#include "dp_rx_mon.h"
Ishank Jain6290a3c2017-03-21 10:49:39 +053026#include "htt_stats.h"
Pamidipati, Vijay038d0902017-07-17 09:53:31 +053027#include "htt_ppdu_stats.h"
nobeljdebe2b32019-04-23 11:18:47 -070028#include "dp_htt.h"
Ruben Columbus43194932019-05-24 09:56:52 -070029#include "dp_rx.h"
Keyur Parekhfad6d082017-05-07 08:54:47 -070030#include "qdf_mem.h" /* qdf_mem_malloc,free */
Pamidipati, Vijay038d0902017-07-17 09:53:31 +053031#include "cdp_txrx_cmn_struct.h"
Ishank Jain6290a3c2017-03-21 10:49:39 +053032
ydb247452018-08-08 00:23:16 +053033#ifdef FEATURE_PERPKT_INFO
34#include "dp_ratetable.h"
35#endif
36
Ishank Jain6290a3c2017-03-21 10:49:39 +053037#define HTT_TLV_HDR_LEN HTT_T2H_EXT_STATS_CONF_TLV_HDR_SIZE
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -070038
39#define HTT_HTC_PKT_POOL_INIT_SIZE 64
40
41#define HTT_MSG_BUF_SIZE(msg_bytes) \
42 ((msg_bytes) + HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING)
43
Prathyusha Guduri43bb0562018-02-12 18:30:54 +053044#define HTT_PID_BIT_MASK 0x3
45
Ishank Jain6290a3c2017-03-21 10:49:39 +053046#define DP_EXT_MSG_LENGTH 2048
Pramod Simhae0baa442017-06-27 15:21:39 -070047
Soumya Bhat51240dc2018-05-24 18:00:57 +053048#define HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN 16
49
nobelj182938a2019-11-25 14:09:08 -080050#define HTT_SHIFT_UPPER_TIMESTAMP 32
51#define HTT_MASK_UPPER_TIMESTAMP 0xFFFFFFFF00000000
52
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -070053/*
Amir Patel36a79a62019-01-17 11:23:37 +053054 * dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap() - Get ppdu stats tlv
55 * bitmap for sniffer mode
56 * @bitmap: received bitmap
57 *
58 * Return: expected bitmap value, returns zero if doesn't match with
59 * either 64-bit Tx window or 256-bit window tlv bitmap
60 */
nobeljdebe2b32019-04-23 11:18:47 -070061int
Amir Patel36a79a62019-01-17 11:23:37 +053062dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(uint32_t bitmap)
63{
64 if (bitmap == (HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64))
65 return HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_64;
66 else if (bitmap == (HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256))
67 return HTT_PPDU_SNIFFER_AMPDU_TLV_BITMAP_256;
68
69 return 0;
70}
71
nobeljdebe2b32019-04-23 11:18:47 -070072#ifdef FEATURE_PERPKT_INFO
Amir Patel36a79a62019-01-17 11:23:37 +053073/*
nobelj7b0e2732019-05-31 00:19:07 -070074 * dp_peer_copy_delay_stats() - copy ppdu stats to peer delayed stats.
75 * @peer: Datapath peer handle
76 * @ppdu: PPDU Descriptor
77 *
78 * Return: None
79 *
80 * on Tx data frame, we may get delayed ba set
81 * in htt_ppdu_stats_user_common_tlv. which mean we get Block Ack(BA) after we
82 * request Block Ack Request(BAR). Successful msdu is received only after Block
83 * Ack. To populate peer stats we need successful msdu(data frame).
84 * So we hold the Tx data stats on delayed_ba for stats update.
85 */
86static inline void
87dp_peer_copy_delay_stats(struct dp_peer *peer,
88 struct cdp_tx_completion_ppdu_user *ppdu)
89{
90 struct dp_pdev *pdev;
91 struct dp_vdev *vdev;
92
93 if (peer->last_delayed_ba) {
94 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
95 "BA not yet recv for prev delayed ppdu[%d]\n",
96 peer->last_delayed_ba_ppduid);
97 vdev = peer->vdev;
98 if (vdev) {
99 pdev = vdev->pdev;
100 pdev->stats.cdp_delayed_ba_not_recev++;
101 }
102 }
103
104 peer->delayed_ba_ppdu_stats.ltf_size = ppdu->ltf_size;
105 peer->delayed_ba_ppdu_stats.stbc = ppdu->stbc;
106 peer->delayed_ba_ppdu_stats.he_re = ppdu->he_re;
107 peer->delayed_ba_ppdu_stats.txbf = ppdu->txbf;
108 peer->delayed_ba_ppdu_stats.bw = ppdu->bw;
109 peer->delayed_ba_ppdu_stats.nss = ppdu->nss;
110 peer->delayed_ba_ppdu_stats.preamble = ppdu->preamble;
111 peer->delayed_ba_ppdu_stats.gi = ppdu->gi;
112 peer->delayed_ba_ppdu_stats.dcm = ppdu->dcm;
113 peer->delayed_ba_ppdu_stats.ldpc = ppdu->ldpc;
114 peer->delayed_ba_ppdu_stats.dcm = ppdu->dcm;
115 peer->delayed_ba_ppdu_stats.mpdu_tried_ucast = ppdu->mpdu_tried_ucast;
116 peer->delayed_ba_ppdu_stats.mpdu_tried_mcast = ppdu->mpdu_tried_mcast;
117 peer->delayed_ba_ppdu_stats.frame_ctrl = ppdu->frame_ctrl;
118 peer->delayed_ba_ppdu_stats.qos_ctrl = ppdu->qos_ctrl;
119 peer->delayed_ba_ppdu_stats.dcm = ppdu->dcm;
120
121 peer->delayed_ba_ppdu_stats.ru_start = ppdu->ru_start;
122 peer->delayed_ba_ppdu_stats.ru_tones = ppdu->ru_tones;
123 peer->delayed_ba_ppdu_stats.is_mcast = ppdu->is_mcast;
124
125 peer->delayed_ba_ppdu_stats.user_pos = ppdu->user_pos;
126 peer->delayed_ba_ppdu_stats.mu_group_id = ppdu->mu_group_id;
127
128 peer->last_delayed_ba = true;
129}
130
131/*
132 * dp_peer_copy_stats_to_bar() - copy delayed stats to ppdu stats.
133 * @peer: Datapath peer handle
134 * @ppdu: PPDU Descriptor
135 *
136 * Return: None
137 *
138 * For Tx BAR, PPDU stats TLV include Block Ack info. PPDU info
139 * from Tx BAR frame not required to populate peer stats.
140 * But we need successful MPDU and MSDU to update previous
141 * transmitted Tx data frame. Overwrite ppdu stats with the previous
142 * stored ppdu stats.
143 */
144static void
145dp_peer_copy_stats_to_bar(struct dp_peer *peer,
146 struct cdp_tx_completion_ppdu_user *ppdu)
147{
148 ppdu->ltf_size = peer->delayed_ba_ppdu_stats.ltf_size;
149 ppdu->stbc = peer->delayed_ba_ppdu_stats.stbc;
150 ppdu->he_re = peer->delayed_ba_ppdu_stats.he_re;
151 ppdu->txbf = peer->delayed_ba_ppdu_stats.txbf;
152 ppdu->bw = peer->delayed_ba_ppdu_stats.bw;
153 ppdu->nss = peer->delayed_ba_ppdu_stats.nss;
154 ppdu->preamble = peer->delayed_ba_ppdu_stats.preamble;
155 ppdu->gi = peer->delayed_ba_ppdu_stats.gi;
156 ppdu->dcm = peer->delayed_ba_ppdu_stats.dcm;
157 ppdu->ldpc = peer->delayed_ba_ppdu_stats.ldpc;
158 ppdu->dcm = peer->delayed_ba_ppdu_stats.dcm;
159 ppdu->mpdu_tried_ucast = peer->delayed_ba_ppdu_stats.mpdu_tried_ucast;
160 ppdu->mpdu_tried_mcast = peer->delayed_ba_ppdu_stats.mpdu_tried_mcast;
161 ppdu->frame_ctrl = peer->delayed_ba_ppdu_stats.frame_ctrl;
162 ppdu->qos_ctrl = peer->delayed_ba_ppdu_stats.qos_ctrl;
163 ppdu->dcm = peer->delayed_ba_ppdu_stats.dcm;
164
165 ppdu->ru_start = peer->delayed_ba_ppdu_stats.ru_start;
166 ppdu->ru_tones = peer->delayed_ba_ppdu_stats.ru_tones;
167 ppdu->is_mcast = peer->delayed_ba_ppdu_stats.is_mcast;
168
169 ppdu->user_pos = peer->delayed_ba_ppdu_stats.user_pos;
170 ppdu->mu_group_id = peer->delayed_ba_ppdu_stats.mu_group_id;
171
172 peer->last_delayed_ba = false;
173}
174
175/*
nobeljdebe2b32019-04-23 11:18:47 -0700176 * dp_tx_rate_stats_update() - Update rate per-peer statistics
Pamidipati, Vijay038d0902017-07-17 09:53:31 +0530177 * @peer: Datapath peer handle
178 * @ppdu: PPDU Descriptor
Pamidipati, Vijay038d0902017-07-17 09:53:31 +0530179 *
180 * Return: None
181 */
nobeljdebe2b32019-04-23 11:18:47 -0700182static void
ydb247452018-08-08 00:23:16 +0530183dp_tx_rate_stats_update(struct dp_peer *peer,
184 struct cdp_tx_completion_ppdu_user *ppdu)
185{
186 uint32_t ratekbps = 0;
Shashikala Prabhuf7786d32019-04-05 14:25:09 +0530187 uint64_t ppdu_tx_rate = 0;
Amir Patel78824b12019-02-23 10:54:32 +0530188 uint32_t rix;
Ankit Kumarcd66fff2019-07-02 20:54:44 +0530189 uint16_t ratecode = 0;
ydb247452018-08-08 00:23:16 +0530190
191 if (!peer || !ppdu)
192 return;
193
Anish Nataraj376d9b12018-08-13 14:12:01 +0530194 ratekbps = dp_getrateindex(ppdu->gi,
195 ppdu->mcs,
ydb247452018-08-08 00:23:16 +0530196 ppdu->nss,
197 ppdu->preamble,
Amir Patel78824b12019-02-23 10:54:32 +0530198 ppdu->bw,
Ankit Kumarcd66fff2019-07-02 20:54:44 +0530199 &rix,
200 &ratecode);
ydb247452018-08-08 00:23:16 +0530201
202 DP_STATS_UPD(peer, tx.last_tx_rate, ratekbps);
203
204 if (!ratekbps)
205 return;
206
Ankit Kumarcd66fff2019-07-02 20:54:44 +0530207 /* Calculate goodput in non-training period
208 * In training period, don't do anything as
209 * pending pkt is send as goodput.
210 */
211 if ((!peer->bss_peer) && (!ppdu->sa_is_training)) {
212 ppdu->sa_goodput = ((ratekbps / CDP_NUM_KB_IN_MB) *
213 (CDP_PERCENT_MACRO - ppdu->current_rate_per));
214 }
Amir Patel468bded2019-03-21 11:42:31 +0530215 ppdu->rix = rix;
Amir Patelac7d9462019-03-28 16:16:01 +0530216 ppdu->tx_ratekbps = ratekbps;
Ankit Kumarcd66fff2019-07-02 20:54:44 +0530217 ppdu->tx_ratecode = ratecode;
Amir Patelc2cc2522018-11-29 20:44:47 +0530218 peer->stats.tx.avg_tx_rate =
219 dp_ath_rate_lpf(peer->stats.tx.avg_tx_rate, ratekbps);
ydb247452018-08-08 00:23:16 +0530220 ppdu_tx_rate = dp_ath_rate_out(peer->stats.tx.avg_tx_rate);
221 DP_STATS_UPD(peer, tx.rnd_avg_tx_rate, ppdu_tx_rate);
222
223 if (peer->vdev) {
Chaitanya Kiran Godavarthi8c880d32019-06-06 19:32:48 +0530224 /*
225 * In STA mode:
226 * We get ucast stats as BSS peer stats.
227 *
228 * In AP mode:
229 * We get mcast stats as BSS peer stats.
230 * We get ucast stats as assoc peer stats.
231 */
232 if (peer->vdev->opmode == wlan_op_mode_ap && peer->bss_peer) {
Amir Patel1de3d3d2018-09-14 11:47:02 +0530233 peer->vdev->stats.tx.mcast_last_tx_rate = ratekbps;
234 peer->vdev->stats.tx.mcast_last_tx_rate_mcs = ppdu->mcs;
235 } else {
236 peer->vdev->stats.tx.last_tx_rate = ratekbps;
237 peer->vdev->stats.tx.last_tx_rate_mcs = ppdu->mcs;
238 }
ydb247452018-08-08 00:23:16 +0530239 }
240}
241
nobeljdebe2b32019-04-23 11:18:47 -0700242/*
243 * dp_tx_stats_update() - Update per-peer statistics
Tallapragada Kalyand92f5982019-08-02 18:22:46 +0530244 * @pdev: Datapath pdev handle
nobeljdebe2b32019-04-23 11:18:47 -0700245 * @peer: Datapath peer handle
246 * @ppdu: PPDU Descriptor
247 * @ack_rssi: RSSI of last ack received
248 *
249 * Return: None
250 */
251static void
Tallapragada Kalyand92f5982019-08-02 18:22:46 +0530252dp_tx_stats_update(struct dp_pdev *pdev, struct dp_peer *peer,
nobeljdebe2b32019-04-23 11:18:47 -0700253 struct cdp_tx_completion_ppdu_user *ppdu,
254 uint32_t ack_rssi)
Pamidipati, Vijay038d0902017-07-17 09:53:31 +0530255{
Pamidipati, Vijay57a435a2017-10-17 11:03:39 +0530256 uint8_t preamble, mcs;
257 uint16_t num_msdu;
nobelj7b0e2732019-05-31 00:19:07 -0700258 uint16_t num_mpdu;
259 uint16_t mpdu_tried;
nobeljab929fe2019-09-16 15:38:20 -0700260 uint16_t mpdu_failed;
Pamidipati, Vijay57a435a2017-10-17 11:03:39 +0530261
262 preamble = ppdu->preamble;
263 mcs = ppdu->mcs;
264 num_msdu = ppdu->num_msdu;
nobelj7b0e2732019-05-31 00:19:07 -0700265 num_mpdu = ppdu->mpdu_success;
266 mpdu_tried = ppdu->mpdu_tried_ucast + ppdu->mpdu_tried_mcast;
nobeljab929fe2019-09-16 15:38:20 -0700267 mpdu_failed = mpdu_tried - num_mpdu;
Pamidipati, Vijay038d0902017-07-17 09:53:31 +0530268
Pamidipati, Vijaybd9c13f2017-10-17 20:12:03 +0530269 /* If the peer statistics are already processed as part of
270 * per-MSDU completion handler, do not process these again in per-PPDU
271 * indications */
Tallapragada Kalyand92f5982019-08-02 18:22:46 +0530272 if (pdev->soc->process_tx_status)
Pamidipati, Vijaybd9c13f2017-10-17 20:12:03 +0530273 return;
274
Santosh Anbu4de9ffb2019-03-01 17:20:29 +0530275 if (ppdu->completion_status != HTT_PPDU_STATS_USER_STATUS_OK) {
nobeljab929fe2019-09-16 15:38:20 -0700276 /*
277 * All failed mpdu will be retried, so incrementing
278 * retries mpdu based on mpdu failed. Even for
279 * ack failure i.e for long retries we get
280 * mpdu failed equal mpdu tried.
281 */
282 DP_STATS_INC(peer, tx.retries, mpdu_failed);
283 DP_STATS_INC(peer, tx.tx_failed, ppdu->failed_msdus);
Santosh Anbu4de9ffb2019-03-01 17:20:29 +0530284 return;
285 }
286
Varsha Mishra27c5bd32019-05-28 11:54:46 +0530287 if (ppdu->is_ppdu_cookie_valid)
288 DP_STATS_INC(peer, tx.num_ppdu_cookie_valid, 1);
289
nobelj79666332019-01-31 17:00:51 -0800290 if (ppdu->mu_group_id <= MAX_MU_GROUP_ID &&
291 ppdu->ppdu_type != HTT_PPDU_STATS_PPDU_TYPE_SU) {
nobelj4e9d51f2018-08-07 19:36:47 -0700292 if (unlikely(!(ppdu->mu_group_id & (MAX_MU_GROUP_ID - 1))))
293 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
294 "mu_group_id out of bound!!\n");
295 else
296 DP_STATS_UPD(peer, tx.mu_group_id[ppdu->mu_group_id],
297 (ppdu->user_pos + 1));
298 }
299
nobelj79666332019-01-31 17:00:51 -0800300 if (ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_OFDMA ||
301 ppdu->ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_MIMO_OFDMA) {
nobelj4e9d51f2018-08-07 19:36:47 -0700302 DP_STATS_UPD(peer, tx.ru_tones, ppdu->ru_tones);
303 DP_STATS_UPD(peer, tx.ru_start, ppdu->ru_start);
304 switch (ppdu->ru_tones) {
305 case RU_26:
nobelj7b0e2732019-05-31 00:19:07 -0700306 DP_STATS_INC(peer, tx.ru_loc[RU_26_INDEX].num_msdu,
307 num_msdu);
308 DP_STATS_INC(peer, tx.ru_loc[RU_26_INDEX].num_mpdu,
309 num_mpdu);
310 DP_STATS_INC(peer, tx.ru_loc[RU_26_INDEX].mpdu_tried,
311 mpdu_tried);
nobelj4e9d51f2018-08-07 19:36:47 -0700312 break;
313 case RU_52:
nobelj7b0e2732019-05-31 00:19:07 -0700314 DP_STATS_INC(peer, tx.ru_loc[RU_52_INDEX].num_msdu,
315 num_msdu);
316 DP_STATS_INC(peer, tx.ru_loc[RU_52_INDEX].num_mpdu,
317 num_mpdu);
318 DP_STATS_INC(peer, tx.ru_loc[RU_52_INDEX].mpdu_tried,
319 mpdu_tried);
nobelj4e9d51f2018-08-07 19:36:47 -0700320 break;
321 case RU_106:
nobelj7b0e2732019-05-31 00:19:07 -0700322 DP_STATS_INC(peer, tx.ru_loc[RU_106_INDEX].num_msdu,
323 num_msdu);
324 DP_STATS_INC(peer, tx.ru_loc[RU_106_INDEX].num_mpdu,
325 num_mpdu);
326 DP_STATS_INC(peer, tx.ru_loc[RU_106_INDEX].mpdu_tried,
327 mpdu_tried);
nobelj4e9d51f2018-08-07 19:36:47 -0700328 break;
329 case RU_242:
nobelj7b0e2732019-05-31 00:19:07 -0700330 DP_STATS_INC(peer, tx.ru_loc[RU_242_INDEX].num_msdu,
331 num_msdu);
332 DP_STATS_INC(peer, tx.ru_loc[RU_242_INDEX].num_mpdu,
333 num_mpdu);
334 DP_STATS_INC(peer, tx.ru_loc[RU_242_INDEX].mpdu_tried,
335 mpdu_tried);
nobelj4e9d51f2018-08-07 19:36:47 -0700336 break;
337 case RU_484:
nobelj7b0e2732019-05-31 00:19:07 -0700338 DP_STATS_INC(peer, tx.ru_loc[RU_484_INDEX].num_msdu,
339 num_msdu);
340 DP_STATS_INC(peer, tx.ru_loc[RU_484_INDEX].num_mpdu,
341 num_mpdu);
342 DP_STATS_INC(peer, tx.ru_loc[RU_484_INDEX].mpdu_tried,
343 mpdu_tried);
nobelj4e9d51f2018-08-07 19:36:47 -0700344 break;
345 case RU_996:
nobelj7b0e2732019-05-31 00:19:07 -0700346 DP_STATS_INC(peer, tx.ru_loc[RU_996_INDEX].num_msdu,
347 num_msdu);
348 DP_STATS_INC(peer, tx.ru_loc[RU_996_INDEX].num_mpdu,
349 num_mpdu);
350 DP_STATS_INC(peer, tx.ru_loc[RU_996_INDEX].mpdu_tried,
351 mpdu_tried);
nobelj4e9d51f2018-08-07 19:36:47 -0700352 break;
353 }
354 }
355
nobeljab929fe2019-09-16 15:38:20 -0700356 /*
357 * All failed mpdu will be retried, so incrementing
358 * retries mpdu based on mpdu failed. Even for
359 * ack failure i.e for long retries we get
360 * mpdu failed equal mpdu tried.
361 */
362 DP_STATS_INC(peer, tx.retries, mpdu_failed);
363 DP_STATS_INC(peer, tx.tx_failed, ppdu->failed_msdus);
364
nobelj7b0e2732019-05-31 00:19:07 -0700365 DP_STATS_INC(peer, tx.transmit_type[ppdu->ppdu_type].num_msdu,
366 num_msdu);
367 DP_STATS_INC(peer, tx.transmit_type[ppdu->ppdu_type].num_mpdu,
368 num_mpdu);
369 DP_STATS_INC(peer, tx.transmit_type[ppdu->ppdu_type].mpdu_tried,
370 mpdu_tried);
371
Pamidipati, Vijay038d0902017-07-17 09:53:31 +0530372 DP_STATS_INC_PKT(peer, tx.comp_pkt,
Pranita Solankea12b4b32017-11-20 23:04:14 +0530373 num_msdu, (ppdu->success_bytes +
374 ppdu->retry_bytes + ppdu->failed_bytes));
Pranita Solankea12b4b32017-11-20 23:04:14 +0530375 DP_STATS_UPD(peer, tx.tx_rate, ppdu->tx_rate);
376 DP_STATS_INC(peer, tx.sgi_count[ppdu->gi], num_msdu);
377 DP_STATS_INC(peer, tx.bw[ppdu->bw], num_msdu);
Pranita Solankeed0aba62018-01-12 19:14:31 +0530378 DP_STATS_INC(peer, tx.nss[ppdu->nss], num_msdu);
Amir Patel70f3c302019-05-21 12:49:35 +0530379 if (ppdu->tid < CDP_DATA_TID_MAX)
380 DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ppdu->tid)],
381 num_msdu);
Pranita Solankea12b4b32017-11-20 23:04:14 +0530382 DP_STATS_INCC(peer, tx.stbc, num_msdu, ppdu->stbc);
383 DP_STATS_INCC(peer, tx.ldpc, num_msdu, ppdu->ldpc);
Anish Nataraj7235d9b2018-08-20 13:10:25 +0530384 if (!(ppdu->is_mcast) && ppdu->ack_rssi_valid)
Anish Nataraj57614da2018-02-07 23:04:24 +0530385 DP_STATS_UPD(peer, tx.last_ack_rssi, ack_rssi);
Pranita Solankea12b4b32017-11-20 23:04:14 +0530386
Pamidipati, Vijay57a435a2017-10-17 11:03:39 +0530387 DP_STATS_INCC(peer,
Pranita Solankeed0aba62018-01-12 19:14:31 +0530388 tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
Pamidipati, Vijay57a435a2017-10-17 11:03:39 +0530389 ((mcs >= MAX_MCS_11A) && (preamble == DOT11_A)));
390 DP_STATS_INCC(peer,
391 tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
392 ((mcs < MAX_MCS_11A) && (preamble == DOT11_A)));
393 DP_STATS_INCC(peer,
Pranita Solankeed0aba62018-01-12 19:14:31 +0530394 tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
Pamidipati, Vijay57a435a2017-10-17 11:03:39 +0530395 ((mcs >= MAX_MCS_11B) && (preamble == DOT11_B)));
396 DP_STATS_INCC(peer,
397 tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
398 ((mcs < (MAX_MCS_11B)) && (preamble == DOT11_B)));
399 DP_STATS_INCC(peer,
Pranita Solankeed0aba62018-01-12 19:14:31 +0530400 tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
Pamidipati, Vijay57a435a2017-10-17 11:03:39 +0530401 ((mcs >= MAX_MCS_11A) && (preamble == DOT11_N)));
402 DP_STATS_INCC(peer,
403 tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
404 ((mcs < MAX_MCS_11A) && (preamble == DOT11_N)));
405 DP_STATS_INCC(peer,
Pranita Solankeed0aba62018-01-12 19:14:31 +0530406 tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
Pamidipati, Vijay57a435a2017-10-17 11:03:39 +0530407 ((mcs >= MAX_MCS_11AC) && (preamble == DOT11_AC)));
408 DP_STATS_INCC(peer,
409 tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
410 ((mcs < MAX_MCS_11AC) && (preamble == DOT11_AC)));
411 DP_STATS_INCC(peer,
Pranita Solankeed0aba62018-01-12 19:14:31 +0530412 tx.pkt_type[preamble].mcs_count[MAX_MCS-1], num_msdu,
Pamidipati, Vijay57a435a2017-10-17 11:03:39 +0530413 ((mcs >= (MAX_MCS - 1)) && (preamble == DOT11_AX)));
414 DP_STATS_INCC(peer,
415 tx.pkt_type[preamble].mcs_count[mcs], num_msdu,
416 ((mcs < (MAX_MCS - 1)) && (preamble == DOT11_AX)));
Chaitanya Kiran Godavarthie541e9c2019-04-02 21:43:43 +0530417 DP_STATS_INCC(peer, tx.ampdu_cnt, num_msdu, ppdu->is_ampdu);
418 DP_STATS_INCC(peer, tx.non_ampdu_cnt, num_msdu, !(ppdu->is_ampdu));
ydb247452018-08-08 00:23:16 +0530419
Tallapragada Kalyand92f5982019-08-02 18:22:46 +0530420 dp_peer_stats_notify(pdev, peer);
ydb247452018-08-08 00:23:16 +0530421
Amir Patel756d05e2018-10-10 12:35:30 +0530422#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
423 dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc,
424 &peer->stats, ppdu->peer_id,
425 UPDATE_PEER_STATS, pdev->pdev_id);
426#endif
Pamidipati, Vijay038d0902017-07-17 09:53:31 +0530427}
428#endif
429
#ifdef WLAN_TX_PKT_CAPTURE_ENH
#include "dp_tx_capture.h"
#else
/*
 * dp_process_ppdu_stats_update_failed_bitmap() - no-op stub used when the
 * Tx packet capture enhancement (WLAN_TX_PKT_CAPTURE_ENH) is compiled out;
 * the real implementation comes from dp_tx_capture.h.
 * @pdev: Datapath pdev handle (unused)
 * @data: opaque TLV payload (unused)
 * @ppdu_id: PPDU identifier (unused)
 * @size: payload size (unused)
 */
static inline void
dp_process_ppdu_stats_update_failed_bitmap(struct dp_pdev *pdev,
					   void *data,
					   uint32_t ppdu_id,
					   uint32_t size)
{
}
#endif
441
Pamidipati, Vijay038d0902017-07-17 09:53:31 +0530442/*
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700443 * htt_htc_pkt_alloc() - Allocate HTC packet buffer
444 * @htt_soc: HTT SOC handle
445 *
446 * Return: Pointer to htc packet buffer
447 */
448static struct dp_htt_htc_pkt *
449htt_htc_pkt_alloc(struct htt_soc *soc)
450{
451 struct dp_htt_htc_pkt_union *pkt = NULL;
452
453 HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
454 if (soc->htt_htc_pkt_freelist) {
455 pkt = soc->htt_htc_pkt_freelist;
456 soc->htt_htc_pkt_freelist = soc->htt_htc_pkt_freelist->u.next;
457 }
458 HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
459
Jeff Johnsona8edf332019-03-18 09:51:52 -0700460 if (!pkt)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700461 pkt = qdf_mem_malloc(sizeof(*pkt));
462 return &pkt->u.pkt; /* not actually a dereference */
463}
464
465/*
466 * htt_htc_pkt_free() - Free HTC packet buffer
467 * @htt_soc: HTT SOC handle
468 */
469static void
470htt_htc_pkt_free(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
471{
472 struct dp_htt_htc_pkt_union *u_pkt =
473 (struct dp_htt_htc_pkt_union *)pkt;
474
475 HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
476 u_pkt->u.next = soc->htt_htc_pkt_freelist;
477 soc->htt_htc_pkt_freelist = u_pkt;
478 HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
479}
480
481/*
482 * htt_htc_pkt_pool_free() - Free HTC packet pool
483 * @htt_soc: HTT SOC handle
484 */
485static void
486htt_htc_pkt_pool_free(struct htt_soc *soc)
487{
488 struct dp_htt_htc_pkt_union *pkt, *next;
489 pkt = soc->htt_htc_pkt_freelist;
490 while (pkt) {
491 next = pkt->u.next;
492 qdf_mem_free(pkt);
493 pkt = next;
494 }
495 soc->htt_htc_pkt_freelist = NULL;
496}
497
/*
 * htt_htc_misc_pkt_list_trim() - trim misc list
 * @soc: HTT SOC handle
 * @level: max no. of pkts in list
 *
 * Walks the misclist and frees (netbuf unmap + free, then the pkt itself)
 * every node beyond the first @level entries, terminating the list at the
 * last surviving node. Runs under the HTT tx mutex.
 */
static void
htt_htc_misc_pkt_list_trim(struct htt_soc *soc, int level)
{
	struct dp_htt_htc_pkt_union *pkt, *next, *prev = NULL;
	int i = 0;
	qdf_nbuf_t netbuf;

	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
	pkt = soc->htt_htc_pkt_misclist;
	while (pkt) {
		next = pkt->u.next;
		/* trim the out grown list*/
		if (++i > level) {
			netbuf =
				(qdf_nbuf_t)(pkt->u.pkt.htc_pkt.pNetBufContext);
			qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);
			qdf_nbuf_free(netbuf);
			qdf_mem_free(pkt);
			/* NULL out pkt so that 'prev = pkt' below records
			 * that the predecessor was freed; only a surviving
			 * predecessor gets its next pointer cleared.
			 */
			pkt = NULL;
			if (prev)
				prev->u.next = NULL;
		}
		prev = pkt;
		pkt = next;
	}
	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
}
530
531/*
532 * htt_htc_misc_pkt_list_add() - Add pkt to misc list
533 * @htt_soc: HTT SOC handle
534 * @dp_htt_htc_pkt: pkt to be added to list
535 */
536static void
537htt_htc_misc_pkt_list_add(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
538{
539 struct dp_htt_htc_pkt_union *u_pkt =
540 (struct dp_htt_htc_pkt_union *)pkt;
541 int misclist_trim_level = htc_get_tx_queue_depth(soc->htc_soc,
542 pkt->htc_pkt.Endpoint)
543 + DP_HTT_HTC_PKT_MISCLIST_SIZE;
544
545 HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
546 if (soc->htt_htc_pkt_misclist) {
547 u_pkt->u.next = soc->htt_htc_pkt_misclist;
548 soc->htt_htc_pkt_misclist = u_pkt;
549 } else {
550 soc->htt_htc_pkt_misclist = u_pkt;
551 }
552 HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
553
554 /* only ce pipe size + tx_queue_depth could possibly be in use
555 * free older packets in the misclist
556 */
557 htt_htc_misc_pkt_list_trim(soc, misclist_trim_level);
558}
559
Ankit Kumar0ead45c2019-04-29 15:32:49 +0530560/**
561 * DP_HTT_SEND_HTC_PKT() - Send htt packet from host
562 * @soc : HTT SOC handle
563 * @pkt: pkt to be send
564 * @cmd : command to be recorded in dp htt logger
565 * @buf : Pointer to buffer needs to be recored for above cmd
Ankit Kumar0ead45c2019-04-29 15:32:49 +0530566 *
567 * Return: None
568 */
569static inline void DP_HTT_SEND_HTC_PKT(struct htt_soc *soc,
570 struct dp_htt_htc_pkt *pkt, uint8_t cmd,
571 uint8_t *buf)
572{
573 htt_command_record(soc->htt_logger_handle, cmd, buf);
574 if (htc_send_pkt(soc->htc_soc, &pkt->htc_pkt) ==
575 QDF_STATUS_SUCCESS)
576 htt_htc_misc_pkt_list_add(soc, pkt);
577}
578
Pramod Simhae0baa442017-06-27 15:21:39 -0700579/*
580 * htt_htc_misc_pkt_pool_free() - free pkts in misc list
581 * @htt_soc: HTT SOC handle
582 */
583static void
584htt_htc_misc_pkt_pool_free(struct htt_soc *soc)
585{
586 struct dp_htt_htc_pkt_union *pkt, *next;
587 qdf_nbuf_t netbuf;
588
589 pkt = soc->htt_htc_pkt_misclist;
590
591 while (pkt) {
592 next = pkt->u.next;
593 netbuf = (qdf_nbuf_t) (pkt->u.pkt.htc_pkt.pNetBufContext);
594 qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);
595
596 soc->stats.htc_pkt_free++;
Houston Hoffman41b912c2017-08-30 14:27:51 -0700597 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
Aditya Sathishded018e2018-07-02 16:25:21 +0530598 "%s: Pkt free count %d",
Pramod Simhae0baa442017-06-27 15:21:39 -0700599 __func__, soc->stats.htc_pkt_free);
600
601 qdf_nbuf_free(netbuf);
602 qdf_mem_free(pkt);
603 pkt = next;
604 }
605 soc->htt_htc_pkt_misclist = NULL;
606}
607
/*
 * htt_t2h_mac_addr_deswizzle() - Swap MAC addr bytes if FW endianness differ
 * @tgt_mac_addr: Target MAC
 * @buffer: Output buffer
 *
 * Return: pointer to the de-swizzled MAC address (either @buffer or
 * @tgt_mac_addr itself, depending on host endianness)
 */
static u_int8_t *
htt_t2h_mac_addr_deswizzle(u_int8_t *tgt_mac_addr, u_int8_t *buffer)
{
#ifdef BIG_ENDIAN_HOST
	/*
	 * The host endianness is opposite of the target endianness.
	 * To make u_int32_t elements come out correctly, the target->host
	 * upload has swizzled the bytes in each u_int32_t element of the
	 * message. For a byte array such as the 6-byte MAC address this
	 * puts the bytes in the wrong order, so undo the swizzle: within
	 * each 4-byte word, byte i maps to byte (3 - i).
	 */
	int i;

	for (i = 0; i < 6; i++)
		buffer[i] = tgt_mac_addr[(i & ~3) + (3 - (i & 3))];
	return buffer;
#else
	/*
	 * The host endianness matches the target endianness -
	 * we can use the mac addr directly from the message buffer.
	 */
	return tgt_mac_addr;
#endif
}
641
/*
 * dp_htt_h2t_send_complete_free_netbuf() - Free completed buffer
 * @soc: SOC handle (unused)
 * @status: Completion status (unused)
 * @netbuf: HTT buffer to release
 *
 * Send-done callback installed via SET_HTC_PACKET_INFO_TX(); simply
 * frees the network buffer that carried the H2T message.
 */
static void
dp_htt_h2t_send_complete_free_netbuf(
	void *soc, A_STATUS status, qdf_nbuf_t netbuf)
{
	qdf_nbuf_free(netbuf);
}
654
/*
 * dp_htt_h2t_send_complete() - H2T completion handler
 * @context: Opaque context (HTT SOC handle)
 * @htc_pkt: HTC packet
 *
 * Invoked by HTC when a host-to-target message completes. Recovers the
 * enclosing dp_htt_htc_pkt, invokes the per-message part2 callback (if
 * one was stashed in pPktContext) with the netbuf, then recycles the
 * packet wrapper onto the freelist.
 */
static void
dp_htt_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
{
	void (*send_complete_part2)(
		void *soc, QDF_STATUS status, qdf_nbuf_t msdu);
	struct htt_soc *soc = (struct htt_soc *) context;
	struct dp_htt_htc_pkt *htt_pkt;
	qdf_nbuf_t netbuf;

	/* per-message callback was stored in pPktContext at send time */
	send_complete_part2 = htc_pkt->pPktContext;

	htt_pkt = container_of(htc_pkt, struct dp_htt_htc_pkt, htc_pkt);

	/* process (free or keep) the netbuf that held the message */
	netbuf = (qdf_nbuf_t) htc_pkt->pNetBufContext;
	/*
	 * adf sendcomplete is required for windows only
	 */
	/* qdf_nbuf_set_sendcompleteflag(netbuf, TRUE); */
	if (send_complete_part2) {
		send_complete_part2(
			htt_pkt->soc_ctxt, htc_pkt->Status, netbuf);
	}
	/* free the htt_htc_pkt / HTC_PACKET object */
	htt_htc_pkt_free(soc, htt_pkt);
}
686
687/*
688 * htt_h2t_ver_req_msg() - Send HTT version request message to target
689 * @htt_soc: HTT SOC handle
690 *
691 * Return: 0 on success; error code on failure
692 */
693static int htt_h2t_ver_req_msg(struct htt_soc *soc)
694{
695 struct dp_htt_htc_pkt *pkt;
696 qdf_nbuf_t msg;
697 uint32_t *msg_word;
698
699 msg = qdf_nbuf_alloc(
700 soc->osdev,
701 HTT_MSG_BUF_SIZE(HTT_VER_REQ_BYTES),
702 /* reserve room for the HTC header */
703 HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
704 if (!msg)
705 return QDF_STATUS_E_NOMEM;
706
707 /*
708 * Set the length of the message.
709 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
710 * separately during the below call to qdf_nbuf_push_head.
711 * The contribution from the HTC header is added separately inside HTC.
712 */
713 if (qdf_nbuf_put_tail(msg, HTT_VER_REQ_BYTES) == NULL) {
714 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Aditya Sathishded018e2018-07-02 16:25:21 +0530715 "%s: Failed to expand head for HTT_H2T_MSG_TYPE_VERSION_REQ msg",
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700716 __func__);
717 return QDF_STATUS_E_FAILURE;
718 }
719
720 /* fill in the message contents */
721 msg_word = (u_int32_t *) qdf_nbuf_data(msg);
722
723 /* rewind beyond alignment pad to get to the HTC header reserved area */
724 qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
725
726 *msg_word = 0;
727 HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VERSION_REQ);
728
729 pkt = htt_htc_pkt_alloc(soc);
730 if (!pkt) {
731 qdf_nbuf_free(msg);
732 return QDF_STATUS_E_FAILURE;
733 }
734 pkt->soc_ctxt = NULL; /* not used during send-done callback */
735
736 SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
737 dp_htt_h2t_send_complete_free_netbuf, qdf_nbuf_data(msg),
738 qdf_nbuf_len(msg), soc->htc_endpoint,
739 1); /* tag - not relevant here */
740
741 SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
Ankit Kumar0ead45c2019-04-29 15:32:49 +0530742 DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_VERSION_REQ, NULL);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700743 return 0;
744}
745
746/*
747 * htt_srng_setup() - Send SRNG setup message to target
748 * @htt_soc: HTT SOC handle
Dhanashri Atred4032ab2017-01-17 15:05:41 -0800749 * @mac_id: MAC Id
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700750 * @hal_srng: Opaque HAL SRNG pointer
751 * @hal_ring_type: SRNG ring type
752 *
753 * Return: 0 on success; error code on failure
754 */
Akshay Kosigia5c46a42019-06-27 12:43:01 +0530755int htt_srng_setup(struct htt_soc *soc, int mac_id,
756 hal_ring_handle_t hal_ring_hdl,
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +0530757 int hal_ring_type)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700758{
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700759 struct dp_htt_htc_pkt *pkt;
760 qdf_nbuf_t htt_msg;
761 uint32_t *msg_word;
762 struct hal_srng_params srng_params;
763 qdf_dma_addr_t hp_addr, tp_addr;
764 uint32_t ring_entry_size =
765 hal_srng_get_entrysize(soc->hal_soc, hal_ring_type);
766 int htt_ring_type, htt_ring_id;
Ankit Kumar0ead45c2019-04-29 15:32:49 +0530767 uint8_t *htt_logger_bufp;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700768
769 /* Sizes should be set in 4-byte words */
770 ring_entry_size = ring_entry_size >> 2;
771
772 htt_msg = qdf_nbuf_alloc(soc->osdev,
773 HTT_MSG_BUF_SIZE(HTT_SRING_SETUP_SZ),
774 /* reserve room for the HTC header */
775 HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
776 if (!htt_msg)
777 goto fail0;
778
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +0530779 hal_get_srng_params(soc->hal_soc, hal_ring_hdl, &srng_params);
780 hp_addr = hal_srng_get_hp_addr(soc->hal_soc, hal_ring_hdl);
781 tp_addr = hal_srng_get_tp_addr(soc->hal_soc, hal_ring_hdl);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700782
783 switch (hal_ring_type) {
784 case RXDMA_BUF:
Dhanashri Atre7351d172016-10-12 13:08:09 -0700785#ifdef QCA_HOST2FW_RXBUF_RING
Manoj Ekbote46c03162017-02-16 21:32:00 -0800786 if (srng_params.ring_id ==
Yun Parkfde6b9e2017-06-26 17:13:11 -0700787 (HAL_SRNG_WMAC1_SW2RXDMA0_BUF0)) {
Dhanashri Atre7351d172016-10-12 13:08:09 -0700788 htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
789 htt_ring_type = HTT_SW_TO_SW_RING;
Yun Parkfde6b9e2017-06-26 17:13:11 -0700790#ifdef IPA_OFFLOAD
791 } else if (srng_params.ring_id ==
792 (HAL_SRNG_WMAC1_SW2RXDMA0_BUF2)) {
793 htt_ring_id = HTT_HOST2_TO_FW_RXBUF_RING;
794 htt_ring_type = HTT_SW_TO_SW_RING;
795#endif
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700796#else
Manoj Ekbote46c03162017-02-16 21:32:00 -0800797 if (srng_params.ring_id ==
Yun Parkfde6b9e2017-06-26 17:13:11 -0700798 (HAL_SRNG_WMAC1_SW2RXDMA0_BUF0 +
Manoj Ekbote46c03162017-02-16 21:32:00 -0800799 (mac_id * HAL_MAX_RINGS_PER_LMAC))) {
Dhanashri Atre7351d172016-10-12 13:08:09 -0700800 htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
801 htt_ring_type = HTT_SW_TO_HW_RING;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700802#endif
Dhanashri Atred4032ab2017-01-17 15:05:41 -0800803 } else if (srng_params.ring_id ==
Yun Parkfde6b9e2017-06-26 17:13:11 -0700804#ifdef IPA_OFFLOAD
805 (HAL_SRNG_WMAC1_SW2RXDMA0_BUF1 +
806#else
Dhanashri Atred4032ab2017-01-17 15:05:41 -0800807 (HAL_SRNG_WMAC1_SW2RXDMA1_BUF +
Yun Parkfde6b9e2017-06-26 17:13:11 -0700808#endif
Dhanashri Atred4032ab2017-01-17 15:05:41 -0800809 (mac_id * HAL_MAX_RINGS_PER_LMAC))) {
Dhanashri Atre7351d172016-10-12 13:08:09 -0700810 htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
811 htt_ring_type = HTT_SW_TO_HW_RING;
Dhanashri Atred4032ab2017-01-17 15:05:41 -0800812 } else {
Dhanashri Atre7351d172016-10-12 13:08:09 -0700813 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Aditya Sathishded018e2018-07-02 16:25:21 +0530814 "%s: Ring %d currently not supported",
Yun Parkfde6b9e2017-06-26 17:13:11 -0700815 __func__, srng_params.ring_id);
Dhanashri Atre7351d172016-10-12 13:08:09 -0700816 goto fail1;
817 }
Dhanashri Atred4032ab2017-01-17 15:05:41 -0800818
Mohit Khanna81179cb2018-08-16 20:50:43 -0700819 dp_info("ring_type %d ring_id %d htt_ring_id %d hp_addr 0x%llx tp_addr 0x%llx",
820 hal_ring_type, srng_params.ring_id, htt_ring_id,
821 (uint64_t)hp_addr,
822 (uint64_t)tp_addr);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700823 break;
824 case RXDMA_MONITOR_BUF:
825 htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
826 htt_ring_type = HTT_SW_TO_HW_RING;
827 break;
828 case RXDMA_MONITOR_STATUS:
829 htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
830 htt_ring_type = HTT_SW_TO_HW_RING;
831 break;
832 case RXDMA_MONITOR_DST:
833 htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
834 htt_ring_type = HTT_HW_TO_SW_RING;
835 break;
Kai Chen6eca1a62017-01-12 10:17:53 -0800836 case RXDMA_MONITOR_DESC:
837 htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
838 htt_ring_type = HTT_SW_TO_HW_RING;
839 break;
Pramod Simhae382ff82017-06-05 18:09:26 -0700840 case RXDMA_DST:
841 htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
842 htt_ring_type = HTT_HW_TO_SW_RING;
843 break;
Kai Chen6eca1a62017-01-12 10:17:53 -0800844
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700845 default:
846 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Aditya Sathishded018e2018-07-02 16:25:21 +0530847 "%s: Ring currently not supported", __func__);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700848 goto fail1;
849 }
850
851 /*
852 * Set the length of the message.
853 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
854 * separately during the below call to qdf_nbuf_push_head.
855 * The contribution from the HTC header is added separately inside HTC.
856 */
857 if (qdf_nbuf_put_tail(htt_msg, HTT_SRING_SETUP_SZ) == NULL) {
858 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Aditya Sathishded018e2018-07-02 16:25:21 +0530859 "%s: Failed to expand head for SRING_SETUP msg",
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700860 __func__);
861 return QDF_STATUS_E_FAILURE;
862 }
863
864 msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
865
866 /* rewind beyond alignment pad to get to the HTC header reserved area */
867 qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
868
869 /* word 0 */
870 *msg_word = 0;
Ankit Kumar0ead45c2019-04-29 15:32:49 +0530871 htt_logger_bufp = (uint8_t *)msg_word;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700872 HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_SRING_SETUP);
Dhanashri Atre2ea8c0f2017-02-08 11:09:56 -0800873
Kai Chencbe4c342017-06-12 20:06:35 -0700874 if ((htt_ring_type == HTT_SW_TO_HW_RING) ||
Ravi Joshi8851f4e2017-06-07 21:22:08 -0700875 (htt_ring_type == HTT_HW_TO_SW_RING))
Dhanashri Atre2ea8c0f2017-02-08 11:09:56 -0800876 HTT_SRING_SETUP_PDEV_ID_SET(*msg_word,
877 DP_SW2HW_MACID(mac_id));
878 else
879 HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, mac_id);
880
Rakesh Pillai51264a62019-05-08 19:15:56 +0530881 dp_info("%s: mac_id %d", __func__, mac_id);
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700882 HTT_SRING_SETUP_RING_TYPE_SET(*msg_word, htt_ring_type);
883 /* TODO: Discuss with FW on changing this to unique ID and using
884 * htt_ring_type to send the type of ring
885 */
886 HTT_SRING_SETUP_RING_ID_SET(*msg_word, htt_ring_id);
887
888 /* word 1 */
889 msg_word++;
890 *msg_word = 0;
891 HTT_SRING_SETUP_RING_BASE_ADDR_LO_SET(*msg_word,
892 srng_params.ring_base_paddr & 0xffffffff);
893
894 /* word 2 */
895 msg_word++;
896 *msg_word = 0;
897 HTT_SRING_SETUP_RING_BASE_ADDR_HI_SET(*msg_word,
898 (uint64_t)srng_params.ring_base_paddr >> 32);
899
900 /* word 3 */
901 msg_word++;
902 *msg_word = 0;
903 HTT_SRING_SETUP_ENTRY_SIZE_SET(*msg_word, ring_entry_size);
904 HTT_SRING_SETUP_RING_SIZE_SET(*msg_word,
905 (ring_entry_size * srng_params.num_entries));
Rakesh Pillai51264a62019-05-08 19:15:56 +0530906 dp_info("%s: entry_size %d", __func__, ring_entry_size);
907 dp_info("%s: num_entries %d", __func__, srng_params.num_entries);
908 dp_info("%s: ring_size %d", __func__,
909 (ring_entry_size * srng_params.num_entries));
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700910 if (htt_ring_type == HTT_SW_TO_HW_RING)
Leo Chang5ea93a42016-11-03 12:39:49 -0700911 HTT_SRING_SETUP_RING_MISC_CFG_FLAG_LOOPCOUNT_DISABLE_SET(
912 *msg_word, 1);
913 HTT_SRING_SETUP_RING_MISC_CFG_FLAG_MSI_SWAP_SET(*msg_word,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700914 !!(srng_params.flags & HAL_SRNG_MSI_SWAP));
Leo Chang5ea93a42016-11-03 12:39:49 -0700915 HTT_SRING_SETUP_RING_MISC_CFG_FLAG_TLV_SWAP_SET(*msg_word,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700916 !!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP));
Leo Chang5ea93a42016-11-03 12:39:49 -0700917 HTT_SRING_SETUP_RING_MISC_CFG_FLAG_HOST_FW_SWAP_SET(*msg_word,
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700918 !!(srng_params.flags & HAL_SRNG_RING_PTR_SWAP));
919
920 /* word 4 */
921 msg_word++;
922 *msg_word = 0;
923 HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
924 hp_addr & 0xffffffff);
925
926 /* word 5 */
927 msg_word++;
928 *msg_word = 0;
929 HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
930 (uint64_t)hp_addr >> 32);
931
932 /* word 6 */
933 msg_word++;
934 *msg_word = 0;
935 HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
936 tp_addr & 0xffffffff);
937
938 /* word 7 */
939 msg_word++;
940 *msg_word = 0;
941 HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
942 (uint64_t)tp_addr >> 32);
943
944 /* word 8 */
945 msg_word++;
946 *msg_word = 0;
947 HTT_SRING_SETUP_RING_MSI_ADDR_LO_SET(*msg_word,
948 srng_params.msi_addr & 0xffffffff);
949
950 /* word 9 */
951 msg_word++;
952 *msg_word = 0;
953 HTT_SRING_SETUP_RING_MSI_ADDR_HI_SET(*msg_word,
954 (uint64_t)(srng_params.msi_addr) >> 32);
955
956 /* word 10 */
957 msg_word++;
958 *msg_word = 0;
959 HTT_SRING_SETUP_RING_MSI_DATA_SET(*msg_word,
960 srng_params.msi_data);
961
962 /* word 11 */
963 msg_word++;
964 *msg_word = 0;
965 HTT_SRING_SETUP_INTR_BATCH_COUNTER_TH_SET(*msg_word,
966 srng_params.intr_batch_cntr_thres_entries *
967 ring_entry_size);
968 HTT_SRING_SETUP_INTR_TIMER_TH_SET(*msg_word,
969 srng_params.intr_timer_thres_us >> 3);
970
971 /* word 12 */
972 msg_word++;
973 *msg_word = 0;
974 if (srng_params.flags & HAL_SRNG_LOW_THRES_INTR_ENABLE) {
975 /* TODO: Setting low threshold to 1/8th of ring size - see
976 * if this needs to be configurable
977 */
978 HTT_SRING_SETUP_INTR_LOW_TH_SET(*msg_word,
979 srng_params.low_threshold);
980 }
981 /* "response_required" field should be set if a HTT response message is
982 * required after setting up the ring.
983 */
984 pkt = htt_htc_pkt_alloc(soc);
985 if (!pkt)
986 goto fail1;
987
988 pkt->soc_ctxt = NULL; /* not used during send-done callback */
989
990 SET_HTC_PACKET_INFO_TX(
991 &pkt->htc_pkt,
992 dp_htt_h2t_send_complete_free_netbuf,
993 qdf_nbuf_data(htt_msg),
994 qdf_nbuf_len(htt_msg),
995 soc->htc_endpoint,
Yue Ma245b47b2017-02-21 16:35:31 -0800996 HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -0700997
998 SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
Ankit Kumar0ead45c2019-04-29 15:32:49 +0530999 DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_SRING_SETUP,
1000 htt_logger_bufp);
Kai Chen6eca1a62017-01-12 10:17:53 -08001001
1002 return QDF_STATUS_SUCCESS;
1003
1004fail1:
1005 qdf_nbuf_free(htt_msg);
1006fail0:
1007 return QDF_STATUS_E_FAILURE;
1008}
1009
1010/*
1011 * htt_h2t_rx_ring_cfg() - Send SRNG packet and TLV filter
1012 * config message to target
1013 * @htt_soc: HTT SOC handle
1014 * @pdev_id: PDEV Id
1015 * @hal_srng: Opaque HAL SRNG pointer
1016 * @hal_ring_type: SRNG ring type
1017 * @ring_buf_size: SRNG buffer size
1018 * @htt_tlv_filter: Rx SRNG TLV and filter setting
1019 * Return: 0 on success; error code on failure
1020 */
Akshay Kosigia5c46a42019-06-27 12:43:01 +05301021int htt_h2t_rx_ring_cfg(struct htt_soc *htt_soc, int pdev_id,
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301022 hal_ring_handle_t hal_ring_hdl,
1023 int hal_ring_type, int ring_buf_size,
1024 struct htt_rx_ring_tlv_filter *htt_tlv_filter)
Kai Chen6eca1a62017-01-12 10:17:53 -08001025{
1026 struct htt_soc *soc = (struct htt_soc *)htt_soc;
1027 struct dp_htt_htc_pkt *pkt;
1028 qdf_nbuf_t htt_msg;
1029 uint32_t *msg_word;
1030 struct hal_srng_params srng_params;
1031 uint32_t htt_ring_type, htt_ring_id;
1032 uint32_t tlv_filter;
Ankit Kumar0ead45c2019-04-29 15:32:49 +05301033 uint8_t *htt_logger_bufp;
Ruben Columbusb7a1c572019-08-12 11:11:29 -07001034 struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = soc->dp_soc->wlan_cfg_ctx;
1035 uint32_t mon_drop_th = wlan_cfg_get_mon_drop_thresh(wlan_cfg_ctx);
Kai Chen6eca1a62017-01-12 10:17:53 -08001036
1037 htt_msg = qdf_nbuf_alloc(soc->osdev,
1038 HTT_MSG_BUF_SIZE(HTT_RX_RING_SELECTION_CFG_SZ),
1039 /* reserve room for the HTC header */
1040 HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
1041 if (!htt_msg)
1042 goto fail0;
1043
Akshay Kosigi0bca9fb2019-06-27 15:26:13 +05301044 hal_get_srng_params(soc->hal_soc, hal_ring_hdl, &srng_params);
Kai Chen6eca1a62017-01-12 10:17:53 -08001045
1046 switch (hal_ring_type) {
1047 case RXDMA_BUF:
Kai Chen6eca1a62017-01-12 10:17:53 -08001048 htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
1049 htt_ring_type = HTT_SW_TO_HW_RING;
Kai Chen6eca1a62017-01-12 10:17:53 -08001050 break;
1051 case RXDMA_MONITOR_BUF:
1052 htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
1053 htt_ring_type = HTT_SW_TO_HW_RING;
1054 break;
1055 case RXDMA_MONITOR_STATUS:
1056 htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
1057 htt_ring_type = HTT_SW_TO_HW_RING;
1058 break;
1059 case RXDMA_MONITOR_DST:
1060 htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
1061 htt_ring_type = HTT_HW_TO_SW_RING;
1062 break;
1063 case RXDMA_MONITOR_DESC:
1064 htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
1065 htt_ring_type = HTT_SW_TO_HW_RING;
1066 break;
Pramod Simhae382ff82017-06-05 18:09:26 -07001067 case RXDMA_DST:
1068 htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
1069 htt_ring_type = HTT_HW_TO_SW_RING;
1070 break;
Kai Chen6eca1a62017-01-12 10:17:53 -08001071
1072 default:
1073 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Aditya Sathishded018e2018-07-02 16:25:21 +05301074 "%s: Ring currently not supported", __func__);
Kai Chen6eca1a62017-01-12 10:17:53 -08001075 goto fail1;
1076 }
1077
1078 /*
1079 * Set the length of the message.
1080 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
1081 * separately during the below call to qdf_nbuf_push_head.
1082 * The contribution from the HTC header is added separately inside HTC.
1083 */
1084 if (qdf_nbuf_put_tail(htt_msg, HTT_RX_RING_SELECTION_CFG_SZ) == NULL) {
1085 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Aditya Sathishded018e2018-07-02 16:25:21 +05301086 "%s: Failed to expand head for RX Ring Cfg msg",
Kai Chen6eca1a62017-01-12 10:17:53 -08001087 __func__);
1088 goto fail1; /* failure */
1089 }
1090
1091 msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
1092
1093 /* rewind beyond alignment pad to get to the HTC header reserved area */
1094 qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
1095
1096 /* word 0 */
Ankit Kumar0ead45c2019-04-29 15:32:49 +05301097 htt_logger_bufp = (uint8_t *)msg_word;
Kai Chen6eca1a62017-01-12 10:17:53 -08001098 *msg_word = 0;
1099 HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG);
Ravi Joshi8851f4e2017-06-07 21:22:08 -07001100
1101 /*
1102 * pdev_id is indexed from 0 whereas mac_id is indexed from 1
1103 * SW_TO_SW and SW_TO_HW rings are unaffected by this
1104 */
1105 if (htt_ring_type == HTT_SW_TO_SW_RING ||
1106 htt_ring_type == HTT_SW_TO_HW_RING)
1107 HTT_RX_RING_SELECTION_CFG_PDEV_ID_SET(*msg_word,
1108 DP_SW2HW_MACID(pdev_id));
1109
Kai Chen6eca1a62017-01-12 10:17:53 -08001110 /* TODO: Discuss with FW on changing this to unique ID and using
1111 * htt_ring_type to send the type of ring
1112 */
1113 HTT_RX_RING_SELECTION_CFG_RING_ID_SET(*msg_word, htt_ring_id);
1114
1115 HTT_RX_RING_SELECTION_CFG_STATUS_TLV_SET(*msg_word,
1116 !!(srng_params.flags & HAL_SRNG_MSI_SWAP));
1117
1118 HTT_RX_RING_SELECTION_CFG_PKT_TLV_SET(*msg_word,
1119 !!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP));
1120
Kiran Venkatappa07921612019-03-02 23:14:12 +05301121 HTT_RX_RING_SELECTION_CFG_RX_OFFSETS_VALID_SET(*msg_word,
1122 htt_tlv_filter->offset_valid);
1123
Ruben Columbusb7a1c572019-08-12 11:11:29 -07001124 if (mon_drop_th > 0)
1125 HTT_RX_RING_SELECTION_CFG_DROP_THRESHOLD_VALID_SET(*msg_word,
1126 1);
1127 else
1128 HTT_RX_RING_SELECTION_CFG_DROP_THRESHOLD_VALID_SET(*msg_word,
1129 0);
1130
Kai Chen6eca1a62017-01-12 10:17:53 -08001131 /* word 1 */
1132 msg_word++;
1133 *msg_word = 0;
1134 HTT_RX_RING_SELECTION_CFG_RING_BUFFER_SIZE_SET(*msg_word,
1135 ring_buf_size);
1136
1137 /* word 2 */
1138 msg_word++;
1139 *msg_word = 0;
1140
1141 if (htt_tlv_filter->enable_fp) {
nobeljd124b742017-10-16 11:59:12 -07001142 /* TYPE: MGMT */
1143 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1144 FP, MGMT, 0000,
1145 (htt_tlv_filter->fp_mgmt_filter &
1146 FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
1147 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1148 FP, MGMT, 0001,
1149 (htt_tlv_filter->fp_mgmt_filter &
1150 FILTER_MGMT_ASSOC_RES) ? 1 : 0);
1151 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1152 FP, MGMT, 0010,
1153 (htt_tlv_filter->fp_mgmt_filter &
1154 FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
1155 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1156 FP, MGMT, 0011,
1157 (htt_tlv_filter->fp_mgmt_filter &
1158 FILTER_MGMT_REASSOC_RES) ? 1 : 0);
1159 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1160 FP, MGMT, 0100,
1161 (htt_tlv_filter->fp_mgmt_filter &
1162 FILTER_MGMT_PROBE_REQ) ? 1 : 0);
1163 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1164 FP, MGMT, 0101,
1165 (htt_tlv_filter->fp_mgmt_filter &
1166 FILTER_MGMT_PROBE_RES) ? 1 : 0);
1167 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1168 FP, MGMT, 0110,
1169 (htt_tlv_filter->fp_mgmt_filter &
1170 FILTER_MGMT_TIM_ADVT) ? 1 : 0);
1171 /* reserved */
Kai Chen6eca1a62017-01-12 10:17:53 -08001172 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, FP,
nobeljd124b742017-10-16 11:59:12 -07001173 MGMT, 0111,
1174 (htt_tlv_filter->fp_mgmt_filter &
1175 FILTER_MGMT_RESERVED_7) ? 1 : 0);
1176 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1177 FP, MGMT, 1000,
1178 (htt_tlv_filter->fp_mgmt_filter &
1179 FILTER_MGMT_BEACON) ? 1 : 0);
1180 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1181 FP, MGMT, 1001,
1182 (htt_tlv_filter->fp_mgmt_filter &
1183 FILTER_MGMT_ATIM) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001184 }
1185
1186 if (htt_tlv_filter->enable_md) {
sumedh baikady59a2d332018-05-22 01:50:38 -07001187 /* TYPE: MGMT */
1188 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1189 MD, MGMT, 0000,
1190 (htt_tlv_filter->md_mgmt_filter &
1191 FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
1192 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1193 MD, MGMT, 0001,
1194 (htt_tlv_filter->md_mgmt_filter &
1195 FILTER_MGMT_ASSOC_RES) ? 1 : 0);
1196 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1197 MD, MGMT, 0010,
1198 (htt_tlv_filter->md_mgmt_filter &
1199 FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
1200 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1201 MD, MGMT, 0011,
1202 (htt_tlv_filter->md_mgmt_filter &
1203 FILTER_MGMT_REASSOC_RES) ? 1 : 0);
1204 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1205 MD, MGMT, 0100,
1206 (htt_tlv_filter->md_mgmt_filter &
1207 FILTER_MGMT_PROBE_REQ) ? 1 : 0);
1208 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1209 MD, MGMT, 0101,
1210 (htt_tlv_filter->md_mgmt_filter &
1211 FILTER_MGMT_PROBE_RES) ? 1 : 0);
1212 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1213 MD, MGMT, 0110,
1214 (htt_tlv_filter->md_mgmt_filter &
1215 FILTER_MGMT_TIM_ADVT) ? 1 : 0);
1216 /* reserved */
Kai Chen6eca1a62017-01-12 10:17:53 -08001217 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
sumedh baikady59a2d332018-05-22 01:50:38 -07001218 MGMT, 0111,
1219 (htt_tlv_filter->md_mgmt_filter &
1220 FILTER_MGMT_RESERVED_7) ? 1 : 0);
1221 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1222 MD, MGMT, 1000,
1223 (htt_tlv_filter->md_mgmt_filter &
1224 FILTER_MGMT_BEACON) ? 1 : 0);
1225 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1226 MD, MGMT, 1001,
1227 (htt_tlv_filter->md_mgmt_filter &
1228 FILTER_MGMT_ATIM) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001229 }
1230
1231 if (htt_tlv_filter->enable_mo) {
nobeljd124b742017-10-16 11:59:12 -07001232 /* TYPE: MGMT */
1233 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1234 MO, MGMT, 0000,
1235 (htt_tlv_filter->mo_mgmt_filter &
1236 FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
1237 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1238 MO, MGMT, 0001,
1239 (htt_tlv_filter->mo_mgmt_filter &
1240 FILTER_MGMT_ASSOC_RES) ? 1 : 0);
1241 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1242 MO, MGMT, 0010,
1243 (htt_tlv_filter->mo_mgmt_filter &
1244 FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
1245 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1246 MO, MGMT, 0011,
1247 (htt_tlv_filter->mo_mgmt_filter &
1248 FILTER_MGMT_REASSOC_RES) ? 1 : 0);
1249 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1250 MO, MGMT, 0100,
1251 (htt_tlv_filter->mo_mgmt_filter &
1252 FILTER_MGMT_PROBE_REQ) ? 1 : 0);
1253 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1254 MO, MGMT, 0101,
1255 (htt_tlv_filter->mo_mgmt_filter &
1256 FILTER_MGMT_PROBE_RES) ? 1 : 0);
1257 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1258 MO, MGMT, 0110,
1259 (htt_tlv_filter->mo_mgmt_filter &
1260 FILTER_MGMT_TIM_ADVT) ? 1 : 0);
1261 /* reserved */
Kai Chen6eca1a62017-01-12 10:17:53 -08001262 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MO,
nobeljd124b742017-10-16 11:59:12 -07001263 MGMT, 0111,
1264 (htt_tlv_filter->mo_mgmt_filter &
1265 FILTER_MGMT_RESERVED_7) ? 1 : 0);
1266 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1267 MO, MGMT, 1000,
1268 (htt_tlv_filter->mo_mgmt_filter &
1269 FILTER_MGMT_BEACON) ? 1 : 0);
1270 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
1271 MO, MGMT, 1001,
1272 (htt_tlv_filter->mo_mgmt_filter &
1273 FILTER_MGMT_ATIM) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001274 }
Ravi Joshi2320b6f2017-05-24 15:43:04 -07001275
Kai Chen6eca1a62017-01-12 10:17:53 -08001276 /* word 3 */
1277 msg_word++;
1278 *msg_word = 0;
1279
1280 if (htt_tlv_filter->enable_fp) {
nobeljd124b742017-10-16 11:59:12 -07001281 /* TYPE: MGMT */
1282 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1283 FP, MGMT, 1010,
1284 (htt_tlv_filter->fp_mgmt_filter &
1285 FILTER_MGMT_DISASSOC) ? 1 : 0);
1286 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1287 FP, MGMT, 1011,
1288 (htt_tlv_filter->fp_mgmt_filter &
1289 FILTER_MGMT_AUTH) ? 1 : 0);
1290 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1291 FP, MGMT, 1100,
1292 (htt_tlv_filter->fp_mgmt_filter &
1293 FILTER_MGMT_DEAUTH) ? 1 : 0);
1294 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1295 FP, MGMT, 1101,
1296 (htt_tlv_filter->fp_mgmt_filter &
1297 FILTER_MGMT_ACTION) ? 1 : 0);
1298 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1299 FP, MGMT, 1110,
1300 (htt_tlv_filter->fp_mgmt_filter &
1301 FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1302 /* reserved*/
Kai Chen6eca1a62017-01-12 10:17:53 -08001303 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, FP,
nobeljd124b742017-10-16 11:59:12 -07001304 MGMT, 1111,
1305 (htt_tlv_filter->fp_mgmt_filter &
1306 FILTER_MGMT_RESERVED_15) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001307 }
1308
1309 if (htt_tlv_filter->enable_md) {
sumedh baikady59a2d332018-05-22 01:50:38 -07001310 /* TYPE: MGMT */
1311 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1312 MD, MGMT, 1010,
1313 (htt_tlv_filter->md_mgmt_filter &
1314 FILTER_MGMT_DISASSOC) ? 1 : 0);
1315 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1316 MD, MGMT, 1011,
1317 (htt_tlv_filter->md_mgmt_filter &
1318 FILTER_MGMT_AUTH) ? 1 : 0);
1319 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1320 MD, MGMT, 1100,
1321 (htt_tlv_filter->md_mgmt_filter &
1322 FILTER_MGMT_DEAUTH) ? 1 : 0);
1323 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1324 MD, MGMT, 1101,
1325 (htt_tlv_filter->md_mgmt_filter &
1326 FILTER_MGMT_ACTION) ? 1 : 0);
1327 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1328 MD, MGMT, 1110,
1329 (htt_tlv_filter->md_mgmt_filter &
1330 FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001331 }
1332
1333 if (htt_tlv_filter->enable_mo) {
nobeljd124b742017-10-16 11:59:12 -07001334 /* TYPE: MGMT */
1335 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1336 MO, MGMT, 1010,
1337 (htt_tlv_filter->mo_mgmt_filter &
1338 FILTER_MGMT_DISASSOC) ? 1 : 0);
1339 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1340 MO, MGMT, 1011,
1341 (htt_tlv_filter->mo_mgmt_filter &
1342 FILTER_MGMT_AUTH) ? 1 : 0);
1343 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1344 MO, MGMT, 1100,
1345 (htt_tlv_filter->mo_mgmt_filter &
1346 FILTER_MGMT_DEAUTH) ? 1 : 0);
1347 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1348 MO, MGMT, 1101,
1349 (htt_tlv_filter->mo_mgmt_filter &
1350 FILTER_MGMT_ACTION) ? 1 : 0);
1351 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
1352 MO, MGMT, 1110,
1353 (htt_tlv_filter->mo_mgmt_filter &
1354 FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
1355 /* reserved*/
Kai Chen6eca1a62017-01-12 10:17:53 -08001356 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MO,
nobeljd124b742017-10-16 11:59:12 -07001357 MGMT, 1111,
1358 (htt_tlv_filter->mo_mgmt_filter &
1359 FILTER_MGMT_RESERVED_15) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001360 }
1361
1362 /* word 4 */
1363 msg_word++;
1364 *msg_word = 0;
1365
1366 if (htt_tlv_filter->enable_fp) {
nobeljd124b742017-10-16 11:59:12 -07001367 /* TYPE: CTRL */
1368 /* reserved */
Keyur Parekhdb0fa142017-07-13 19:40:22 -07001369 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
nobeljd124b742017-10-16 11:59:12 -07001370 CTRL, 0000,
1371 (htt_tlv_filter->fp_ctrl_filter &
1372 FILTER_CTRL_RESERVED_1) ? 1 : 0);
1373 /* reserved */
Keyur Parekhdb0fa142017-07-13 19:40:22 -07001374 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
nobeljd124b742017-10-16 11:59:12 -07001375 CTRL, 0001,
1376 (htt_tlv_filter->fp_ctrl_filter &
1377 FILTER_CTRL_RESERVED_2) ? 1 : 0);
Keyur Parekhdb0fa142017-07-13 19:40:22 -07001378 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
nobeljd124b742017-10-16 11:59:12 -07001379 CTRL, 0010,
1380 (htt_tlv_filter->fp_ctrl_filter &
1381 FILTER_CTRL_TRIGGER) ? 1 : 0);
1382 /* reserved */
Keyur Parekhdb0fa142017-07-13 19:40:22 -07001383 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
nobeljd124b742017-10-16 11:59:12 -07001384 CTRL, 0011,
1385 (htt_tlv_filter->fp_ctrl_filter &
1386 FILTER_CTRL_RESERVED_4) ? 1 : 0);
Keyur Parekhdb0fa142017-07-13 19:40:22 -07001387 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
nobeljd124b742017-10-16 11:59:12 -07001388 CTRL, 0100,
1389 (htt_tlv_filter->fp_ctrl_filter &
1390 FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
Keyur Parekhdb0fa142017-07-13 19:40:22 -07001391 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
nobeljd124b742017-10-16 11:59:12 -07001392 CTRL, 0101,
1393 (htt_tlv_filter->fp_ctrl_filter &
1394 FILTER_CTRL_VHT_NDP) ? 1 : 0);
Keyur Parekhdb0fa142017-07-13 19:40:22 -07001395 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
nobeljd124b742017-10-16 11:59:12 -07001396 CTRL, 0110,
1397 (htt_tlv_filter->fp_ctrl_filter &
1398 FILTER_CTRL_FRAME_EXT) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001399 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
nobeljd124b742017-10-16 11:59:12 -07001400 CTRL, 0111,
1401 (htt_tlv_filter->fp_ctrl_filter &
1402 FILTER_CTRL_CTRLWRAP) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001403 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
nobeljd124b742017-10-16 11:59:12 -07001404 CTRL, 1000,
1405 (htt_tlv_filter->fp_ctrl_filter &
1406 FILTER_CTRL_BA_REQ) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001407 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
nobeljd124b742017-10-16 11:59:12 -07001408 CTRL, 1001,
1409 (htt_tlv_filter->fp_ctrl_filter &
1410 FILTER_CTRL_BA) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001411 }
1412
1413 if (htt_tlv_filter->enable_md) {
sumedh baikady59a2d332018-05-22 01:50:38 -07001414 /* TYPE: CTRL */
1415 /* reserved */
Kai Chen6eca1a62017-01-12 10:17:53 -08001416 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
sumedh baikady59a2d332018-05-22 01:50:38 -07001417 CTRL, 0000,
1418 (htt_tlv_filter->md_ctrl_filter &
1419 FILTER_CTRL_RESERVED_1) ? 1 : 0);
1420 /* reserved */
Kai Chen6eca1a62017-01-12 10:17:53 -08001421 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
sumedh baikady59a2d332018-05-22 01:50:38 -07001422 CTRL, 0001,
1423 (htt_tlv_filter->md_ctrl_filter &
1424 FILTER_CTRL_RESERVED_2) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001425 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
sumedh baikady59a2d332018-05-22 01:50:38 -07001426 CTRL, 0010,
1427 (htt_tlv_filter->md_ctrl_filter &
1428 FILTER_CTRL_TRIGGER) ? 1 : 0);
1429 /* reserved */
1430 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1431 CTRL, 0011,
1432 (htt_tlv_filter->md_ctrl_filter &
1433 FILTER_CTRL_RESERVED_4) ? 1 : 0);
1434 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1435 CTRL, 0100,
1436 (htt_tlv_filter->md_ctrl_filter &
1437 FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
1438 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1439 CTRL, 0101,
1440 (htt_tlv_filter->md_ctrl_filter &
1441 FILTER_CTRL_VHT_NDP) ? 1 : 0);
1442 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1443 CTRL, 0110,
1444 (htt_tlv_filter->md_ctrl_filter &
1445 FILTER_CTRL_FRAME_EXT) ? 1 : 0);
1446 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1447 CTRL, 0111,
1448 (htt_tlv_filter->md_ctrl_filter &
1449 FILTER_CTRL_CTRLWRAP) ? 1 : 0);
1450 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1451 CTRL, 1000,
1452 (htt_tlv_filter->md_ctrl_filter &
1453 FILTER_CTRL_BA_REQ) ? 1 : 0);
1454 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
1455 CTRL, 1001,
1456 (htt_tlv_filter->md_ctrl_filter &
1457 FILTER_CTRL_BA) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001458 }
1459
1460 if (htt_tlv_filter->enable_mo) {
nobeljd124b742017-10-16 11:59:12 -07001461 /* TYPE: CTRL */
1462 /* reserved */
Kai Chen6eca1a62017-01-12 10:17:53 -08001463 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
nobeljd124b742017-10-16 11:59:12 -07001464 CTRL, 0000,
1465 (htt_tlv_filter->mo_ctrl_filter &
1466 FILTER_CTRL_RESERVED_1) ? 1 : 0);
1467 /* reserved */
Kai Chen6eca1a62017-01-12 10:17:53 -08001468 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
nobeljd124b742017-10-16 11:59:12 -07001469 CTRL, 0001,
1470 (htt_tlv_filter->mo_ctrl_filter &
1471 FILTER_CTRL_RESERVED_2) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001472 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
nobeljd124b742017-10-16 11:59:12 -07001473 CTRL, 0010,
1474 (htt_tlv_filter->mo_ctrl_filter &
1475 FILTER_CTRL_TRIGGER) ? 1 : 0);
1476 /* reserved */
Ravi Joshi2320b6f2017-05-24 15:43:04 -07001477 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
nobeljd124b742017-10-16 11:59:12 -07001478 CTRL, 0011,
1479 (htt_tlv_filter->mo_ctrl_filter &
1480 FILTER_CTRL_RESERVED_4) ? 1 : 0);
Ravi Joshi2320b6f2017-05-24 15:43:04 -07001481 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
nobeljd124b742017-10-16 11:59:12 -07001482 CTRL, 0100,
1483 (htt_tlv_filter->mo_ctrl_filter &
1484 FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
Ravi Joshi2320b6f2017-05-24 15:43:04 -07001485 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
nobeljd124b742017-10-16 11:59:12 -07001486 CTRL, 0101,
1487 (htt_tlv_filter->mo_ctrl_filter &
1488 FILTER_CTRL_VHT_NDP) ? 1 : 0);
Ravi Joshi2320b6f2017-05-24 15:43:04 -07001489 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
nobeljd124b742017-10-16 11:59:12 -07001490 CTRL, 0110,
1491 (htt_tlv_filter->mo_ctrl_filter &
1492 FILTER_CTRL_FRAME_EXT) ? 1 : 0);
Ravi Joshi2320b6f2017-05-24 15:43:04 -07001493 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
nobeljd124b742017-10-16 11:59:12 -07001494 CTRL, 0111,
1495 (htt_tlv_filter->mo_ctrl_filter &
1496 FILTER_CTRL_CTRLWRAP) ? 1 : 0);
Ravi Joshi2320b6f2017-05-24 15:43:04 -07001497 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
nobeljd124b742017-10-16 11:59:12 -07001498 CTRL, 1000,
1499 (htt_tlv_filter->mo_ctrl_filter &
1500 FILTER_CTRL_BA_REQ) ? 1 : 0);
Ravi Joshi2320b6f2017-05-24 15:43:04 -07001501 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
nobeljd124b742017-10-16 11:59:12 -07001502 CTRL, 1001,
1503 (htt_tlv_filter->mo_ctrl_filter &
1504 FILTER_CTRL_BA) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001505 }
1506
1507 /* word 5 */
1508 msg_word++;
1509 *msg_word = 0;
1510 if (htt_tlv_filter->enable_fp) {
nobeljd124b742017-10-16 11:59:12 -07001511 /* TYPE: CTRL */
Kai Chen6eca1a62017-01-12 10:17:53 -08001512 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
nobeljd124b742017-10-16 11:59:12 -07001513 CTRL, 1010,
1514 (htt_tlv_filter->fp_ctrl_filter &
1515 FILTER_CTRL_PSPOLL) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001516 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
nobeljd124b742017-10-16 11:59:12 -07001517 CTRL, 1011,
1518 (htt_tlv_filter->fp_ctrl_filter &
1519 FILTER_CTRL_RTS) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001520 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
nobeljd124b742017-10-16 11:59:12 -07001521 CTRL, 1100,
1522 (htt_tlv_filter->fp_ctrl_filter &
1523 FILTER_CTRL_CTS) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001524 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
nobeljd124b742017-10-16 11:59:12 -07001525 CTRL, 1101,
1526 (htt_tlv_filter->fp_ctrl_filter &
1527 FILTER_CTRL_ACK) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001528 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
nobeljd124b742017-10-16 11:59:12 -07001529 CTRL, 1110,
1530 (htt_tlv_filter->fp_ctrl_filter &
1531 FILTER_CTRL_CFEND) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001532 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
nobeljd124b742017-10-16 11:59:12 -07001533 CTRL, 1111,
1534 (htt_tlv_filter->fp_ctrl_filter &
1535 FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1536 /* TYPE: DATA */
Kai Chen6eca1a62017-01-12 10:17:53 -08001537 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
nobeljd124b742017-10-16 11:59:12 -07001538 DATA, MCAST,
1539 (htt_tlv_filter->fp_data_filter &
1540 FILTER_DATA_MCAST) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001541 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
nobeljd124b742017-10-16 11:59:12 -07001542 DATA, UCAST,
1543 (htt_tlv_filter->fp_data_filter &
1544 FILTER_DATA_UCAST) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001545 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
nobeljd124b742017-10-16 11:59:12 -07001546 DATA, NULL,
1547 (htt_tlv_filter->fp_data_filter &
1548 FILTER_DATA_NULL) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001549 }
1550
1551 if (htt_tlv_filter->enable_md) {
sumedh baikady59a2d332018-05-22 01:50:38 -07001552 /* TYPE: CTRL */
Kai Chen6eca1a62017-01-12 10:17:53 -08001553 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
sumedh baikady59a2d332018-05-22 01:50:38 -07001554 CTRL, 1010,
1555 (htt_tlv_filter->md_ctrl_filter &
1556 FILTER_CTRL_PSPOLL) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001557 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
sumedh baikady59a2d332018-05-22 01:50:38 -07001558 CTRL, 1011,
1559 (htt_tlv_filter->md_ctrl_filter &
1560 FILTER_CTRL_RTS) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001561 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
sumedh baikady59a2d332018-05-22 01:50:38 -07001562 CTRL, 1100,
1563 (htt_tlv_filter->md_ctrl_filter &
1564 FILTER_CTRL_CTS) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001565 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
sumedh baikady59a2d332018-05-22 01:50:38 -07001566 CTRL, 1101,
1567 (htt_tlv_filter->md_ctrl_filter &
1568 FILTER_CTRL_ACK) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001569 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
sumedh baikady59a2d332018-05-22 01:50:38 -07001570 CTRL, 1110,
1571 (htt_tlv_filter->md_ctrl_filter &
1572 FILTER_CTRL_CFEND) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001573 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
sumedh baikady59a2d332018-05-22 01:50:38 -07001574 CTRL, 1111,
1575 (htt_tlv_filter->md_ctrl_filter &
1576 FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1577 /* TYPE: DATA */
Kai Chen6eca1a62017-01-12 10:17:53 -08001578 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
sumedh baikady59a2d332018-05-22 01:50:38 -07001579 DATA, MCAST,
1580 (htt_tlv_filter->md_data_filter &
1581 FILTER_DATA_MCAST) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001582 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
sumedh baikady59a2d332018-05-22 01:50:38 -07001583 DATA, UCAST,
1584 (htt_tlv_filter->md_data_filter &
1585 FILTER_DATA_UCAST) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001586 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
sumedh baikady59a2d332018-05-22 01:50:38 -07001587 DATA, NULL,
1588 (htt_tlv_filter->md_data_filter &
1589 FILTER_DATA_NULL) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001590 }
Ravi Joshi2320b6f2017-05-24 15:43:04 -07001591
Kai Chen6eca1a62017-01-12 10:17:53 -08001592 if (htt_tlv_filter->enable_mo) {
nobeljd124b742017-10-16 11:59:12 -07001593 /* TYPE: CTRL */
Kai Chen6eca1a62017-01-12 10:17:53 -08001594 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
nobeljd124b742017-10-16 11:59:12 -07001595 CTRL, 1010,
1596 (htt_tlv_filter->mo_ctrl_filter &
1597 FILTER_CTRL_PSPOLL) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001598 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
nobeljd124b742017-10-16 11:59:12 -07001599 CTRL, 1011,
1600 (htt_tlv_filter->mo_ctrl_filter &
1601 FILTER_CTRL_RTS) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001602 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
nobeljd124b742017-10-16 11:59:12 -07001603 CTRL, 1100,
1604 (htt_tlv_filter->mo_ctrl_filter &
1605 FILTER_CTRL_CTS) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001606 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
nobeljd124b742017-10-16 11:59:12 -07001607 CTRL, 1101,
1608 (htt_tlv_filter->mo_ctrl_filter &
1609 FILTER_CTRL_ACK) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001610 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
nobeljd124b742017-10-16 11:59:12 -07001611 CTRL, 1110,
1612 (htt_tlv_filter->mo_ctrl_filter &
1613 FILTER_CTRL_CFEND) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001614 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
nobeljd124b742017-10-16 11:59:12 -07001615 CTRL, 1111,
1616 (htt_tlv_filter->mo_ctrl_filter &
1617 FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
1618 /* TYPE: DATA */
Kai Chen6eca1a62017-01-12 10:17:53 -08001619 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
nobeljd124b742017-10-16 11:59:12 -07001620 DATA, MCAST,
1621 (htt_tlv_filter->mo_data_filter &
1622 FILTER_DATA_MCAST) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001623 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
nobeljd124b742017-10-16 11:59:12 -07001624 DATA, UCAST,
1625 (htt_tlv_filter->mo_data_filter &
1626 FILTER_DATA_UCAST) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001627 htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
nobeljd124b742017-10-16 11:59:12 -07001628 DATA, NULL,
1629 (htt_tlv_filter->mo_data_filter &
1630 FILTER_DATA_NULL) ? 1 : 0);
Kai Chen6eca1a62017-01-12 10:17:53 -08001631 }
1632
1633 /* word 6 */
1634 msg_word++;
1635 *msg_word = 0;
1636 tlv_filter = 0;
1637 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_START,
1638 htt_tlv_filter->mpdu_start);
1639 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_START,
1640 htt_tlv_filter->msdu_start);
1641 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET,
1642 htt_tlv_filter->packet);
1643 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_END,
1644 htt_tlv_filter->msdu_end);
1645 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_END,
1646 htt_tlv_filter->mpdu_end);
1647 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET_HEADER,
1648 htt_tlv_filter->packet_header);
1649 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, ATTENTION,
Karunakar Dasineni40555682017-03-26 22:44:39 -07001650 htt_tlv_filter->attention);
Kai Chen6eca1a62017-01-12 10:17:53 -08001651 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_START,
1652 htt_tlv_filter->ppdu_start);
1653 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END,
1654 htt_tlv_filter->ppdu_end);
1655 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_USER_STATS,
1656 htt_tlv_filter->ppdu_end_user_stats);
1657 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter,
1658 PPDU_END_USER_STATS_EXT,
1659 htt_tlv_filter->ppdu_end_user_stats_ext);
1660 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_STATUS_DONE,
1661 htt_tlv_filter->ppdu_end_status_done);
sumedh baikady308ff002017-09-18 16:24:36 -07001662 /* RESERVED bit maps to header_per_msdu in htt_tlv_filter*/
1663 htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, RESERVED,
1664 htt_tlv_filter->header_per_msdu);
Kai Chen6eca1a62017-01-12 10:17:53 -08001665
1666 HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_SET(*msg_word, tlv_filter);
1667
Kiran Venkatappa07921612019-03-02 23:14:12 +05301668 msg_word++;
1669 *msg_word = 0;
1670 if (htt_tlv_filter->offset_valid) {
1671 HTT_RX_RING_SELECTION_CFG_RX_PACKET_OFFSET_SET(*msg_word,
1672 htt_tlv_filter->rx_packet_offset);
1673 HTT_RX_RING_SELECTION_CFG_RX_HEADER_OFFSET_SET(*msg_word,
1674 htt_tlv_filter->rx_header_offset);
1675
1676 msg_word++;
1677 *msg_word = 0;
1678 HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_OFFSET_SET(*msg_word,
1679 htt_tlv_filter->rx_mpdu_end_offset);
1680 HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_OFFSET_SET(*msg_word,
1681 htt_tlv_filter->rx_mpdu_start_offset);
1682
1683 msg_word++;
1684 *msg_word = 0;
1685 HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_OFFSET_SET(*msg_word,
1686 htt_tlv_filter->rx_msdu_end_offset);
1687 HTT_RX_RING_SELECTION_CFG_RX_MSDU_START_OFFSET_SET(*msg_word,
1688 htt_tlv_filter->rx_msdu_start_offset);
1689
1690 msg_word++;
1691 *msg_word = 0;
1692 HTT_RX_RING_SELECTION_CFG_RX_ATTENTION_OFFSET_SET(*msg_word,
1693 htt_tlv_filter->rx_attn_offset);
Ruben Columbusb7a1c572019-08-12 11:11:29 -07001694 msg_word++;
1695 *msg_word = 0;
1696 } else {
1697 msg_word += 4;
1698 *msg_word = 0;
Kiran Venkatappa07921612019-03-02 23:14:12 +05301699 }
1700
Ruben Columbusb7a1c572019-08-12 11:11:29 -07001701 if (mon_drop_th > 0)
1702 HTT_RX_RING_SELECTION_CFG_RX_DROP_THRESHOLD_SET(*msg_word,
1703 mon_drop_th);
1704
Kai Chen6eca1a62017-01-12 10:17:53 -08001705 /* "response_required" field should be set if a HTT response message is
1706 * required after setting up the ring.
1707 */
1708 pkt = htt_htc_pkt_alloc(soc);
1709 if (!pkt)
1710 goto fail1;
1711
1712 pkt->soc_ctxt = NULL; /* not used during send-done callback */
1713
1714 SET_HTC_PACKET_INFO_TX(
1715 &pkt->htc_pkt,
1716 dp_htt_h2t_send_complete_free_netbuf,
1717 qdf_nbuf_data(htt_msg),
1718 qdf_nbuf_len(htt_msg),
1719 soc->htc_endpoint,
1720 1); /* tag - not relevant here */
1721
1722 SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
Ankit Kumar0ead45c2019-04-29 15:32:49 +05301723 DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG,
1724 htt_logger_bufp);
Kai Chen6eca1a62017-01-12 10:17:53 -08001725 return QDF_STATUS_SUCCESS;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07001726
1727fail1:
1728 qdf_nbuf_free(htt_msg);
1729fail0:
1730 return QDF_STATUS_E_FAILURE;
1731}
1732
Pranita Solanke05862962019-01-09 11:39:29 +05301733#if defined(HTT_STATS_ENABLE)
Chaithanya Garrepalli30927c52017-11-22 14:31:47 +05301734static inline QDF_STATUS dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
1735 struct dp_soc *soc, qdf_nbuf_t htt_msg)
1736
1737{
1738 uint32_t pdev_id;
1739 uint32_t *msg_word = NULL;
1740 uint32_t msg_remain_len = 0;
1741
1742 msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
1743
1744 /*COOKIE MSB*/
Prathyusha Guduri43bb0562018-02-12 18:30:54 +05301745 pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
Chaithanya Garrepalli30927c52017-11-22 14:31:47 +05301746
1747 /* stats message length + 16 size of HTT header*/
1748 msg_remain_len = qdf_min(htt_stats->msg_len + 16,
1749 (uint32_t)DP_EXT_MSG_LENGTH);
1750
1751 dp_wdi_event_handler(WDI_EVENT_HTT_STATS, soc,
1752 msg_word, msg_remain_len,
1753 WDI_NO_VAL, pdev_id);
1754
1755 if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
1756 htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
1757 }
1758 /* Need to be freed here as WDI handler will
1759 * make a copy of pkt to send data to application
1760 */
1761 qdf_nbuf_free(htt_msg);
1762 return QDF_STATUS_SUCCESS;
1763}
1764#else
Pranita Solanke05862962019-01-09 11:39:29 +05301765static inline QDF_STATUS
1766dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
1767 struct dp_soc *soc, qdf_nbuf_t htt_msg)
Chaithanya Garrepalli30927c52017-11-22 14:31:47 +05301768{
1769 return QDF_STATUS_E_NOSUPPORT;
1770}
1771#endif
Ishank Jain6290a3c2017-03-21 10:49:39 +05301772/**
1773 * dp_process_htt_stat_msg(): Process the list of buffers of HTT EXT stats
Om Prakash Tripathi12126822017-08-03 10:21:24 +05301774 * @htt_stats: htt stats info
Ishank Jain6290a3c2017-03-21 10:49:39 +05301775 *
1776 * The FW sends the HTT EXT STATS as a stream of T2H messages. Each T2H message
1777 * contains sub messages which are identified by a TLV header.
1778 * In this function we will process the stream of T2H messages and read all the
1779 * TLV contained in the message.
1780 *
1781 * THe following cases have been taken care of
1782 * Case 1: When the tlv_remain_length <= msg_remain_length of HTT MSG buffer
1783 * In this case the buffer will contain multiple tlvs.
1784 * Case 2: When the tlv_remain_length > msg_remain_length of HTT MSG buffer.
1785 * Only one tlv will be contained in the HTT message and this tag
1786 * will extend onto the next buffer.
1787 * Case 3: When the buffer is the continuation of the previous message
1788 * Case 4: tlv length is 0. which will indicate the end of message
1789 *
1790 * return: void
1791 */
Chaithanya Garrepalli30927c52017-11-22 14:31:47 +05301792static inline void dp_process_htt_stat_msg(struct htt_stats_context *htt_stats,
1793 struct dp_soc *soc)
Ishank Jain6290a3c2017-03-21 10:49:39 +05301794{
1795 htt_tlv_tag_t tlv_type = 0xff;
1796 qdf_nbuf_t htt_msg = NULL;
1797 uint32_t *msg_word;
1798 uint8_t *tlv_buf_head = NULL;
1799 uint8_t *tlv_buf_tail = NULL;
1800 uint32_t msg_remain_len = 0;
1801 uint32_t tlv_remain_len = 0;
1802 uint32_t *tlv_start;
Chaithanya Garrepalli30927c52017-11-22 14:31:47 +05301803 int cookie_val;
Prathyusha Guduri43bb0562018-02-12 18:30:54 +05301804 int cookie_msb;
1805 int pdev_id;
1806 bool copy_stats = false;
1807 struct dp_pdev *pdev;
Ishank Jain6290a3c2017-03-21 10:49:39 +05301808
1809 /* Process node in the HTT message queue */
Om Prakash Tripathi12126822017-08-03 10:21:24 +05301810 while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
1811 != NULL) {
Ishank Jain6290a3c2017-03-21 10:49:39 +05301812 msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
Chaithanya Garrepalli30927c52017-11-22 14:31:47 +05301813 cookie_val = *(msg_word + 1);
Venkata Sharath Chandra Manchalaf5e25702018-04-26 08:41:04 -07001814 htt_stats->msg_len = HTT_T2H_EXT_STATS_CONF_TLV_LENGTH_GET(
1815 *(msg_word +
1816 HTT_T2H_EXT_STATS_TLV_START_OFFSET));
1817
Chaithanya Garrepalli30927c52017-11-22 14:31:47 +05301818 if (cookie_val) {
1819 if (dp_send_htt_stat_resp(htt_stats, soc, htt_msg)
1820 == QDF_STATUS_SUCCESS) {
1821 continue;
1822 }
1823 }
Venkata Sharath Chandra Manchalae37bc462018-02-22 22:03:27 -08001824
Prathyusha Guduri43bb0562018-02-12 18:30:54 +05301825 cookie_msb = *(msg_word + 2);
1826 pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
1827 pdev = soc->pdev_list[pdev_id];
1828
1829 if (cookie_msb >> 2) {
1830 copy_stats = true;
1831 }
Venkata Sharath Chandra Manchalae37bc462018-02-22 22:03:27 -08001832
Ishank Jain6290a3c2017-03-21 10:49:39 +05301833 /* read 5th word */
1834 msg_word = msg_word + 4;
Om Prakash Tripathi12126822017-08-03 10:21:24 +05301835 msg_remain_len = qdf_min(htt_stats->msg_len,
1836 (uint32_t) DP_EXT_MSG_LENGTH);
Ishank Jain6290a3c2017-03-21 10:49:39 +05301837 /* Keep processing the node till node length is 0 */
1838 while (msg_remain_len) {
1839 /*
1840 * if message is not a continuation of previous message
1841 * read the tlv type and tlv length
1842 */
1843 if (!tlv_buf_head) {
1844 tlv_type = HTT_STATS_TLV_TAG_GET(
1845 *msg_word);
1846 tlv_remain_len = HTT_STATS_TLV_LENGTH_GET(
1847 *msg_word);
1848 }
1849
1850 if (tlv_remain_len == 0) {
1851 msg_remain_len = 0;
1852
1853 if (tlv_buf_head) {
1854 qdf_mem_free(tlv_buf_head);
1855 tlv_buf_head = NULL;
1856 tlv_buf_tail = NULL;
1857 }
1858
1859 goto error;
1860 }
1861
chenguo26495542017-12-14 21:56:46 +08001862 if (!tlv_buf_head)
1863 tlv_remain_len += HTT_TLV_HDR_LEN;
Ishank Jain6290a3c2017-03-21 10:49:39 +05301864
1865 if ((tlv_remain_len <= msg_remain_len)) {
1866 /* Case 3 */
1867 if (tlv_buf_head) {
1868 qdf_mem_copy(tlv_buf_tail,
1869 (uint8_t *)msg_word,
1870 tlv_remain_len);
1871 tlv_start = (uint32_t *)tlv_buf_head;
1872 } else {
1873 /* Case 1 */
1874 tlv_start = msg_word;
1875 }
1876
Prathyusha Guduri43bb0562018-02-12 18:30:54 +05301877 if (copy_stats)
nobelj14531642019-06-25 17:41:55 -07001878 dp_htt_stats_copy_tag(pdev,
1879 tlv_type,
1880 tlv_start);
Prathyusha Guduri43bb0562018-02-12 18:30:54 +05301881 else
nobelj14531642019-06-25 17:41:55 -07001882 dp_htt_stats_print_tag(pdev,
1883 tlv_type,
1884 tlv_start);
Ishank Jain6290a3c2017-03-21 10:49:39 +05301885
Amir Patel1ea85d42019-01-09 15:19:10 +05301886 if (tlv_type == HTT_STATS_PEER_DETAILS_TAG ||
1887 tlv_type == HTT_STATS_PEER_STATS_CMN_TAG)
1888 dp_peer_update_inactive_time(pdev,
1889 tlv_type,
1890 tlv_start);
1891
Ishank Jain6290a3c2017-03-21 10:49:39 +05301892 msg_remain_len -= tlv_remain_len;
1893
1894 msg_word = (uint32_t *)
1895 (((uint8_t *)msg_word) +
1896 tlv_remain_len);
1897
1898 tlv_remain_len = 0;
1899
1900 if (tlv_buf_head) {
1901 qdf_mem_free(tlv_buf_head);
1902 tlv_buf_head = NULL;
1903 tlv_buf_tail = NULL;
1904 }
1905
1906 } else { /* tlv_remain_len > msg_remain_len */
1907 /* Case 2 & 3 */
1908 if (!tlv_buf_head) {
1909 tlv_buf_head = qdf_mem_malloc(
1910 tlv_remain_len);
1911
1912 if (!tlv_buf_head) {
1913 QDF_TRACE(QDF_MODULE_ID_TXRX,
1914 QDF_TRACE_LEVEL_ERROR,
1915 "Alloc failed");
1916 goto error;
1917 }
1918
1919 tlv_buf_tail = tlv_buf_head;
1920 }
1921
1922 qdf_mem_copy(tlv_buf_tail, (uint8_t *)msg_word,
1923 msg_remain_len);
1924 tlv_remain_len -= msg_remain_len;
1925 tlv_buf_tail += msg_remain_len;
Ishank Jain6290a3c2017-03-21 10:49:39 +05301926 }
1927 }
1928
Om Prakash Tripathi12126822017-08-03 10:21:24 +05301929 if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
1930 htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
Ishank Jain6290a3c2017-03-21 10:49:39 +05301931 }
1932
1933 qdf_nbuf_free(htt_msg);
1934 }
Ishank Jain6290a3c2017-03-21 10:49:39 +05301935 return;
1936
1937error:
1938 qdf_nbuf_free(htt_msg);
Om Prakash Tripathi12126822017-08-03 10:21:24 +05301939 while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
Ishank Jain6290a3c2017-03-21 10:49:39 +05301940 != NULL)
1941 qdf_nbuf_free(htt_msg);
1942}
1943
Om Prakash Tripathi2cd7fab2017-07-07 20:27:25 +05301944void htt_t2h_stats_handler(void *context)
1945{
1946 struct dp_soc *soc = (struct dp_soc *)context;
Om Prakash Tripathi12126822017-08-03 10:21:24 +05301947 struct htt_stats_context htt_stats;
Om Prakash Tripathi12126822017-08-03 10:21:24 +05301948 uint32_t *msg_word;
1949 qdf_nbuf_t htt_msg = NULL;
1950 uint8_t done;
Ankit Kumarf3557ff2019-10-12 15:47:23 +05301951 uint32_t rem_stats;
Om Prakash Tripathi2cd7fab2017-07-07 20:27:25 +05301952
Om Prakash Tripathi12126822017-08-03 10:21:24 +05301953 if (!soc || !qdf_atomic_read(&soc->cmn_init_done)) {
1954 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
Jeff Johnson3f217e22017-09-18 10:13:35 -07001955 "soc: 0x%pK, init_done: %d", soc,
Om Prakash Tripathi12126822017-08-03 10:21:24 +05301956 qdf_atomic_read(&soc->cmn_init_done));
1957 return;
1958 }
Om Prakash Tripathi2cd7fab2017-07-07 20:27:25 +05301959
Om Prakash Tripathi12126822017-08-03 10:21:24 +05301960 qdf_mem_zero(&htt_stats, sizeof(htt_stats));
1961 qdf_nbuf_queue_init(&htt_stats.msg);
1962
1963 /* pull one completed stats from soc->htt_stats_msg and process */
1964 qdf_spin_lock_bh(&soc->htt_stats.lock);
1965 if (!soc->htt_stats.num_stats) {
1966 qdf_spin_unlock_bh(&soc->htt_stats.lock);
1967 return;
1968 }
1969 while ((htt_msg = qdf_nbuf_queue_remove(&soc->htt_stats.msg)) != NULL) {
1970 msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
1971 msg_word = msg_word + HTT_T2H_EXT_STATS_TLV_START_OFFSET;
Om Prakash Tripathi12126822017-08-03 10:21:24 +05301972 done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
1973 qdf_nbuf_queue_add(&htt_stats.msg, htt_msg);
1974 /*
Om Prakash Tripathi12126822017-08-03 10:21:24 +05301975 * Done bit signifies that this is the last T2H buffer in the
1976 * stream of HTT EXT STATS message
1977 */
1978 if (done)
1979 break;
1980 }
1981 rem_stats = --soc->htt_stats.num_stats;
1982 qdf_spin_unlock_bh(&soc->htt_stats.lock);
1983
Ankit Kumarf3557ff2019-10-12 15:47:23 +05301984 /* If there are more stats to process, schedule stats work again.
1985 * Scheduling prior to processing ht_stats to queue with early
1986 * index
1987 */
Om Prakash Tripathi12126822017-08-03 10:21:24 +05301988 if (rem_stats)
1989 qdf_sched_work(0, &soc->htt_stats.work);
Ankit Kumarf3557ff2019-10-12 15:47:23 +05301990
1991 dp_process_htt_stat_msg(&htt_stats, soc);
Om Prakash Tripathi2cd7fab2017-07-07 20:27:25 +05301992}
1993
Soumya Bhat539ecfa2017-09-08 12:50:30 +05301994/*
Anish Natarajb9e7d012018-02-16 00:38:10 +05301995 * dp_get_ppdu_info_user_index: Find and allocate a per-user descriptor for a PPDU,
1996 * if a new peer id arrives in a PPDU
Soumya Bhat539ecfa2017-09-08 12:50:30 +05301997 * pdev: DP pdev handle
Anish Natarajb9e7d012018-02-16 00:38:10 +05301998 * @peer_id : peer unique identifier
1999 * @ppdu_info: per ppdu tlv structure
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302000 *
2001 * return:user index to be populated
2002 */
2003#ifdef FEATURE_PERPKT_INFO
2004static uint8_t dp_get_ppdu_info_user_index(struct dp_pdev *pdev,
Anish Natarajb9e7d012018-02-16 00:38:10 +05302005 uint16_t peer_id,
2006 struct ppdu_info *ppdu_info)
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302007{
2008 uint8_t user_index = 0;
2009 struct cdp_tx_completion_ppdu *ppdu_desc;
2010 struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2011
Anish Natarajb9e7d012018-02-16 00:38:10 +05302012 ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302013
Anish Natarajb9e7d012018-02-16 00:38:10 +05302014 while ((user_index + 1) <= ppdu_info->last_user) {
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302015 ppdu_user_desc = &ppdu_desc->user[user_index];
2016 if (ppdu_user_desc->peer_id != peer_id) {
2017 user_index++;
2018 continue;
2019 } else {
Soumya Bhat835033e2017-10-04 22:21:46 +05302020 /* Max users possible is 8 so user array index should
2021 * not exceed 7
2022 */
2023 qdf_assert_always(user_index <= CDP_MU_MAX_USER_INDEX);
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302024 return user_index;
2025 }
2026 }
2027
Anish Natarajb9e7d012018-02-16 00:38:10 +05302028 ppdu_info->last_user++;
Soumya Bhat835033e2017-10-04 22:21:46 +05302029 /* Max users possible is 8 so last user should not exceed 8 */
Anish Natarajb9e7d012018-02-16 00:38:10 +05302030 qdf_assert_always(ppdu_info->last_user <= CDP_MU_MAX_USERS);
2031 return ppdu_info->last_user - 1;
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302032}
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302033
2034/*
2035 * dp_process_ppdu_stats_common_tlv: Process htt_ppdu_stats_common_tlv
2036 * pdev: DP pdev handle
2037 * @tag_buf: buffer containing the tlv htt_ppdu_stats_common_tlv
Anish Natarajb9e7d012018-02-16 00:38:10 +05302038 * @ppdu_info: per ppdu tlv structure
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302039 *
2040 * return:void
2041 */
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302042static void dp_process_ppdu_stats_common_tlv(struct dp_pdev *pdev,
Anish Natarajb9e7d012018-02-16 00:38:10 +05302043 uint32_t *tag_buf, struct ppdu_info *ppdu_info)
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302044{
Pamidipati, Vijayd7eb83e2017-09-20 21:19:56 +05302045 uint16_t frame_type;
nobelj57055e52019-07-11 00:38:49 -07002046 uint16_t frame_ctrl;
Venkateswara Swamy Bandaru2907bc52017-11-15 19:04:49 +05302047 uint16_t freq;
2048 struct dp_soc *soc = NULL;
Anish Natarajb9e7d012018-02-16 00:38:10 +05302049 struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
nobelj182938a2019-11-25 14:09:08 -08002050 uint64_t ppdu_start_timestamp;
2051 uint32_t *start_tag_buf;
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302052
nobelj182938a2019-11-25 14:09:08 -08002053 start_tag_buf = tag_buf;
Anish Natarajb9e7d012018-02-16 00:38:10 +05302054 ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302055
nobelj182938a2019-11-25 14:09:08 -08002056 tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(RING_ID_SCH_CMD_ID);
nobeljdebe2b32019-04-23 11:18:47 -07002057 ppdu_info->sched_cmdid =
2058 HTT_PPDU_STATS_COMMON_TLV_SCH_CMDID_GET(*tag_buf);
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302059 ppdu_desc->num_users =
2060 HTT_PPDU_STATS_COMMON_TLV_NUM_USERS_GET(*tag_buf);
nobelj182938a2019-11-25 14:09:08 -08002061
2062 tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(QTYPE_FRM_TYPE);
Pamidipati, Vijayd7eb83e2017-09-20 21:19:56 +05302063 frame_type = HTT_PPDU_STATS_COMMON_TLV_FRM_TYPE_GET(*tag_buf);
nobelj68930ca2019-10-03 17:22:47 -07002064 ppdu_desc->htt_frame_type = frame_type;
Pamidipati, Vijayd7eb83e2017-09-20 21:19:56 +05302065
nobelj57055e52019-07-11 00:38:49 -07002066 frame_ctrl = ppdu_desc->frame_ctrl;
2067
nobeljdebe2b32019-04-23 11:18:47 -07002068 switch (frame_type) {
2069 case HTT_STATS_FTYPE_TIDQ_DATA_SU:
2070 case HTT_STATS_FTYPE_TIDQ_DATA_MU:
nobelj219e7e52019-10-18 13:53:12 -07002071 case HTT_STATS_FTYPE_SGEN_QOS_NULL:
nobeljdebe2b32019-04-23 11:18:47 -07002072 /*
2073 * for management packet, frame type come as DATA_SU
2074 * need to check frame_ctrl before setting frame_type
2075 */
nobelj57055e52019-07-11 00:38:49 -07002076 if (HTT_GET_FRAME_CTRL_TYPE(frame_ctrl) <= FRAME_CTRL_TYPE_CTRL)
nobeljdebe2b32019-04-23 11:18:47 -07002077 ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL;
2078 else
2079 ppdu_desc->frame_type = CDP_PPDU_FTYPE_DATA;
2080 break;
2081 case HTT_STATS_FTYPE_SGEN_MU_BAR:
2082 case HTT_STATS_FTYPE_SGEN_BAR:
Chaithanya Garrepallibe9d5fc2018-07-26 19:21:24 +05302083 ppdu_desc->frame_type = CDP_PPDU_FTYPE_BAR;
nobelj68930ca2019-10-03 17:22:47 -07002084 ppdu_desc->bar_ppdu_id = ppdu_info->ppdu_id;
nobeljdebe2b32019-04-23 11:18:47 -07002085 break;
2086 default:
Pamidipati, Vijayd7eb83e2017-09-20 21:19:56 +05302087 ppdu_desc->frame_type = CDP_PPDU_FTYPE_CTRL;
nobeljdebe2b32019-04-23 11:18:47 -07002088 break;
2089 }
Pamidipati, Vijayd7eb83e2017-09-20 21:19:56 +05302090
nobelj182938a2019-11-25 14:09:08 -08002091 tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(FES_DUR_US);
Pranita Solankea12b4b32017-11-20 23:04:14 +05302092 ppdu_desc->tx_duration = *tag_buf;
nobelj182938a2019-11-25 14:09:08 -08002093
2094 tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(START_TSTMP_L32_US);
Pranita Solankea12b4b32017-11-20 23:04:14 +05302095 ppdu_desc->ppdu_start_timestamp = *tag_buf;
nobelj8c07d612018-03-01 12:18:07 -08002096
nobelj182938a2019-11-25 14:09:08 -08002097 tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(CHAN_MHZ_PHY_MODE);
Venkateswara Swamy Bandaru2907bc52017-11-15 19:04:49 +05302098 freq = HTT_PPDU_STATS_COMMON_TLV_CHAN_MHZ_GET(*tag_buf);
2099 if (freq != ppdu_desc->channel) {
2100 soc = pdev->soc;
2101 ppdu_desc->channel = freq;
2102 if (soc && soc->cdp_soc.ol_ops->freq_to_channel)
2103 pdev->operating_channel =
Pavankumar Nandeshwar4c7b81b2019-09-27 11:27:12 +05302104 soc->cdp_soc.ol_ops->freq_to_channel(soc->ctrl_psoc,
2105 pdev->pdev_id, freq);
Venkateswara Swamy Bandaru2907bc52017-11-15 19:04:49 +05302106 }
Pranita Solankea12b4b32017-11-20 23:04:14 +05302107
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302108 ppdu_desc->phy_mode = HTT_PPDU_STATS_COMMON_TLV_PHY_MODE_GET(*tag_buf);
nobelj4771edb2019-12-09 12:47:41 -08002109
2110 dp_tx_capture_htt_frame_counter(pdev, frame_type);
nobelj182938a2019-11-25 14:09:08 -08002111
2112 tag_buf = start_tag_buf + HTT_GET_STATS_CMN_INDEX(START_TSTMP_U32_US);
2113 ppdu_start_timestamp = *tag_buf;
2114 ppdu_desc->ppdu_start_timestamp |= ((ppdu_start_timestamp <<
2115 HTT_SHIFT_UPPER_TIMESTAMP) &
2116 HTT_MASK_UPPER_TIMESTAMP);
2117
2118 ppdu_desc->ppdu_end_timestamp = ppdu_desc->ppdu_start_timestamp +
2119 ppdu_desc->tx_duration;
2120 /* Ack time stamp is same as end time stamp*/
2121 ppdu_desc->ack_timestamp = ppdu_desc->ppdu_end_timestamp;
2122
2123 ppdu_desc->ppdu_end_timestamp = ppdu_desc->ppdu_start_timestamp +
2124 ppdu_desc->tx_duration;
2125
2126 ppdu_desc->bar_ppdu_start_timestamp = ppdu_desc->ppdu_start_timestamp;
2127 ppdu_desc->bar_ppdu_end_timestamp = ppdu_desc->ppdu_end_timestamp;
2128 ppdu_desc->bar_tx_duration = ppdu_desc->tx_duration;
2129
2130 /* Ack time stamp is same as end time stamp*/
2131 ppdu_desc->ack_timestamp = ppdu_desc->ppdu_end_timestamp;
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302132}
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302133
2134/*
2135 * dp_process_ppdu_stats_user_common_tlv: Process ppdu_stats_user_common
2136 * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_common_tlv
Anish Natarajb9e7d012018-02-16 00:38:10 +05302137 * @ppdu_info: per ppdu tlv structure
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302138 *
2139 * return:void
2140 */
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302141static void dp_process_ppdu_stats_user_common_tlv(
Anish Natarajb9e7d012018-02-16 00:38:10 +05302142 struct dp_pdev *pdev, uint32_t *tag_buf,
2143 struct ppdu_info *ppdu_info)
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302144{
Soumya Bhat835033e2017-10-04 22:21:46 +05302145 uint16_t peer_id;
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302146 struct cdp_tx_completion_ppdu *ppdu_desc;
2147 struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2148 uint8_t curr_user_index = 0;
nobelj68930ca2019-10-03 17:22:47 -07002149 struct dp_peer *peer;
2150 struct dp_vdev *vdev;
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302151
Anish Nataraj37b64952018-08-03 22:11:13 +05302152 ppdu_desc =
2153 (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302154
2155 tag_buf++;
Anish Nataraj37b64952018-08-03 22:11:13 +05302156 peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf);
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302157
Anish Nataraj37b64952018-08-03 22:11:13 +05302158 curr_user_index =
2159 dp_get_ppdu_info_user_index(pdev,
2160 peer_id, ppdu_info);
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302161 ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2162
Anish Nataraj37b64952018-08-03 22:11:13 +05302163 if (peer_id == DP_SCAN_PEER_ID) {
2164 ppdu_desc->vdev_id =
2165 HTT_PPDU_STATS_USER_COMMON_TLV_VAP_ID_GET(*tag_buf);
nobelj68930ca2019-10-03 17:22:47 -07002166 vdev =
2167 dp_get_vdev_from_soc_vdev_id_wifi3(pdev->soc,
2168 ppdu_desc->vdev_id);
2169 if (!vdev)
Anish Nataraj37b64952018-08-03 22:11:13 +05302170 return;
nobelj68930ca2019-10-03 17:22:47 -07002171 qdf_mem_copy(ppdu_user_desc->mac_addr, vdev->mac_addr.raw,
2172 QDF_MAC_ADDR_SIZE);
2173 } else {
2174 peer = dp_peer_find_by_id(pdev->soc, peer_id);
2175 if (!peer)
2176 return;
2177 qdf_mem_copy(ppdu_user_desc->mac_addr,
2178 peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
2179 dp_peer_unref_del_find_by_id(peer);
Anish Nataraj37b64952018-08-03 22:11:13 +05302180 }
2181
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302182 ppdu_user_desc->peer_id = peer_id;
2183
2184 tag_buf++;
2185
Chaithanya Garrepallibe9d5fc2018-07-26 19:21:24 +05302186 if (HTT_PPDU_STATS_USER_COMMON_TLV_DELAYED_BA_GET(*tag_buf)) {
2187 ppdu_user_desc->delayed_ba = 1;
nobelj7b0e2732019-05-31 00:19:07 -07002188 ppdu_desc->delayed_ba = 1;
Chaithanya Garrepallibe9d5fc2018-07-26 19:21:24 +05302189 }
2190
Soumya Bhat1c73aa62017-09-20 22:18:22 +05302191 if (HTT_PPDU_STATS_USER_COMMON_TLV_MCAST_GET(*tag_buf)) {
2192 ppdu_user_desc->is_mcast = true;
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302193 ppdu_user_desc->mpdu_tried_mcast =
2194 HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf);
Anish Nataraj0dae6762018-03-02 22:31:45 +05302195 ppdu_user_desc->num_mpdu = ppdu_user_desc->mpdu_tried_mcast;
Soumya Bhat1c73aa62017-09-20 22:18:22 +05302196 } else {
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302197 ppdu_user_desc->mpdu_tried_ucast =
2198 HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(*tag_buf);
Soumya Bhat606fb392017-10-27 12:42:45 +05302199 }
2200
2201 tag_buf++;
2202
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302203 ppdu_user_desc->qos_ctrl =
2204 HTT_PPDU_STATS_USER_COMMON_TLV_QOS_CTRL_GET(*tag_buf);
2205 ppdu_user_desc->frame_ctrl =
2206 HTT_PPDU_STATS_USER_COMMON_TLV_FRAME_CTRL_GET(*tag_buf);
Anish Natarajb9e7d012018-02-16 00:38:10 +05302207 ppdu_desc->frame_ctrl = ppdu_user_desc->frame_ctrl;
Chaithanya Garrepallibe9d5fc2018-07-26 19:21:24 +05302208
nobelj7b0e2732019-05-31 00:19:07 -07002209 if (ppdu_user_desc->delayed_ba)
Chaithanya Garrepallibe9d5fc2018-07-26 19:21:24 +05302210 ppdu_user_desc->mpdu_success = 0;
Varsha Mishra27c5bd32019-05-28 11:54:46 +05302211
2212 tag_buf += 3;
2213
2214 if (HTT_PPDU_STATS_IS_OPAQUE_VALID_GET(*tag_buf)) {
2215 ppdu_user_desc->ppdu_cookie =
2216 HTT_PPDU_STATS_HOST_OPAQUE_COOKIE_GET(*tag_buf);
2217 ppdu_user_desc->is_ppdu_cookie_valid = 1;
2218 }
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302219}
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302220
2221
/**
 * dp_process_ppdu_stats_user_rate_tlv() - Process htt_ppdu_stats_user_rate_tlv
 * @pdev: DP pdev handle
 * @tag_buf: T2H message buffer carrying the user rate TLV
 * @ppdu_info: per ppdu tlv structure
 *
 * Walks the user-rate TLV word by word (tag_buf is advanced to match the
 * firmware TLV word layout implied by the HTT_PPDU_STATS_USER_RATE_TLV_*
 * accessor macros) and fills the per-user rate fields of the tx completion
 * PPDU descriptor held in ppdu_info->nbuf.
 *
 * return:void
 */
static void dp_process_ppdu_stats_user_rate_tlv(struct dp_pdev *pdev,
		uint32_t *tag_buf,
		struct ppdu_info *ppdu_info)
{
	uint16_t peer_id;
	struct dp_peer *peer;
	struct cdp_tx_completion_ppdu *ppdu_desc;
	struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
	uint8_t curr_user_index = 0;
	struct dp_vdev *vdev;

	ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);

	/* word 1: sw peer id + tid */
	tag_buf++;
	peer_id = HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(*tag_buf);

	curr_user_index =
		dp_get_ppdu_info_user_index(pdev,
					    peer_id, ppdu_info);
	ppdu_user_desc = &ppdu_desc->user[curr_user_index];
	if (peer_id == DP_SCAN_PEER_ID) {
		/* scan-frame stats carry no real peer; just validate vdev */
		vdev =
		       dp_get_vdev_from_soc_vdev_id_wifi3(pdev->soc,
							  ppdu_desc->vdev_id);
		if (!vdev)
			return;
	} else {
		/* existence check only: ref is dropped immediately */
		peer = dp_peer_find_by_id(pdev->soc, peer_id);
		if (!peer)
			return;
		dp_peer_unref_del_find_by_id(peer);
	}

	ppdu_user_desc->peer_id = peer_id;

	ppdu_user_desc->tid =
		HTT_PPDU_STATS_USER_RATE_TLV_TID_NUM_GET(*tag_buf);

	/* word 2: user position + MU group id */
	tag_buf += 1;

	ppdu_user_desc->user_pos =
		HTT_PPDU_STATS_USER_RATE_TLV_USER_POS_GET(*tag_buf);
	ppdu_user_desc->mu_group_id =
		HTT_PPDU_STATS_USER_RATE_TLV_MU_GROUPID_GET(*tag_buf);

	/* word 3: RU allocation; tones derived from inclusive start..end */
	tag_buf += 1;

	ppdu_user_desc->ru_start =
		HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf);
	ppdu_user_desc->ru_tones =
		(HTT_PPDU_STATS_USER_RATE_TLV_RU_END_GET(*tag_buf) -
		HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(*tag_buf)) + 1;

	/* skip two words to the ppdu type word */
	tag_buf += 2;

	ppdu_user_desc->ppdu_type =
		HTT_PPDU_STATS_USER_RATE_TLV_PPDU_TYPE_GET(*tag_buf);

	/* rate word: raw value kept, then individual bitfields decoded */
	tag_buf++;
	ppdu_user_desc->tx_rate = *tag_buf;

	ppdu_user_desc->ltf_size =
		HTT_PPDU_STATS_USER_RATE_TLV_LTF_SIZE_GET(*tag_buf);
	ppdu_user_desc->stbc =
		HTT_PPDU_STATS_USER_RATE_TLV_STBC_GET(*tag_buf);
	ppdu_user_desc->he_re =
		HTT_PPDU_STATS_USER_RATE_TLV_HE_RE_GET(*tag_buf);
	ppdu_user_desc->txbf =
		HTT_PPDU_STATS_USER_RATE_TLV_TXBF_GET(*tag_buf);
	/* -2 converts the HTT bw encoding to the CDP bw enum —
	 * NOTE(review): offset taken on faith from the original; verify
	 * against the HTT/CDP bandwidth enum definitions.
	 */
	ppdu_user_desc->bw =
		HTT_PPDU_STATS_USER_RATE_TLV_BW_GET(*tag_buf) - 2;
	ppdu_user_desc->nss = HTT_PPDU_STATS_USER_RATE_TLV_NSS_GET(*tag_buf);
	ppdu_user_desc->mcs = HTT_PPDU_STATS_USER_RATE_TLV_MCS_GET(*tag_buf);
	ppdu_user_desc->preamble =
		HTT_PPDU_STATS_USER_RATE_TLV_PREAMBLE_GET(*tag_buf);
	ppdu_user_desc->gi = HTT_PPDU_STATS_USER_RATE_TLV_GI_GET(*tag_buf);
	ppdu_user_desc->dcm = HTT_PPDU_STATS_USER_RATE_TLV_DCM_GET(*tag_buf);
	ppdu_user_desc->ldpc = HTT_PPDU_STATS_USER_RATE_TLV_LDPC_GET(*tag_buf);
}
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302309
2310/*
2311 * dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv: Process
2312 * htt_ppdu_stats_enq_mpdu_bitmap_64_tlv
Soumya Bhat1c73aa62017-09-20 22:18:22 +05302313 * pdev: DP PDEV handle
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302314 * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_64_tlv
Anish Natarajb9e7d012018-02-16 00:38:10 +05302315 * @ppdu_info: per ppdu tlv structure
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302316 *
2317 * return:void
2318 */
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302319static void dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv(
Anish Natarajb9e7d012018-02-16 00:38:10 +05302320 struct dp_pdev *pdev, uint32_t *tag_buf,
2321 struct ppdu_info *ppdu_info)
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302322{
2323 htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *dp_stats_buf =
2324 (htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *)tag_buf;
2325
2326 struct cdp_tx_completion_ppdu *ppdu_desc;
2327 struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2328 uint8_t curr_user_index = 0;
Soumya Bhat835033e2017-10-04 22:21:46 +05302329 uint16_t peer_id;
nobeljdebe2b32019-04-23 11:18:47 -07002330 uint32_t size = CDP_BA_64_BIT_MAP_SIZE_DWORDS;
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302331
Anish Natarajb9e7d012018-02-16 00:38:10 +05302332 ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302333
2334 tag_buf++;
2335
2336 peer_id =
Soumya Bhat835033e2017-10-04 22:21:46 +05302337 HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302338
Sravan Kumar Kairam26d471e2018-08-14 23:51:58 +05302339 if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302340 return;
2341
Anish Natarajb9e7d012018-02-16 00:38:10 +05302342 curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302343
2344 ppdu_user_desc = &ppdu_desc->user[curr_user_index];
Soumya Bhat835033e2017-10-04 22:21:46 +05302345 ppdu_user_desc->peer_id = peer_id;
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302346
2347 ppdu_user_desc->start_seq = dp_stats_buf->start_seq;
2348 qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap,
nobeljdebe2b32019-04-23 11:18:47 -07002349 sizeof(uint32_t) * CDP_BA_64_BIT_MAP_SIZE_DWORDS);
2350
2351 dp_process_ppdu_stats_update_failed_bitmap(pdev,
2352 (void *)ppdu_user_desc,
2353 ppdu_info->ppdu_id,
2354 size);
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302355}
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302356
2357/*
2358 * dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv: Process
2359 * htt_ppdu_stats_enq_mpdu_bitmap_256_tlv
2360 * soc: DP SOC handle
2361 * @tag_buf: buffer containing the tlv htt_ppdu_stats_enq_mpdu_bitmap_256_tlv
Anish Natarajb9e7d012018-02-16 00:38:10 +05302362 * @ppdu_info: per ppdu tlv structure
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302363 *
2364 * return:void
2365 */
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302366static void dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv(
Anish Natarajb9e7d012018-02-16 00:38:10 +05302367 struct dp_pdev *pdev, uint32_t *tag_buf,
2368 struct ppdu_info *ppdu_info)
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302369{
2370 htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *dp_stats_buf =
2371 (htt_ppdu_stats_enq_mpdu_bitmap_256_tlv *)tag_buf;
2372
2373 struct cdp_tx_completion_ppdu *ppdu_desc;
2374 struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2375 uint8_t curr_user_index = 0;
Soumya Bhat835033e2017-10-04 22:21:46 +05302376 uint16_t peer_id;
nobeljdebe2b32019-04-23 11:18:47 -07002377 uint32_t size = CDP_BA_256_BIT_MAP_SIZE_DWORDS;
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302378
Anish Natarajb9e7d012018-02-16 00:38:10 +05302379 ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302380
2381 tag_buf++;
2382
2383 peer_id =
Soumya Bhat835033e2017-10-04 22:21:46 +05302384 HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302385
Sravan Kumar Kairam26d471e2018-08-14 23:51:58 +05302386 if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302387 return;
2388
Anish Natarajb9e7d012018-02-16 00:38:10 +05302389 curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302390
2391 ppdu_user_desc = &ppdu_desc->user[curr_user_index];
Soumya Bhat835033e2017-10-04 22:21:46 +05302392 ppdu_user_desc->peer_id = peer_id;
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302393
2394 ppdu_user_desc->start_seq = dp_stats_buf->start_seq;
2395 qdf_mem_copy(&ppdu_user_desc->enq_bitmap, &dp_stats_buf->enq_bitmap,
nobeljdebe2b32019-04-23 11:18:47 -07002396 sizeof(uint32_t) * CDP_BA_256_BIT_MAP_SIZE_DWORDS);
2397
2398 dp_process_ppdu_stats_update_failed_bitmap(pdev,
2399 (void *)ppdu_user_desc,
2400 ppdu_info->ppdu_id,
2401 size);
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302402}
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302403
2404/*
2405 * dp_process_ppdu_stats_user_cmpltn_common_tlv: Process
2406 * htt_ppdu_stats_user_cmpltn_common_tlv
2407 * soc: DP SOC handle
2408 * @tag_buf: buffer containing the tlv htt_ppdu_stats_user_cmpltn_common_tlv
Anish Natarajb9e7d012018-02-16 00:38:10 +05302409 * @ppdu_info: per ppdu tlv structure
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302410 *
2411 * return:void
2412 */
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302413static void dp_process_ppdu_stats_user_cmpltn_common_tlv(
Anish Natarajb9e7d012018-02-16 00:38:10 +05302414 struct dp_pdev *pdev, uint32_t *tag_buf,
2415 struct ppdu_info *ppdu_info)
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302416{
Soumya Bhat835033e2017-10-04 22:21:46 +05302417 uint16_t peer_id;
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302418 struct cdp_tx_completion_ppdu *ppdu_desc;
2419 struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2420 uint8_t curr_user_index = 0;
Ankit Kumarcd66fff2019-07-02 20:54:44 +05302421 uint8_t bw_iter;
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302422 htt_ppdu_stats_user_cmpltn_common_tlv *dp_stats_buf =
2423 (htt_ppdu_stats_user_cmpltn_common_tlv *)tag_buf;
2424
Anish Natarajb9e7d012018-02-16 00:38:10 +05302425 ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302426
2427 tag_buf++;
2428 peer_id =
2429 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SW_PEER_ID_GET(*tag_buf);
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302430
Sravan Kumar Kairam26d471e2018-08-14 23:51:58 +05302431 if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302432 return;
2433
Anish Natarajb9e7d012018-02-16 00:38:10 +05302434 curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302435 ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2436 ppdu_user_desc->peer_id = peer_id;
Santosh Anbu4de9ffb2019-03-01 17:20:29 +05302437 ppdu_desc->last_usr_index = curr_user_index;
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302438
2439 ppdu_user_desc->completion_status =
2440 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_COMPLETION_STATUS_GET(
2441 *tag_buf);
2442
2443 ppdu_user_desc->tid =
2444 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TID_NUM_GET(*tag_buf);
2445
2446
2447 tag_buf++;
Viyom Mittal27fe1232018-11-15 12:24:50 +05302448 if (qdf_likely(ppdu_user_desc->completion_status ==
2449 HTT_PPDU_STATS_USER_STATUS_OK)) {
Anish Nataraj7235d9b2018-08-20 13:10:25 +05302450 ppdu_desc->ack_rssi = dp_stats_buf->ack_rssi;
2451 ppdu_user_desc->ack_rssi_valid = 1;
2452 } else {
2453 ppdu_user_desc->ack_rssi_valid = 0;
2454 }
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302455
2456 tag_buf++;
2457
2458 ppdu_user_desc->mpdu_success =
2459 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_SUCCESS_GET(*tag_buf);
2460
phadimanf09509b2019-07-09 14:58:38 +05302461 ppdu_user_desc->mpdu_failed =
2462 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_TRIED_GET(*tag_buf) -
2463 ppdu_user_desc->mpdu_success;
2464
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302465 tag_buf++;
2466
2467 ppdu_user_desc->long_retries =
2468 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_LONG_RETRY_GET(*tag_buf);
2469
2470 ppdu_user_desc->short_retries =
2471 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SHORT_RETRY_GET(*tag_buf);
Pranita Solankea12b4b32017-11-20 23:04:14 +05302472 ppdu_user_desc->retry_msdus =
2473 ppdu_user_desc->long_retries + ppdu_user_desc->short_retries;
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302474
2475 ppdu_user_desc->is_ampdu =
2476 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_AMPDU_GET(*tag_buf);
Anish Natarajb9e7d012018-02-16 00:38:10 +05302477 ppdu_info->is_ampdu = ppdu_user_desc->is_ampdu;
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302478
nobelj68930ca2019-10-03 17:22:47 -07002479 ppdu_desc->resp_type =
2480 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RESP_TYPE_GET(*tag_buf);
2481 ppdu_desc->mprot_type =
2482 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPROT_TYPE_GET(*tag_buf);
2483 ppdu_desc->rts_success =
2484 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RTS_SUCCESS_GET(*tag_buf);
2485 ppdu_desc->rts_failure =
2486 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RTS_FAILURE_GET(*tag_buf);
2487
nobelj7b0e2732019-05-31 00:19:07 -07002488 /*
2489 * increase successful mpdu counter from
2490 * htt_ppdu_stats_user_cmpltn_common_tlv
2491 */
2492 ppdu_info->mpdu_compltn_common_tlv += ppdu_user_desc->mpdu_success;
2493
2494 /*
2495 * MU BAR may send request to n users but we may received ack only from
2496 * m users. To have count of number of users respond back, we have a
2497 * separate counter bar_num_users per PPDU that get increment for every
2498 * htt_ppdu_stats_user_cmpltn_common_tlv
2499 */
2500 ppdu_desc->bar_num_users++;
Ankit Kumarcd66fff2019-07-02 20:54:44 +05302501
2502 tag_buf++;
2503 for (bw_iter = 0; bw_iter < CDP_RSSI_CHAIN_LEN; bw_iter++) {
2504 ppdu_user_desc->rssi_chain[bw_iter] =
2505 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_CHAIN_RSSI_GET(*tag_buf);
2506 tag_buf++;
2507 }
2508
2509 ppdu_user_desc->sa_tx_antenna =
2510 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TX_ANTENNA_MASK_GET(*tag_buf);
2511
2512 tag_buf++;
2513 ppdu_user_desc->sa_is_training =
2514 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_TRAINING_GET(*tag_buf);
2515 if (ppdu_user_desc->sa_is_training) {
2516 ppdu_user_desc->sa_goodput =
2517 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_PENDING_TRAINING_PKTS_GET(*tag_buf);
2518 }
2519
2520 tag_buf++;
2521 for (bw_iter = 0; bw_iter < CDP_NUM_SA_BW; bw_iter++) {
2522 ppdu_user_desc->sa_max_rates[bw_iter] =
2523 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MAX_RATES_GET(tag_buf[bw_iter]);
2524 }
2525
2526 tag_buf += CDP_NUM_SA_BW;
2527 ppdu_user_desc->current_rate_per =
2528 HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_CURRENT_RATE_PER_GET(*tag_buf);
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302529}
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302530
2531/*
2532 * dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv: Process
2533 * htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv
Soumya Bhat1c73aa62017-09-20 22:18:22 +05302534 * pdev: DP PDEV handle
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302535 * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv
Anish Natarajb9e7d012018-02-16 00:38:10 +05302536 * @ppdu_info: per ppdu tlv structure
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302537 *
2538 * return:void
2539 */
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302540static void dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv(
Anish Natarajb9e7d012018-02-16 00:38:10 +05302541 struct dp_pdev *pdev, uint32_t *tag_buf,
2542 struct ppdu_info *ppdu_info)
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302543{
2544 htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *dp_stats_buf =
2545 (htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv *)tag_buf;
2546 struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2547 struct cdp_tx_completion_ppdu *ppdu_desc;
2548 uint8_t curr_user_index = 0;
Soumya Bhat835033e2017-10-04 22:21:46 +05302549 uint16_t peer_id;
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302550
Anish Natarajb9e7d012018-02-16 00:38:10 +05302551 ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302552
2553 tag_buf++;
2554
2555 peer_id =
2556 HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2557
Sravan Kumar Kairam26d471e2018-08-14 23:51:58 +05302558 if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302559 return;
2560
Anish Natarajb9e7d012018-02-16 00:38:10 +05302561 curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302562
2563 ppdu_user_desc = &ppdu_desc->user[curr_user_index];
Soumya Bhat835033e2017-10-04 22:21:46 +05302564 ppdu_user_desc->peer_id = peer_id;
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302565
2566 ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no;
2567 qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap,
nobeljdebe2b32019-04-23 11:18:47 -07002568 sizeof(uint32_t) * CDP_BA_64_BIT_MAP_SIZE_DWORDS);
Karunakar Dasineni63429332019-10-15 18:49:33 -07002569 ppdu_user_desc->ba_size = CDP_BA_64_BIT_MAP_SIZE_DWORDS * 32;
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302570}
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302571
2572/*
2573 * dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv: Process
2574 * htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv
Soumya Bhat1c73aa62017-09-20 22:18:22 +05302575 * pdev: DP PDEV handle
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302576 * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv
Anish Natarajb9e7d012018-02-16 00:38:10 +05302577 * @ppdu_info: per ppdu tlv structure
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302578 *
2579 * return:void
2580 */
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302581static void dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv(
Anish Natarajb9e7d012018-02-16 00:38:10 +05302582 struct dp_pdev *pdev, uint32_t *tag_buf,
2583 struct ppdu_info *ppdu_info)
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302584{
2585 htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *dp_stats_buf =
2586 (htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv *)tag_buf;
2587 struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2588 struct cdp_tx_completion_ppdu *ppdu_desc;
2589 uint8_t curr_user_index = 0;
Soumya Bhat835033e2017-10-04 22:21:46 +05302590 uint16_t peer_id;
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302591
Anish Natarajb9e7d012018-02-16 00:38:10 +05302592 ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302593
2594 tag_buf++;
2595
2596 peer_id =
2597 HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(*tag_buf);
2598
Sravan Kumar Kairam26d471e2018-08-14 23:51:58 +05302599 if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302600 return;
2601
Anish Natarajb9e7d012018-02-16 00:38:10 +05302602 curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302603
2604 ppdu_user_desc = &ppdu_desc->user[curr_user_index];
Soumya Bhat835033e2017-10-04 22:21:46 +05302605 ppdu_user_desc->peer_id = peer_id;
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302606
2607 ppdu_user_desc->ba_seq_no = dp_stats_buf->ba_seq_no;
2608 qdf_mem_copy(&ppdu_user_desc->ba_bitmap, &dp_stats_buf->ba_bitmap,
nobeljdebe2b32019-04-23 11:18:47 -07002609 sizeof(uint32_t) * CDP_BA_256_BIT_MAP_SIZE_DWORDS);
Karunakar Dasineni63429332019-10-15 18:49:33 -07002610 ppdu_user_desc->ba_size = CDP_BA_256_BIT_MAP_SIZE_DWORDS * 32;
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302611}
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302612
2613/*
2614 * dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv: Process
2615 * htt_ppdu_stats_user_compltn_ack_ba_status_tlv
Soumya Bhat1c73aa62017-09-20 22:18:22 +05302616 * pdev: DP PDE handle
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302617 * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ack_ba_status_tlv
Anish Natarajb9e7d012018-02-16 00:38:10 +05302618 * @ppdu_info: per ppdu tlv structure
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302619 *
2620 * return:void
2621 */
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302622static void dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv(
Anish Natarajb9e7d012018-02-16 00:38:10 +05302623 struct dp_pdev *pdev, uint32_t *tag_buf,
2624 struct ppdu_info *ppdu_info)
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302625{
Soumya Bhat835033e2017-10-04 22:21:46 +05302626 uint16_t peer_id;
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302627 struct cdp_tx_completion_ppdu *ppdu_desc;
2628 struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2629 uint8_t curr_user_index = 0;
2630
Anish Natarajb9e7d012018-02-16 00:38:10 +05302631 ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302632
2633 tag_buf += 2;
2634 peer_id =
2635 HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_SW_PEER_ID_GET(*tag_buf);
2636
Sravan Kumar Kairam26d471e2018-08-14 23:51:58 +05302637 if (!dp_peer_find_by_id_valid(pdev->soc, peer_id))
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302638 return;
2639
Anish Natarajb9e7d012018-02-16 00:38:10 +05302640 curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302641
2642 ppdu_user_desc = &ppdu_desc->user[curr_user_index];
Soumya Bhat835033e2017-10-04 22:21:46 +05302643 ppdu_user_desc->peer_id = peer_id;
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302644
Soumya Bhat606fb392017-10-27 12:42:45 +05302645 tag_buf++;
nobeljc5cb3bf2019-11-19 14:47:14 -08002646 /* not to update ppdu_desc->tid from this TLV */
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302647 ppdu_user_desc->num_mpdu =
Anish Natarajb9e7d012018-02-16 00:38:10 +05302648 HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MPDU_GET(*tag_buf);
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302649
2650 ppdu_user_desc->num_msdu =
Anish Natarajb9e7d012018-02-16 00:38:10 +05302651 HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MSDU_GET(*tag_buf);
Pranita Solankea12b4b32017-11-20 23:04:14 +05302652
2653 ppdu_user_desc->success_msdus = ppdu_user_desc->num_msdu;
2654
Karunakar Dasinenifc8fac52019-09-27 15:15:45 -07002655 tag_buf++;
2656 ppdu_user_desc->start_seq =
2657 HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_START_SEQ_GET(
2658 *tag_buf);
2659
2660 tag_buf++;
Pranita Solankea12b4b32017-11-20 23:04:14 +05302661 ppdu_user_desc->success_bytes = *tag_buf;
2662
nobelj7b0e2732019-05-31 00:19:07 -07002663 /* increase successful mpdu counter */
2664 ppdu_info->mpdu_ack_ba_tlv += ppdu_user_desc->num_mpdu;
Soumya Bhat539ecfa2017-09-08 12:50:30 +05302665}
Soumya Bhat1c73aa62017-09-20 22:18:22 +05302666
2667/*
2668 * dp_process_ppdu_stats_user_common_array_tlv: Process
2669 * htt_ppdu_stats_user_common_array_tlv
2670 * pdev: DP PDEV handle
2671 * @tag_buf: buffer containing the htt_ppdu_stats_user_compltn_ack_ba_status_tlv
Anish Natarajb9e7d012018-02-16 00:38:10 +05302672 * @ppdu_info: per ppdu tlv structure
Soumya Bhat1c73aa62017-09-20 22:18:22 +05302673 *
2674 * return:void
2675 */
Anish Natarajb9e7d012018-02-16 00:38:10 +05302676static void dp_process_ppdu_stats_user_common_array_tlv(
2677 struct dp_pdev *pdev, uint32_t *tag_buf,
2678 struct ppdu_info *ppdu_info)
Soumya Bhat1c73aa62017-09-20 22:18:22 +05302679{
2680 uint32_t peer_id;
Soumya Bhat1c73aa62017-09-20 22:18:22 +05302681 struct cdp_tx_completion_ppdu *ppdu_desc;
2682 struct cdp_tx_completion_ppdu_user *ppdu_user_desc;
2683 uint8_t curr_user_index = 0;
2684 struct htt_tx_ppdu_stats_info *dp_stats_buf;
2685
Anish Natarajb9e7d012018-02-16 00:38:10 +05302686 ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
Soumya Bhat1c73aa62017-09-20 22:18:22 +05302687
Pranita Solankea12b4b32017-11-20 23:04:14 +05302688 tag_buf++;
Soumya Bhat1c73aa62017-09-20 22:18:22 +05302689 dp_stats_buf = (struct htt_tx_ppdu_stats_info *)tag_buf;
Pranita Solankea12b4b32017-11-20 23:04:14 +05302690 tag_buf += 3;
Soumya Bhat1c73aa62017-09-20 22:18:22 +05302691 peer_id =
2692 HTT_PPDU_STATS_ARRAY_ITEM_TLV_PEERID_GET(*tag_buf);
2693
Sravan Kumar Kairam26d471e2018-08-14 23:51:58 +05302694 if (!dp_peer_find_by_id_valid(pdev->soc, peer_id)) {
Soumya Bhat1c73aa62017-09-20 22:18:22 +05302695 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2696 "Invalid peer");
2697 return;
2698 }
2699
Anish Natarajb9e7d012018-02-16 00:38:10 +05302700 curr_user_index = dp_get_ppdu_info_user_index(pdev, peer_id, ppdu_info);
Soumya Bhat1c73aa62017-09-20 22:18:22 +05302701
2702 ppdu_user_desc = &ppdu_desc->user[curr_user_index];
2703
Soumya Bhat1c73aa62017-09-20 22:18:22 +05302704 ppdu_user_desc->retry_bytes = dp_stats_buf->tx_retry_bytes;
2705 ppdu_user_desc->failed_bytes = dp_stats_buf->tx_failed_bytes;
2706
2707 tag_buf++;
Pranita Solankea12b4b32017-11-20 23:04:14 +05302708
Soumya Bhat1c73aa62017-09-20 22:18:22 +05302709 ppdu_user_desc->success_msdus =
2710 HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_SUCC_MSDUS_GET(*tag_buf);
2711 ppdu_user_desc->retry_bytes =
2712 HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RETRY_MSDUS_GET(*tag_buf);
2713 tag_buf++;
2714 ppdu_user_desc->failed_msdus =
2715 HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_FAILED_MSDUS_GET(*tag_buf);
Pranita Solankea12b4b32017-11-20 23:04:14 +05302716}
2717
/*
 * dp_process_ppdu_stats_flush_tlv: Process
 * htt_ppdu_stats_flush_tlv
 * @pdev: DP PDEV handle
 * @tag_buf: buffer containing the htt_ppdu_stats_flush_tlv
 * @ppdu_info: per ppdu tlv structure
 *
 * Marks the PPDU descriptor as flushed, records the drop reason and
 * counts, and bumps the excess-retries stat for the affected peer when
 * the flush was caused by excess retries.
 *
 * return:void
 */
static void
dp_process_ppdu_stats_user_compltn_flush_tlv(struct dp_pdev *pdev,
					     uint32_t *tag_buf,
					     struct ppdu_info *ppdu_info)
{
	struct cdp_tx_completion_ppdu *ppdu_desc;
	uint32_t peer_id;
	uint8_t tid;
	struct dp_peer *peer;

	ppdu_desc = (struct cdp_tx_completion_ppdu *)
				qdf_nbuf_data(ppdu_info->nbuf);
	ppdu_desc->is_flush = 1;

	/* word 1: raw drop reason */
	tag_buf++;
	ppdu_desc->drop_reason = *tag_buf;

	/* word 2: msdu/mpdu counts and flow type */
	tag_buf++;
	ppdu_desc->num_msdu = HTT_PPDU_STATS_FLUSH_TLV_NUM_MSDU_GET(*tag_buf);
	ppdu_desc->num_mpdu = HTT_PPDU_STATS_FLUSH_TLV_NUM_MPDU_GET(*tag_buf);
	ppdu_desc->flow_type = HTT_PPDU_STATS_FLUSH_TLV_FLOW_TYPE_GET(*tag_buf);

	/* word 3: peer id, tid, queue type */
	tag_buf++;
	peer_id = HTT_PPDU_STATS_FLUSH_TLV_SW_PEER_ID_GET(*tag_buf);
	tid = HTT_PPDU_STATS_FLUSH_TLV_TID_NUM_GET(*tag_buf);

	/* flush applies to a single user; store in user[0] */
	ppdu_desc->user[0].peer_id = peer_id;
	ppdu_desc->user[0].tid = tid;

	ppdu_desc->queue_type =
		HTT_PPDU_STATS_FLUSH_TLV_QUEUE_TYPE_GET(*tag_buf);

	/* takes a peer reference; released below on every path */
	peer = dp_peer_find_by_id(pdev->soc, peer_id);
	if (!peer)
		return;

	if (ppdu_desc->drop_reason == HTT_FLUSH_EXCESS_RETRIES) {
		DP_STATS_INC(peer,
			     tx.excess_retries_per_ac[TID_TO_WME_AC(tid)],
			     ppdu_desc->num_msdu);
	}

	dp_peer_unref_del_find_by_id(peer);
}
Pamidipati, Vijay038d0902017-07-17 09:53:31 +05302771
nobeljdebe2b32019-04-23 11:18:47 -07002772#ifndef WLAN_TX_PKT_CAPTURE_ENH
2773/*
2774 * dp_deliver_mgmt_frm: Process
2775 * @pdev: DP PDEV handle
2776 * @nbuf: buffer containing the htt_ppdu_stats_tx_mgmtctrl_payload_tlv
2777 *
2778 * return: void
2779 */
Karunakar Dasineni13abde92019-09-10 12:40:41 -07002780void dp_deliver_mgmt_frm(struct dp_pdev *pdev, qdf_nbuf_t nbuf)
nobeljdebe2b32019-04-23 11:18:47 -07002781{
2782 if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
2783 dp_wdi_event_handler(WDI_EVENT_TX_MGMT_CTRL, pdev->soc,
2784 nbuf, HTT_INVALID_PEER,
2785 WDI_NO_VAL, pdev->pdev_id);
2786 }
2787}
2788#endif
2789
Soumya Bhatcfbb8952017-10-03 15:04:09 +05302790/*
2791 * dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv: Process
2792 * htt_ppdu_stats_tx_mgmtctrl_payload_tlv
2793 * @pdev: DP PDEV handle
2794 * @tag_buf: buffer containing the htt_ppdu_stats_tx_mgmtctrl_payload_tlv
2795 * @length: tlv_length
2796 *
Soumya Bhat51240dc2018-05-24 18:00:57 +05302797 * return:QDF_STATUS_SUCCESS if nbuf as to be freed in caller
Soumya Bhatcfbb8952017-10-03 15:04:09 +05302798 */
Soumya Bhat51240dc2018-05-24 18:00:57 +05302799static QDF_STATUS
Soumya Bhat402fe1a2018-03-09 13:04:57 +05302800dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv(struct dp_pdev *pdev,
2801 qdf_nbuf_t tag_buf,
Soumya Bhat402fe1a2018-03-09 13:04:57 +05302802 uint32_t ppdu_id)
Soumya Bhatcfbb8952017-10-03 15:04:09 +05302803{
Soumya Bhat402fe1a2018-03-09 13:04:57 +05302804 uint32_t *nbuf_ptr;
Soumya Bhat51240dc2018-05-24 18:00:57 +05302805 uint8_t trim_size;
nobeljcf57a9a2019-12-06 14:23:27 -08002806 size_t head_size;
2807 struct cdp_tx_mgmt_comp_info *ptr_mgmt_comp_info;
Soumya Bhatcfbb8952017-10-03 15:04:09 +05302808
Vinay Adella873dc402018-05-28 12:06:34 +05302809 if ((!pdev->tx_sniffer_enable) && (!pdev->mcopy_mode) &&
nobeljdebe2b32019-04-23 11:18:47 -07002810 (!pdev->bpr_enable) && (!pdev->tx_capture_enabled))
Soumya Bhat51240dc2018-05-24 18:00:57 +05302811 return QDF_STATUS_SUCCESS;
Soumya Bhatcfbb8952017-10-03 15:04:09 +05302812
Soumya Bhat51240dc2018-05-24 18:00:57 +05302813 trim_size = ((pdev->mgmtctrl_frm_info.mgmt_buf +
2814 HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN) -
2815 qdf_nbuf_data(tag_buf));
2816
2817 if (!qdf_nbuf_pull_head(tag_buf, trim_size))
2818 return QDF_STATUS_SUCCESS;
2819
2820 qdf_nbuf_trim_tail(tag_buf, qdf_nbuf_len(tag_buf) -
2821 pdev->mgmtctrl_frm_info.mgmt_buf_len);
Soumya Bhatcfbb8952017-10-03 15:04:09 +05302822
nobeljcf57a9a2019-12-06 14:23:27 -08002823 if (pdev->tx_capture_enabled) {
2824 head_size = sizeof(struct cdp_tx_mgmt_comp_info);
2825 if (qdf_unlikely(qdf_nbuf_headroom(tag_buf) < head_size)) {
2826 qdf_err("Fail to get headroom h_sz %d h_avail %d\n",
2827 head_size, qdf_nbuf_headroom(tag_buf));
2828 qdf_assert_always(0);
2829 return QDF_STATUS_E_NOMEM;
2830 }
2831 ptr_mgmt_comp_info = (struct cdp_tx_mgmt_comp_info *)
2832 qdf_nbuf_push_head(tag_buf, head_size);
2833 qdf_assert_always(ptr_mgmt_comp_info);
2834 ptr_mgmt_comp_info->ppdu_id = ppdu_id;
2835 ptr_mgmt_comp_info->is_sgen_pkt = true;
2836 } else {
2837 head_size = sizeof(ppdu_id);
2838 nbuf_ptr = (uint32_t *)qdf_nbuf_push_head(tag_buf, head_size);
2839 *nbuf_ptr = ppdu_id;
2840 }
Soumya Bhat51240dc2018-05-24 18:00:57 +05302841
Vinay Adella873dc402018-05-28 12:06:34 +05302842 if (pdev->bpr_enable) {
2843 dp_wdi_event_handler(WDI_EVENT_TX_BEACON, pdev->soc,
2844 tag_buf, HTT_INVALID_PEER,
2845 WDI_NO_VAL, pdev->pdev_id);
2846 }
nobeljdebe2b32019-04-23 11:18:47 -07002847
2848 dp_deliver_mgmt_frm(pdev, tag_buf);
Soumya Bhat51240dc2018-05-24 18:00:57 +05302849
Soumya Bhat51240dc2018-05-24 18:00:57 +05302850 return QDF_STATUS_E_ALREADY;
Soumya Bhatcfbb8952017-10-03 15:04:09 +05302851}
2852
Pamidipati, Vijay038d0902017-07-17 09:53:31 +05302853/**
Tallapragada Kalyanf7b0f742019-05-14 16:59:42 +05302854 * dp_validate_fix_ppdu_tlv(): Function to validate the length of PPDU
2855 *
2856 * If the TLV length sent as part of PPDU TLV is less that expected size i.e
2857 * size of corresponding data structure, pad the remaining bytes with zeros
2858 * and continue processing the TLVs
2859 *
2860 * @pdev: DP pdev handle
2861 * @tag_buf: TLV buffer
2862 * @tlv_expected_size: Expected size of Tag
2863 * @tlv_len: TLV length received from FW
2864 *
2865 * Return: Pointer to updated TLV
2866 */
2867static inline uint32_t *dp_validate_fix_ppdu_tlv(struct dp_pdev *pdev,
2868 uint32_t *tag_buf,
2869 uint16_t tlv_expected_size,
2870 uint16_t tlv_len)
2871{
2872 uint32_t *tlv_desc = tag_buf;
2873
2874 qdf_assert_always(tlv_len != 0);
2875
2876 if (tlv_len < tlv_expected_size) {
2877 qdf_mem_zero(pdev->ppdu_tlv_buf, tlv_expected_size);
2878 qdf_mem_copy(pdev->ppdu_tlv_buf, tag_buf, tlv_len);
2879 tlv_desc = pdev->ppdu_tlv_buf;
2880 }
2881
2882 return tlv_desc;
2883}
2884
/**
 * dp_process_ppdu_tag(): Function to process the PPDU TLVs
 * @pdev: DP pdev handle
 * @tag_buf: TLV buffer (first word carries the HTT TLV tag/length header)
 * @tlv_len: length of tlv as reported by FW
 * @ppdu_info: per ppdu tlv structure that accumulates the parsed fields
 *
 * Dispatches one PPDU-stats TLV to its tag-specific parser. Every handler
 * receives a buffer guaranteed to hold at least the expected structure size:
 * short TLVs are zero-padded via dp_validate_fix_ppdu_tlv(). Unknown tags
 * are silently ignored (forward compatibility with newer FW).
 *
 * return: void
 */
static void dp_process_ppdu_tag(struct dp_pdev *pdev, uint32_t *tag_buf,
		uint32_t tlv_len, struct ppdu_info *ppdu_info)
{
	uint32_t tlv_type = HTT_STATS_TLV_TAG_GET(*tag_buf);
	uint16_t tlv_expected_size;
	uint32_t *tlv_desc;

	switch (tlv_type) {
	case HTT_PPDU_STATS_COMMON_TLV:
		tlv_expected_size = sizeof(htt_ppdu_stats_common_tlv);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_common_tlv(pdev, tlv_desc, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMMON_TLV:
		tlv_expected_size = sizeof(htt_ppdu_stats_user_common_tlv);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_user_common_tlv(pdev, tlv_desc,
						      ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_RATE_TLV:
		tlv_expected_size = sizeof(htt_ppdu_stats_user_rate_tlv);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_user_rate_tlv(pdev, tlv_desc,
						    ppdu_info);
		break;
	/* Enqueued-MPDU bitmaps come in two sizes depending on BA window */
	case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV:
		tlv_expected_size =
			sizeof(htt_ppdu_stats_enq_mpdu_bitmap_64_tlv);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_enq_mpdu_bitmap_64_tlv(
				pdev, tlv_desc, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV:
		tlv_expected_size =
			sizeof(htt_ppdu_stats_enq_mpdu_bitmap_256_tlv);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_enq_mpdu_bitmap_256_tlv(
				pdev, tlv_desc, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV:
		tlv_expected_size =
			sizeof(htt_ppdu_stats_user_cmpltn_common_tlv);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_user_cmpltn_common_tlv(
				pdev, tlv_desc, ppdu_info);
		break;
	/* Block-ack bitmaps, again in 64- and 256-entry flavours */
	case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV:
		tlv_expected_size =
			sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_user_compltn_ba_bitmap_64_tlv(
				pdev, tlv_desc, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV:
		tlv_expected_size =
			sizeof(htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_user_compltn_ba_bitmap_256_tlv(
				pdev, tlv_desc, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV:
		tlv_expected_size =
			sizeof(htt_ppdu_stats_user_compltn_ack_ba_status_tlv);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_user_compltn_ack_ba_status_tlv(
				pdev, tlv_desc, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV:
		tlv_expected_size =
			sizeof(htt_ppdu_stats_usr_common_array_tlv_v);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_user_common_array_tlv(
				pdev, tlv_desc, ppdu_info);
		break;
	case HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV:
		tlv_expected_size = sizeof(htt_ppdu_stats_flush_tlv);
		tlv_desc = dp_validate_fix_ppdu_tlv(pdev, tag_buf,
						    tlv_expected_size, tlv_len);
		dp_process_ppdu_stats_user_compltn_flush_tlv(pdev, tlv_desc,
							     ppdu_info);
		break;
	default:
		/* Unknown/unsupported tag: ignore, keep walking the stream */
		break;
	}
}
2989
/**
 * dp_ppdu_desc_user_stats_update(): Function to update TX user stats
 * @pdev: DP pdev handle
 * @ppdu_info: per PPDU TLV descriptor
 *
 * Walks every user of the completed PPDU, accumulates per-PPDU MPDU/MSDU
 * counts, and pushes per-peer tx stats for data frames. Peers are looked up
 * by id and released with dp_peer_unref_del_find_by_id() on every exit path
 * of the loop body.
 *
 * return: void
 */
void
dp_ppdu_desc_user_stats_update(struct dp_pdev *pdev,
			       struct ppdu_info *ppdu_info)
{
	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
	struct dp_peer *peer = NULL;
	uint32_t tlv_bitmap_expected;
	uint32_t tlv_bitmap_default;
	uint16_t i;
	uint32_t num_users;

	/* PPDU descriptor lives in the data area of the ppdu_info nbuf */
	ppdu_desc = (struct cdp_tx_completion_ppdu *)
		qdf_nbuf_data(ppdu_info->nbuf);

	ppdu_desc->num_users = ppdu_info->last_user;
	ppdu_desc->ppdu_id = ppdu_info->ppdu_id;

	tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP;
	if (pdev->tx_sniffer_enable || pdev->mcopy_mode ||
	    pdev->tx_capture_enabled) {
		/* Sniffer/capture modes expect extra TLVs for A-MPDUs */
		if (ppdu_info->is_ampdu)
			tlv_bitmap_expected =
				dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(
						ppdu_info->tlv_bitmap);
	}

	tlv_bitmap_default = tlv_bitmap_expected;

	/* BAR PPDUs track their user count separately */
	if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR) {
		num_users = ppdu_desc->bar_num_users;
		ppdu_desc->num_users = ppdu_desc->bar_num_users;
	} else {
		num_users = ppdu_desc->num_users;
	}

	for (i = 0; i < num_users; i++) {
		ppdu_desc->num_mpdu += ppdu_desc->user[i].num_mpdu;
		ppdu_desc->num_msdu += ppdu_desc->user[i].num_msdu;

		peer = dp_peer_find_by_id(pdev->soc,
					  ppdu_desc->user[i].peer_id);
		/**
		 * This check is to make sure peer is not deleted
		 * after processing the TLVs.
		 */
		if (!peer)
			continue;

		ppdu_desc->user[i].cookie = (void *)peer->wlanstats_ctx;

		/*
		 * different frame like DATA, BAR or CTRL has different
		 * tlv bitmap expected. Apart from ACK_BA_STATUS TLV, we
		 * receive other tlv in-order/sequential from fw.
		 * Since ACK_BA_STATUS TLV come from Hardware it is
		 * asynchronous So we need to depend on some tlv to confirm
		 * all tlv is received for a ppdu.
		 * So we depend on both HTT_PPDU_STATS_COMMON_TLV and
		 * ACK_BA_STATUS_TLV. for failure packet we won't get
		 * ACK_BA_STATUS_TLV.
		 */
		if (!(ppdu_info->tlv_bitmap &
		      (1 << HTT_PPDU_STATS_COMMON_TLV)) ||
		    (!(ppdu_info->tlv_bitmap &
		       (1 << HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV)) &&
		     (ppdu_desc->user[i].completion_status ==
		      HTT_PPDU_STATS_USER_STATUS_OK))) {
			/* Incomplete TLV set: drop the peer ref and skip */
			dp_peer_unref_del_find_by_id(peer);
			continue;
		}

		/**
		 * Update tx stats for data frames having Qos as well as
		 * non-Qos data tid
		 */

		if ((ppdu_desc->user[i].tid < CDP_DATA_TID_MAX ||
		     (ppdu_desc->user[i].tid == CDP_DATA_NON_QOS_TID) ||
		     (ppdu_desc->htt_frame_type ==
		      HTT_STATS_FTYPE_SGEN_QOS_NULL)) &&
		    (ppdu_desc->frame_type != CDP_PPDU_FTYPE_CTRL)) {

			dp_tx_stats_update(pdev, peer,
					   &ppdu_desc->user[i],
					   ppdu_desc->ack_rssi);
			dp_tx_rate_stats_update(peer, &ppdu_desc->user[i]);
		}

		dp_peer_unref_del_find_by_id(peer);
		/* Restore the expectation possibly narrowed for this user */
		tlv_bitmap_expected = tlv_bitmap_default;
	}
}
3089
3090#ifndef WLAN_TX_PKT_CAPTURE_ENH
3091
3092/**
3093 * dp_ppdu_desc_deliver(): Function to deliver Tx PPDU status descriptor
3094 * to upper layer
3095 * @pdev: DP pdev handle
3096 * @ppdu_info: per PPDU TLV descriptor
3097 *
3098 * return: void
3099 */
3100static
3101void dp_ppdu_desc_deliver(struct dp_pdev *pdev,
3102 struct ppdu_info *ppdu_info)
3103{
3104 struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
3105 qdf_nbuf_t nbuf;
3106
3107 ppdu_desc = (struct cdp_tx_completion_ppdu *)
3108 qdf_nbuf_data(ppdu_info->nbuf);
3109
3110 dp_ppdu_desc_user_stats_update(pdev, ppdu_info);
Anish Natarajb9e7d012018-02-16 00:38:10 +05303111
3112 /*
3113 * Remove from the list
3114 */
3115 TAILQ_REMOVE(&pdev->ppdu_info_list, ppdu_info, ppdu_info_list_elem);
3116 nbuf = ppdu_info->nbuf;
3117 pdev->list_depth--;
3118 qdf_mem_free(ppdu_info);
3119
3120 qdf_assert_always(nbuf);
3121
3122 ppdu_desc = (struct cdp_tx_completion_ppdu *)
3123 qdf_nbuf_data(nbuf);
3124
3125 /**
3126 * Deliver PPDU stats only for valid (acked) data frames if
3127 * sniffer mode is not enabled.
3128 * If sniffer mode is enabled, PPDU stats for all frames
3129 * including mgmt/control frames should be delivered to upper layer
3130 */
3131 if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
3132 dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC, pdev->soc,
3133 nbuf, HTT_INVALID_PEER,
3134 WDI_NO_VAL, pdev->pdev_id);
3135 } else {
3136 if (ppdu_desc->num_mpdu != 0 && ppdu_desc->num_users != 0 &&
3137 ppdu_desc->frame_ctrl & HTT_FRAMECTRL_DATATYPE) {
3138
3139 dp_wdi_event_handler(WDI_EVENT_TX_PPDU_DESC,
3140 pdev->soc, nbuf, HTT_INVALID_PEER,
3141 WDI_NO_VAL, pdev->pdev_id);
3142 } else
3143 qdf_nbuf_free(nbuf);
3144 }
3145 return;
3146}
3147
nobeljdebe2b32019-04-23 11:18:47 -07003148#endif
3149
/**
 * dp_get_ppdu_desc(): Function to allocate new PPDU status
 * desc for new ppdu id
 * @pdev: DP pdev handle
 * @ppdu_id: PPDU unique identifier
 * @tlv_type: TLV type received
 *
 * Looks up the pending descriptor for @ppdu_id, flushing a stale one if a
 * duplicate TLV indicates a new PPDU reused the id, and allocates a fresh
 * ppdu_info (node + backing nbuf) when none exists.
 *
 * return: ppdu_info per ppdu tlv structure, or NULL on allocation failure
 */
static
struct ppdu_info *dp_get_ppdu_desc(struct dp_pdev *pdev, uint32_t ppdu_id,
		uint8_t tlv_type)
{
	struct ppdu_info *ppdu_info = NULL;

	/*
	 * Find ppdu_id node exists or not
	 */
	TAILQ_FOREACH(ppdu_info, &pdev->ppdu_info_list, ppdu_info_list_elem) {

		if (ppdu_info && (ppdu_info->ppdu_id == ppdu_id)) {
			break;
		}
	}

	if (ppdu_info) {
		if (ppdu_info->tlv_bitmap & (1 << tlv_type)) {
			/**
			 * if we get tlv_type that is already been processed
			 * for ppdu, that means we got a new ppdu with same
			 * ppdu id. Hence Flush the older ppdu
			 * for MUMIMO and OFDMA, In a PPDU we have
			 * multiple user with same tlv types. tlv bitmap is
			 * used to check whether SU or MU_MIMO/OFDMA
			 */
			if (!(ppdu_info->tlv_bitmap &
			      (1 << HTT_PPDU_STATS_SCH_CMD_STATUS_TLV)))
				return ppdu_info;

			/**
			 * apart from ACK BA STATUS TLV rest all comes in order
			 * so if tlv type not ACK BA STATUS TLV we can deliver
			 * ppdu_info
			 */
			if (tlv_type ==
			    HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV)
				return ppdu_info;

			/* Stale descriptor for a reused ppdu_id: flush it */
			dp_ppdu_desc_deliver(pdev, ppdu_info);
		} else {
			return ppdu_info;
		}
	}

	/**
	 * Flush the head ppdu descriptor if ppdu desc list reaches max
	 * threshold
	 */
	if (pdev->list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
		ppdu_info = TAILQ_FIRST(&pdev->ppdu_info_list);
		dp_ppdu_desc_deliver(pdev, ppdu_info);
	}

	/*
	 * Allocate new ppdu_info node
	 */
	ppdu_info = qdf_mem_malloc(sizeof(struct ppdu_info));
	if (!ppdu_info)
		return NULL;

	ppdu_info->nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
			sizeof(struct cdp_tx_completion_ppdu), 0, 4,
			TRUE);
	if (!ppdu_info->nbuf) {
		qdf_mem_free(ppdu_info);
		return NULL;
	}

	/* ppdu_desc aliases the nbuf data area for the descriptor's lifetime */
	ppdu_info->ppdu_desc =
		(struct cdp_tx_completion_ppdu *)qdf_nbuf_data(ppdu_info->nbuf);
	qdf_mem_zero(qdf_nbuf_data(ppdu_info->nbuf),
		     sizeof(struct cdp_tx_completion_ppdu));

	if (qdf_nbuf_put_tail(ppdu_info->nbuf,
			      sizeof(struct cdp_tx_completion_ppdu)) == NULL) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "No tailroom for HTT PPDU");
		qdf_nbuf_free(ppdu_info->nbuf);
		ppdu_info->nbuf = NULL;
		ppdu_info->last_user = 0;
		qdf_mem_free(ppdu_info);
		return NULL;
	}

	/**
	 * No lock is needed because all PPDU TLVs are processed in
	 * same context and this list is updated in same context
	 */
	TAILQ_INSERT_TAIL(&pdev->ppdu_info_list, ppdu_info,
			  ppdu_info_list_elem);
	pdev->list_depth++;
	return ppdu_info;
}
3253
/**
 * dp_htt_process_tlv(): Function to process each PPDU TLVs
 * @pdev: DP pdev handle
 * @htt_t2h_msg: HTT target to host message
 *
 * Walks the TLV stream in a PPDU-stats T2H message, dispatching each TLV to
 * dp_process_ppdu_tag(). The mgmt-ctrl payload TLV is only recorded in
 * pdev->mgmtctrl_frm_info (delivered separately by the caller). After the
 * walk, delayed-BA bookkeeping is applied and completeness of the TLV set
 * is checked.
 *
 * return: ppdu_info when the PPDU is complete and ready to deliver,
 *         NULL otherwise (more TLVs pending, or allocation failure)
 */
static struct ppdu_info *dp_htt_process_tlv(struct dp_pdev *pdev,
		qdf_nbuf_t htt_t2h_msg)
{
	uint32_t length;
	uint32_t ppdu_id;
	uint8_t tlv_type;
	uint32_t tlv_length, tlv_bitmap_expected;
	uint8_t *tlv_buf;
	struct ppdu_info *ppdu_info = NULL;
	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
	struct dp_peer *peer;
	uint32_t i = 0;

	uint32_t *msg_word = (uint32_t *) qdf_nbuf_data(htt_t2h_msg);

	/* Word 0: total payload length; word 1: ppdu id */
	length = HTT_T2H_PPDU_STATS_PAYLOAD_SIZE_GET(*msg_word);

	msg_word = msg_word + 1;
	ppdu_id = HTT_T2H_PPDU_STATS_PPDU_ID_GET(*msg_word);


	/* Skip remaining header words to reach the first TLV */
	msg_word = msg_word + 3;
	while (length > 0) {
		tlv_buf = (uint8_t *)msg_word;
		tlv_type = HTT_STATS_TLV_TAG_GET(*msg_word);
		tlv_length = HTT_STATS_TLV_LENGTH_GET(*msg_word);
		if (qdf_likely(tlv_type < CDP_PPDU_STATS_MAX_TAG))
			pdev->stats.ppdu_stats_counter[tlv_type]++;

		/* Zero-length TLV terminates the walk (malformed stream) */
		if (tlv_length == 0)
			break;

		tlv_length += HTT_TLV_HDR_LEN;

		/**
		 * Not allocating separate ppdu descriptor for MGMT Payload
		 * TLV as this is sent as separate WDI indication and it
		 * doesn't contain any ppdu information
		 */
		if (tlv_type == HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV) {
			pdev->mgmtctrl_frm_info.mgmt_buf = tlv_buf;
			pdev->mgmtctrl_frm_info.ppdu_id = ppdu_id;
			pdev->mgmtctrl_frm_info.mgmt_buf_len =
				HTT_PPDU_STATS_TX_MGMTCTRL_TLV_FRAME_LENGTH_GET
				(*(msg_word + 1));
			msg_word =
				(uint32_t *)((uint8_t *)tlv_buf + tlv_length);
			length -= (tlv_length);
			continue;
		}

		ppdu_info = dp_get_ppdu_desc(pdev, ppdu_id, tlv_type);
		if (!ppdu_info)
			return NULL;
		ppdu_info->ppdu_id = ppdu_id;
		ppdu_info->tlv_bitmap |= (1 << tlv_type);

		dp_process_ppdu_tag(pdev, msg_word, tlv_length, ppdu_info);

		/**
		 * Increment pdev level tlv count to monitor
		 * missing TLVs
		 */
		pdev->tlv_count++;
		ppdu_info->last_tlv_cnt = pdev->tlv_count;
		msg_word = (uint32_t *)((uint8_t *)tlv_buf + tlv_length);
		length -= (tlv_length);
	}

	/* Message contained only the mgmt-ctrl TLV (or nothing) */
	if (!ppdu_info)
		return NULL;

	pdev->last_ppdu_id = ppdu_id;

	tlv_bitmap_expected = HTT_PPDU_DEFAULT_TLV_BITMAP;

	if (pdev->tx_sniffer_enable || pdev->mcopy_mode ||
	    pdev->tx_capture_enabled) {
		if (ppdu_info->is_ampdu)
			tlv_bitmap_expected =
				dp_htt_get_ppdu_sniffer_ampdu_tlv_bitmap(
						ppdu_info->tlv_bitmap);
	}

	ppdu_desc = ppdu_info->ppdu_desc;

	if (!ppdu_desc)
		return NULL;

	/* Failed PPDUs only produce the low byte's worth of TLVs */
	if (ppdu_desc->user[ppdu_desc->last_usr_index].completion_status !=
	    HTT_PPDU_STATS_USER_STATUS_OK) {
		tlv_bitmap_expected = tlv_bitmap_expected & 0xFF;
	}

	/* Stash per-user delayed-BA state until the matching BAR arrives */
	if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_DATA &&
	    (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_COMMON_TLV)) &&
	    ppdu_desc->delayed_ba) {
		for (i = 0; i < ppdu_desc->num_users; i++) {
			uint32_t ppdu_id;

			ppdu_id = ppdu_desc->ppdu_id;
			peer = dp_peer_find_by_id(pdev->soc,
						  ppdu_desc->user[i].peer_id);
			/**
			 * This check is to make sure peer is not deleted
			 * after processing the TLVs.
			 */
			if (!peer)
				continue;

			/**
			 * save delayed ba user info
			 */
			if (ppdu_desc->user[i].delayed_ba) {
				dp_peer_copy_delay_stats(peer,
							 &ppdu_desc->user[i]);
				peer->last_delayed_ba_ppduid = ppdu_id;
			}
			dp_peer_unref_del_find_by_id(peer);
		}
	}

	/*
	 * when frame type is BAR and STATS_COMMON_TLV is set
	 * copy the store peer delayed info to BAR status
	 */
	if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_BAR &&
	    (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_COMMON_TLV))) {
		for (i = 0; i < ppdu_desc->bar_num_users; i++) {
			peer = dp_peer_find_by_id(pdev->soc,
						  ppdu_desc->user[i].peer_id);
			/**
			 * This check is to make sure peer is not deleted
			 * after processing the TLVs.
			 */
			if (!peer)
				continue;

			if (peer->last_delayed_ba) {
				dp_peer_copy_stats_to_bar(peer,
							  &ppdu_desc->user[i]);
				/* Re-attribute the BAR to the delayed PPDU */
				ppdu_desc->bar_ppdu_id = ppdu_desc->ppdu_id;
				ppdu_desc->ppdu_id =
					peer->last_delayed_ba_ppduid;
			}
			dp_peer_unref_del_find_by_id(peer);
		}
	}

	/*
	 * for frame type DATA and BAR, we update stats based on MSDU,
	 * successful msdu and mpdu are populate from ACK BA STATUS TLV
	 * which comes out of order. successful mpdu also populated from
	 * COMPLTN COMMON TLV which comes in order. for every ppdu_info
	 * we store successful mpdu from both tlv and compare before delivering
	 * to make sure we received ACK BA STATUS TLV. For some self generated
	 * frame we won't get ack ba status tlv so no need to wait for
	 * ack ba status tlv.
	 */
	if (ppdu_desc->frame_type != CDP_PPDU_FTYPE_CTRL &&
	    ppdu_desc->htt_frame_type != HTT_STATS_FTYPE_SGEN_QOS_NULL) {
		/*
		 * successful mpdu count should match with both tlv
		 */
		if (ppdu_info->mpdu_compltn_common_tlv !=
		    ppdu_info->mpdu_ack_ba_tlv)
			return NULL;
	}

	/**
	 * Once all the TLVs for a given PPDU has been processed,
	 * return PPDU status to be delivered to higher layer.
	 * tlv_bitmap_expected can't be available for different frame type.
	 * But STATS COMMON TLV is the last TLV from the FW for a ppdu.
	 * apart from ACK BA TLV, FW sends other TLV in sequential order.
	 * flush tlv comes separate.
	 */
	if ((ppdu_info->tlv_bitmap != 0 &&
	     (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_COMMON_TLV))) ||
	    (ppdu_info->tlv_bitmap &
	     (1 << HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV)))
		return ppdu_info;

	return NULL;
}
Soumya Bhat1c73aa62017-09-20 22:18:22 +05303447#endif /* FEATURE_PERPKT_INFO */
Pamidipati, Vijay038d0902017-07-17 09:53:31 +05303448
3449/**
3450 * dp_txrx_ppdu_stats_handler() - Function to process HTT PPDU stats from FW
3451 * @soc: DP SOC handle
3452 * @pdev_id: pdev id
3453 * @htt_t2h_msg: HTT message nbuf
3454 *
3455 * return:void
3456 */
Venkata Sharath Chandra Manchala5a6f4292017-11-03 14:57:41 -07003457#if defined(WDI_EVENT_ENABLE)
Pamidipati, Vijay038d0902017-07-17 09:53:31 +05303458#ifdef FEATURE_PERPKT_INFO
Soumya Bhat402fe1a2018-03-09 13:04:57 +05303459static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
3460 uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
Pamidipati, Vijay038d0902017-07-17 09:53:31 +05303461{
3462 struct dp_pdev *pdev = soc->pdev_list[pdev_id];
Anish Natarajb9e7d012018-02-16 00:38:10 +05303463 struct ppdu_info *ppdu_info = NULL;
Soumya Bhat402fe1a2018-03-09 13:04:57 +05303464 bool free_buf = true;
Pamidipati, Vijay038d0902017-07-17 09:53:31 +05303465
Nandha Kishore Easwarandd685082018-06-22 18:54:08 +05303466 if (!pdev)
3467 return true;
3468
Soumya Bhat89647ef2017-11-16 17:23:48 +05303469 if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable &&
Alok Singh40a622b2018-06-28 10:47:26 +05303470 !pdev->mcopy_mode && !pdev->bpr_enable)
Soumya Bhat402fe1a2018-03-09 13:04:57 +05303471 return free_buf;
Pamidipati, Vijay038d0902017-07-17 09:53:31 +05303472
Soumya Bhat51240dc2018-05-24 18:00:57 +05303473 ppdu_info = dp_htt_process_tlv(pdev, htt_t2h_msg);
3474
3475 if (pdev->mgmtctrl_frm_info.mgmt_buf) {
3476 if (dp_process_ppdu_stats_tx_mgmtctrl_payload_tlv
3477 (pdev, htt_t2h_msg, pdev->mgmtctrl_frm_info.ppdu_id) !=
3478 QDF_STATUS_SUCCESS)
3479 free_buf = false;
3480 }
3481
Anish Natarajb9e7d012018-02-16 00:38:10 +05303482 if (ppdu_info)
3483 dp_ppdu_desc_deliver(pdev, ppdu_info);
Soumya Bhat402fe1a2018-03-09 13:04:57 +05303484
nobelj2a1312c2019-06-20 23:45:43 -07003485 pdev->mgmtctrl_frm_info.mgmt_buf = NULL;
3486 pdev->mgmtctrl_frm_info.mgmt_buf_len = 0;
3487 pdev->mgmtctrl_frm_info.ppdu_id = 0;
3488
Soumya Bhat402fe1a2018-03-09 13:04:57 +05303489 return free_buf;
Pamidipati, Vijay038d0902017-07-17 09:53:31 +05303490}
3491#else
Soumya Bhat402fe1a2018-03-09 13:04:57 +05303492static bool dp_txrx_ppdu_stats_handler(struct dp_soc *soc,
3493 uint8_t pdev_id, qdf_nbuf_t htt_t2h_msg)
Pamidipati, Vijay038d0902017-07-17 09:53:31 +05303494{
Soumya Bhat402fe1a2018-03-09 13:04:57 +05303495 return true;
Pamidipati, Vijay038d0902017-07-17 09:53:31 +05303496}
3497#endif
3498#endif
3499
3500/**
3501 * dp_txrx_fw_stats_handler() - Function to process HTT EXT stats
Ishank Jain6290a3c2017-03-21 10:49:39 +05303502 * @soc: DP SOC handle
3503 * @htt_t2h_msg: HTT message nbuf
3504 *
3505 * return:void
3506 */
3507static inline void dp_txrx_fw_stats_handler(struct dp_soc *soc,
3508 qdf_nbuf_t htt_t2h_msg)
3509{
Ishank Jain6290a3c2017-03-21 10:49:39 +05303510 uint8_t done;
3511 qdf_nbuf_t msg_copy;
3512 uint32_t *msg_word;
3513
3514 msg_word = (uint32_t *) qdf_nbuf_data(htt_t2h_msg);
3515 msg_word = msg_word + 3;
Ishank Jain6290a3c2017-03-21 10:49:39 +05303516 done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
3517
3518 /*
3519 * HTT EXT stats response comes as stream of TLVs which span over
3520 * multiple T2H messages.
3521 * The first message will carry length of the response.
3522 * For rest of the messages length will be zero.
Om Prakash Tripathi12126822017-08-03 10:21:24 +05303523 *
Ishank Jain6290a3c2017-03-21 10:49:39 +05303524 * Clone the T2H message buffer and store it in a list to process
3525 * it later.
3526 *
3527 * The original T2H message buffers gets freed in the T2H HTT event
3528 * handler
3529 */
3530 msg_copy = qdf_nbuf_clone(htt_t2h_msg);
3531
3532 if (!msg_copy) {
3533 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
3534 "T2H messge clone failed for HTT EXT STATS");
Ishank Jain6290a3c2017-03-21 10:49:39 +05303535 goto error;
3536 }
3537
Om Prakash Tripathi12126822017-08-03 10:21:24 +05303538 qdf_spin_lock_bh(&soc->htt_stats.lock);
3539 qdf_nbuf_queue_add(&soc->htt_stats.msg, msg_copy);
Ishank Jain6290a3c2017-03-21 10:49:39 +05303540 /*
3541 * Done bit signifies that this is the last T2H buffer in the stream of
3542 * HTT EXT STATS message
3543 */
Om Prakash Tripathi12126822017-08-03 10:21:24 +05303544 if (done) {
3545 soc->htt_stats.num_stats++;
3546 qdf_sched_work(0, &soc->htt_stats.work);
3547 }
3548 qdf_spin_unlock_bh(&soc->htt_stats.lock);
Ishank Jain6290a3c2017-03-21 10:49:39 +05303549
3550 return;
3551
3552error:
Om Prakash Tripathi12126822017-08-03 10:21:24 +05303553 qdf_spin_lock_bh(&soc->htt_stats.lock);
3554 while ((msg_copy = qdf_nbuf_queue_remove(&soc->htt_stats.msg))
Ishank Jain6290a3c2017-03-21 10:49:39 +05303555 != NULL) {
3556 qdf_nbuf_free(msg_copy);
3557 }
Om Prakash Tripathi12126822017-08-03 10:21:24 +05303558 soc->htt_stats.num_stats = 0;
3559 qdf_spin_unlock_bh(&soc->htt_stats.lock);
Ishank Jain6290a3c2017-03-21 10:49:39 +05303560 return;
3561
3562}
3563
/*
 * htt_soc_attach_target() - SOC level HTT setup
 * @htt_soc: HTT SOC handle
 *
 * Sends the H2T version request message to the target as the first step
 * of HTT bring-up.
 *
 * Return: 0 on success; error code on failure (from htt_h2t_ver_req_msg)
 */
int htt_soc_attach_target(struct htt_soc *htt_soc)
{
	/*
	 * The parameter already has type struct htt_soc *; the old
	 * intermediate cast through a local was a leftover from a
	 * void *-typed signature and is removed.
	 */
	return htt_h2t_ver_req_msg(htt_soc);
}
3576
/* Store the HTC handle used for all H2T message transmission */
void htt_set_htc_handle(struct htt_soc *htt_soc, HTC_HANDLE htc_soc)
{
	htt_soc->htc_soc = htc_soc;
}
3581
/* Return the HTC handle previously stored on this HTT SOC */
HTC_HANDLE htt_get_htc_handle(struct htt_soc *htt_soc)
{
	return htt_soc->htc_soc;
}
3586
3587struct htt_soc *htt_soc_attach(struct dp_soc *soc, HTC_HANDLE htc_handle)
3588{
Ruben Columbus814e6cb2019-09-10 15:49:11 -07003589 int i;
3590 int j;
3591 int alloc_size = HTT_SW_UMAC_RING_IDX_MAX * sizeof(unsigned long);
Akshay Kosigi383b6d52019-07-12 12:24:30 +05303592 struct htt_soc *htt_soc = NULL;
3593
3594 htt_soc = qdf_mem_malloc(sizeof(*htt_soc));
3595 if (!htt_soc) {
3596 dp_err("HTT attach failed");
3597 return NULL;
3598 }
Ruben Columbus814e6cb2019-09-10 15:49:11 -07003599
3600 for (i = 0; i < MAX_PDEV_CNT; i++) {
3601 htt_soc->pdevid_tt[i].umac_ttt = qdf_mem_malloc(alloc_size);
3602 if (!htt_soc->pdevid_tt[i].umac_ttt)
3603 break;
3604 qdf_mem_set(htt_soc->pdevid_tt[i].umac_ttt, alloc_size, -1);
3605 htt_soc->pdevid_tt[i].lmac_ttt = qdf_mem_malloc(alloc_size);
3606 if (!htt_soc->pdevid_tt[i].lmac_ttt) {
3607 qdf_mem_free(htt_soc->pdevid_tt[i].umac_ttt);
3608 break;
3609 }
3610 qdf_mem_set(htt_soc->pdevid_tt[i].lmac_ttt, alloc_size, -1);
3611 }
3612 if (i != MAX_PDEV_CNT) {
3613 for (j = 0; j < i; j++) {
3614 qdf_mem_free(htt_soc->pdevid_tt[i].umac_ttt);
3615 qdf_mem_free(htt_soc->pdevid_tt[i].lmac_ttt);
3616 }
3617 return NULL;
3618 }
3619
Akshay Kosigi383b6d52019-07-12 12:24:30 +05303620 htt_soc->dp_soc = soc;
3621 htt_soc->htc_soc = htc_handle;
3622 HTT_TX_MUTEX_INIT(&htt_soc->htt_tx_mutex);
3623
3624 return htt_soc;
3625}
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07003626
#if defined(WDI_EVENT_ENABLE) && !defined(REMOVE_PKT_LOG)
/*
 * dp_ppdu_stats_ind_handler() - PPDU stats msg handler
 * @soc: HTT SOC handle
 * @msg_word: Pointer to payload (first 32-bit word of the HTT message)
 * @htt_t2h_msg: HTT msg nbuf
 *
 * Forwards the PPDU stats indication to the stats handler and then to
 * WDI subscribers.  Buffer ownership is decided by the stats handler:
 * if it keeps the nbuf, it returns false so the caller must not free it.
 *
 * Return: True if buffer should be freed by caller.
 */
static bool
dp_ppdu_stats_ind_handler(struct htt_soc *soc,
				uint32_t *msg_word,
				qdf_nbuf_t htt_t2h_msg)
{
	u_int8_t pdev_id;
	bool free_buf;
	/* clamp the nbuf length to the maximum HTT T2H message size */
	qdf_nbuf_set_pktlen(htt_t2h_msg, HTT_T2H_MAX_MSG_SIZE);
	pdev_id = HTT_T2H_PPDU_STATS_PDEV_ID_GET(*msg_word);
	/* firmware reports HW mac ids; convert to the SW pdev index */
	pdev_id = DP_HW2SW_MACID(pdev_id);
	free_buf = dp_txrx_ppdu_stats_handler(soc->dp_soc, pdev_id,
					      htt_t2h_msg);
	dp_wdi_event_handler(WDI_EVENT_LITE_T2H, soc->dp_soc,
		htt_t2h_msg, HTT_INVALID_PEER, WDI_NO_VAL,
		pdev_id);
	return free_buf;
}
#else
/* stub: without WDI events / pktlog, always let the caller free the nbuf */
static bool
dp_ppdu_stats_ind_handler(struct htt_soc *soc,
				uint32_t *msg_word,
				qdf_nbuf_t htt_t2h_msg)
{
	return true;
}
#endif
3662
#if defined(WDI_EVENT_ENABLE) && \
	!defined(REMOVE_PKT_LOG)
/*
 * dp_pktlog_msg_handler() - Pktlog msg handler
 * @soc: HTT SOC handle
 * @msg_word: Pointer to payload (first 32-bit word of the HTT message)
 *
 * Extracts the pdev id from the message header and delivers the packet
 * log payload (which begins at the second word) to WDI subscribers.
 *
 * Return: None
 */
static void
dp_pktlog_msg_handler(struct htt_soc *soc,
		      uint32_t *msg_word)
{
	uint8_t pdev_id;
	uint32_t *pl_hdr;

	pdev_id = HTT_T2H_PKTLOG_PDEV_ID_GET(*msg_word);
	/* firmware reports HW mac ids; convert to the SW pdev index */
	pdev_id = DP_HW2SW_MACID(pdev_id);
	/* pktlog header starts right after the HTT message header word */
	pl_hdr = (msg_word + 1);
	dp_wdi_event_handler(WDI_EVENT_OFFLOAD_ALL, soc->dp_soc,
		pl_hdr, HTT_INVALID_PEER, WDI_NO_VAL,
		pdev_id);
}
#else
/* stub: pktlog disabled at build time */
static void
dp_pktlog_msg_handler(struct htt_soc *soc,
		      uint32_t *msg_word)
{
}
#endif
Ruben Columbus43194932019-05-24 09:56:52 -07003693
Ruben Columbus814e6cb2019-09-10 15:49:11 -07003694/*
3695 * time_allow_print() - time allow print
3696 * @htt_ring_tt: ringi_id array of timestamps
3697 * @ring_id: ring_id (index)
3698 *
3699 * Return: 1 for successfully saving timestamp in array
3700 * and 0 for timestamp falling within 2 seconds after last one
3701 */
3702static bool time_allow_print(unsigned long *htt_ring_tt, u_int8_t ring_id)
3703{
3704 unsigned long tstamp;
3705 unsigned long delta;
3706
3707 tstamp = qdf_get_system_timestamp();
3708
3709 if (!htt_ring_tt)
3710 return 0; //unable to print backpressure messages
3711
3712 if (htt_ring_tt[ring_id] == -1) {
3713 htt_ring_tt[ring_id] = tstamp;
3714 return 1;
3715 }
3716 delta = tstamp - htt_ring_tt[ring_id];
3717 if (delta >= 2000) {
3718 htt_ring_tt[ring_id] = tstamp;
3719 return 1;
3720 }
3721
3722 return 0;
3723}
3724
/*
 * dp_htt_alert_print() - emit a two-line backpressure alert log
 * @msg_type: HTT T2H message type that carried the event
 * @pdev_id: SW pdev id the event belongs to
 * @ring_id: id of the ring reporting backpressure
 * @hp_idx: ring head pointer index at the time of the event
 * @tp_idx: ring tail pointer index at the time of the event
 * @bkp_time: backpressure duration reported by firmware, in ms
 * @ring_stype: printable name of the ring type (UMAC/LMAC/UNKNOWN)
 */
static void dp_htt_alert_print(enum htt_t2h_msg_type msg_type,
			       u_int8_t pdev_id, u_int8_t ring_id,
			       u_int16_t hp_idx, u_int16_t tp_idx,
			       u_int32_t bkp_time, char *ring_stype)
{
	dp_alert("msg_type: %d pdev_id: %d ring_type: %s ",
		 msg_type, pdev_id, ring_stype);
	dp_alert("ring_id: %d hp_idx: %d tp_idx: %d bkpressure_time_ms: %d ",
		 ring_id, hp_idx, tp_idx, bkp_time);
}
3735
/*
 * dp_htt_bkp_event_alert() - handle an HTT backpressure event indication
 * @msg_word: pointer to the first word of the T2H message payload
 * @soc: HTT SOC handle
 *
 * Decodes the ring identity and head/tail indices from the three-word
 * backpressure indication, prints an alert (rate-limited per ring via
 * time_allow_print()) and then dumps ring and napi stats for the pdev.
 *
 * Return: None
 */
static void dp_htt_bkp_event_alert(u_int32_t *msg_word, struct htt_soc *soc)
{
	u_int8_t ring_type;
	u_int8_t pdev_id;
	u_int8_t ring_id;
	u_int16_t hp_idx;
	u_int16_t tp_idx;
	u_int32_t bkp_time;
	enum htt_t2h_msg_type msg_type;
	struct dp_soc *dpsoc;
	struct dp_pdev *pdev;
	struct dp_htt_timestamp *radio_tt;

	if (!soc)
		return;

	dpsoc = (struct dp_soc *)soc->dp_soc;
	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
	ring_type = HTT_T2H_RX_BKPRESSURE_RING_TYPE_GET(*msg_word);
	pdev_id = HTT_T2H_RX_BKPRESSURE_PDEV_ID_GET(*msg_word);
	/* firmware reports HW mac ids; convert to the SW pdev index */
	pdev_id = DP_HW2SW_MACID(pdev_id);
	pdev = (struct dp_pdev *)dpsoc->pdev_list[pdev_id];
	ring_id = HTT_T2H_RX_BKPRESSURE_RINGID_GET(*msg_word);
	/* head/tail indices live in word 1, duration in word 2 */
	hp_idx = HTT_T2H_RX_BKPRESSURE_HEAD_IDX_GET(*(msg_word + 1));
	tp_idx = HTT_T2H_RX_BKPRESSURE_TAIL_IDX_GET(*(msg_word + 1));
	bkp_time = HTT_T2H_RX_BKPRESSURE_TIME_MS_GET(*(msg_word + 2));
	/* per-pdev print-rate-limit timestamp tables */
	radio_tt = &soc->pdevid_tt[pdev_id];

	switch (ring_type) {
	case HTT_SW_RING_TYPE_UMAC:
		if (!time_allow_print(radio_tt->umac_ttt, ring_id))
			return;
		dp_htt_alert_print(msg_type, pdev_id, ring_id, hp_idx, tp_idx,
				   bkp_time, "HTT_SW_RING_TYPE_UMAC");
		break;
	case HTT_SW_RING_TYPE_LMAC:
		if (!time_allow_print(radio_tt->lmac_ttt, ring_id))
			return;
		dp_htt_alert_print(msg_type, pdev_id, ring_id, hp_idx, tp_idx,
				   bkp_time, "HTT_SW_RING_TYPE_LMAC");
		break;
	default:
		/* unknown ring types are printed unconditionally */
		dp_htt_alert_print(msg_type, pdev_id, ring_id, hp_idx, tp_idx,
				   bkp_time, "UNKNOWN");
		break;
	}

	dp_print_ring_stats(pdev);
	dp_print_napi_stats(pdev->soc);
}
3793
/*
 * dp_htt_t2h_msg_handler() - Generic Target to host Msg/event handler
 * @context: Opaque context (HTT SOC handle)
 * @pkt: HTC packet whose pPktContext carries the T2H message nbuf
 *
 * Dispatches every firmware-to-host HTT message on its message-type
 * field.  The nbuf is freed here unless a handler takes ownership
 * (currently only the PPDU stats path may do so, via free_buf).
 */
static void dp_htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
{
	struct htt_soc *soc = (struct htt_soc *) context;
	qdf_nbuf_t htt_t2h_msg = (qdf_nbuf_t) pkt->pPktContext;
	u_int32_t *msg_word;
	enum htt_t2h_msg_type msg_type;
	bool free_buf = true;

	/* check for successful message reception */
	if (pkt->Status != QDF_STATUS_SUCCESS) {
		/* cancellations during teardown are not counted as errors */
		if (pkt->Status != QDF_STATUS_E_CANCELED)
			soc->stats.htc_err_cnt++;

		qdf_nbuf_free(htt_t2h_msg);
		return;
	}

	/* TODO: Check if we should pop the HTC/HTT header alignment padding */

	msg_word = (u_int32_t *) qdf_nbuf_data(htt_t2h_msg);
	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
	/* record the event in the HTT logger before dispatch */
	htt_event_record(soc->htt_logger_handle,
			 msg_type, (uint8_t *)msg_word);
	switch (msg_type) {
	case HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND:
	{
		dp_htt_bkp_event_alert(msg_word, soc);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_MAP:
		{
			u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
			u_int8_t *peer_mac_addr;
			u_int16_t peer_id;
			u_int16_t hw_peer_id;
			u_int8_t vdev_id;
			u_int8_t is_wds;
			struct dp_soc *dpsoc = (struct dp_soc *)soc->dp_soc;

			peer_id = HTT_RX_PEER_MAP_PEER_ID_GET(*msg_word);
			hw_peer_id =
				HTT_RX_PEER_MAP_HW_PEER_ID_GET(*(msg_word+2));
			vdev_id = HTT_RX_PEER_MAP_VDEV_ID_GET(*msg_word);
			/* mac address occupies words 1..2 in swizzled form */
			peer_mac_addr = htt_t2h_mac_addr_deswizzle(
				(u_int8_t *) (msg_word+1),
				&mac_addr_deswizzle_buf[0]);
			QDF_TRACE(QDF_MODULE_ID_TXRX,
				QDF_TRACE_LEVEL_INFO,
				"HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n",
				peer_id, vdev_id);

			/*
			 * check if peer already exists for this peer_id, if so
			 * this peer map event is in response for a wds peer add
			 * wmi command sent during wds source port learning.
			 * in this case just add the ast entry to the existing
			 * peer ast_list.
			 */
			is_wds = !!(dpsoc->peer_id_to_obj_map[peer_id]);
			dp_rx_peer_map_handler(soc->dp_soc, peer_id, hw_peer_id,
					vdev_id, peer_mac_addr, 0,
					is_wds);
			break;
		}
	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
		{
			u_int16_t peer_id;
			u_int8_t vdev_id;
			/* v1 unmap carries no mac address; pass all-zero */
			u_int8_t mac_addr[QDF_MAC_ADDR_SIZE] = {0};
			peer_id = HTT_RX_PEER_UNMAP_PEER_ID_GET(*msg_word);
			vdev_id = HTT_RX_PEER_UNMAP_VDEV_ID_GET(*msg_word);

			dp_rx_peer_unmap_handler(soc->dp_soc, peer_id,
						 vdev_id, mac_addr, 0);
			break;
		}
	case HTT_T2H_MSG_TYPE_SEC_IND:
		{
			u_int16_t peer_id;
			enum cdp_sec_type sec_type;
			int is_unicast;

			peer_id = HTT_SEC_IND_PEER_ID_GET(*msg_word);
			sec_type = HTT_SEC_IND_SEC_TYPE_GET(*msg_word);
			is_unicast = HTT_SEC_IND_UNICAST_GET(*msg_word);
			/* point to the first part of the Michael key */
			msg_word++;
			dp_rx_sec_ind_handler(
				soc->dp_soc, peer_id, sec_type, is_unicast,
				msg_word, msg_word + 2);
			break;
		}

	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
		{
			/* handler may take ownership of the nbuf */
			free_buf = dp_ppdu_stats_ind_handler(soc, msg_word,
							     htt_t2h_msg);
			break;
		}

	case HTT_T2H_MSG_TYPE_PKTLOG:
		{
			dp_pktlog_msg_handler(soc, msg_word);
			break;
		}

	case HTT_T2H_MSG_TYPE_VERSION_CONF:
		{
			/* release the runtime-PM reference taken on request */
			htc_pm_runtime_put(soc->htc_soc);
			soc->tgt_ver.major = HTT_VER_CONF_MAJOR_GET(*msg_word);
			soc->tgt_ver.minor = HTT_VER_CONF_MINOR_GET(*msg_word);
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
				"target uses HTT version %d.%d; host uses %d.%d",
				soc->tgt_ver.major, soc->tgt_ver.minor,
				HTT_CURRENT_VERSION_MAJOR,
				HTT_CURRENT_VERSION_MINOR);
			if (soc->tgt_ver.major != HTT_CURRENT_VERSION_MAJOR) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					QDF_TRACE_LEVEL_ERROR,
					"*** Incompatible host/target HTT versions!");
			}
			/* abort if the target is incompatible with the host */
			qdf_assert(soc->tgt_ver.major ==
				HTT_CURRENT_VERSION_MAJOR);
			if (soc->tgt_ver.minor != HTT_CURRENT_VERSION_MINOR) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					QDF_TRACE_LEVEL_WARN,
					"*** Warning: host/target HTT versions"
					" are different, though compatible!");
			}
			break;
		}
	case HTT_T2H_MSG_TYPE_RX_ADDBA:
		{
			uint16_t peer_id;
			uint8_t tid;
			uint8_t win_sz;
			uint16_t status;
			struct dp_peer *peer;

			/*
			 * Update REO Queue Desc with new values
			 */
			peer_id = HTT_RX_ADDBA_PEER_ID_GET(*msg_word);
			tid = HTT_RX_ADDBA_TID_GET(*msg_word);
			win_sz = HTT_RX_ADDBA_WIN_SIZE_GET(*msg_word);
			peer = dp_peer_find_by_id(soc->dp_soc, peer_id);

			/*
			 * Window size needs to be incremented by 1
			 * since fw needs to represent a value of 256
			 * using just 8 bits
			 */
			if (peer) {
				status = dp_addba_requestprocess_wifi3(peer,
						0, tid, 0, win_sz + 1, 0xffff);

				/*
				 * If PEER_LOCK_REF_PROTECT enabled dec ref
				 * which is inc by dp_peer_find_by_id
				 */
				dp_peer_unref_del_find_by_id(peer);

				QDF_TRACE(QDF_MODULE_ID_TXRX,
					QDF_TRACE_LEVEL_INFO,
					FL("PeerID %d BAW %d TID %d stat %d"),
					peer_id, win_sz, tid, status);

			} else {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					QDF_TRACE_LEVEL_ERROR,
					FL("Peer not found peer id %d"),
					peer_id);
			}
			break;
		}
	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
		{
			dp_txrx_fw_stats_handler(soc->dp_soc, htt_t2h_msg);
			break;
		}
	case HTT_T2H_MSG_TYPE_PEER_MAP_V2:
		{
			u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
			u_int8_t *peer_mac_addr;
			u_int16_t peer_id;
			u_int16_t hw_peer_id;
			u_int8_t vdev_id;
			bool is_wds;
			u_int16_t ast_hash;

			peer_id = HTT_RX_PEER_MAP_V2_SW_PEER_ID_GET(*msg_word);
			hw_peer_id =
			HTT_RX_PEER_MAP_V2_HW_PEER_ID_GET(*(msg_word + 2));
			vdev_id = HTT_RX_PEER_MAP_V2_VDEV_ID_GET(*msg_word);
			peer_mac_addr =
			htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
						   &mac_addr_deswizzle_buf[0]);
			/* v2 map carries next-hop flag and AST hash in word 3 */
			is_wds =
				HTT_RX_PEER_MAP_V2_NEXT_HOP_GET(*(msg_word + 3));
			ast_hash =
			HTT_RX_PEER_MAP_V2_AST_HASH_VALUE_GET(*(msg_word + 3));
			QDF_TRACE(QDF_MODULE_ID_TXRX,
				  QDF_TRACE_LEVEL_INFO,
				  "HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n",
				  peer_id, vdev_id);

			dp_rx_peer_map_handler(soc->dp_soc, peer_id,
					       hw_peer_id, vdev_id,
					       peer_mac_addr, ast_hash,
					       is_wds);
			break;
		}
	case HTT_T2H_MSG_TYPE_PEER_UNMAP_V2:
		{
			u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
			u_int8_t *mac_addr;
			u_int16_t peer_id;
			u_int8_t vdev_id;
			u_int8_t is_wds;

			peer_id =
			HTT_RX_PEER_UNMAP_V2_SW_PEER_ID_GET(*msg_word);
			vdev_id = HTT_RX_PEER_UNMAP_V2_VDEV_ID_GET(*msg_word);
			mac_addr =
			htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
						   &mac_addr_deswizzle_buf[0]);
			is_wds =
				HTT_RX_PEER_UNMAP_V2_NEXT_HOP_GET(*(msg_word + 2));
			QDF_TRACE(QDF_MODULE_ID_TXRX,
				  QDF_TRACE_LEVEL_INFO,
				  "HTT_T2H_MSG_TYPE_PEER_UNMAP msg for peer id %d vdev id %d n",
				  peer_id, vdev_id);

			dp_rx_peer_unmap_handler(soc->dp_soc, peer_id,
						 vdev_id, mac_addr,
						 is_wds);
			break;
		}
	default:
		break;
	};

	/* Free the indication buffer */
	if (free_buf)
		qdf_nbuf_free(htt_t2h_msg);
}
4046
/*
 * dp_htt_h2t_full() - Send full handler (called from HTC)
 * @context: Opaque context (HTT SOC handle)
 * @pkt: HTC packet
 *
 * Invoked by HTC when the send queue exceeds MaxSendQueueDepth; HTT
 * always chooses to keep queuing rather than drop.
 *
 * Return: enum htc_send_full_action (always HTC_SEND_FULL_KEEP)
 */
static enum htc_send_full_action
dp_htt_h2t_full(void *context, HTC_PACKET *pkt)
{
	return HTC_SEND_FULL_KEEP;
}
4059
4060/*
Karunakar Dasineniead27fb2017-09-28 14:28:48 -07004061 * dp_htt_hif_t2h_hp_callback() - HIF callback for high priority T2H messages
4062 * @context: Opaque context (HTT SOC handle)
4063 * @nbuf: nbuf containing T2H message
4064 * @pipe_id: HIF pipe ID
4065 *
4066 * Return: QDF_STATUS
4067 *
4068 * TODO: Temporary change to bypass HTC connection for this new HIF pipe, which
Jeff Johnsonff2dfb22018-05-12 10:27:57 -07004069 * will be used for packet log and other high-priority HTT messages. Proper
Karunakar Dasineniead27fb2017-09-28 14:28:48 -07004070 * HTC connection to be added later once required FW changes are available
4071 */
4072static QDF_STATUS
4073dp_htt_hif_t2h_hp_callback (void *context, qdf_nbuf_t nbuf, uint8_t pipe_id)
4074{
Venkata Sharath Chandra Manchalad18887e2018-10-02 18:18:52 -07004075 QDF_STATUS rc = QDF_STATUS_SUCCESS;
Karunakar Dasineniead27fb2017-09-28 14:28:48 -07004076 HTC_PACKET htc_pkt;
4077
4078 qdf_assert_always(pipe_id == DP_HTT_T2H_HP_PIPE);
4079 qdf_mem_zero(&htc_pkt, sizeof(htc_pkt));
4080 htc_pkt.Status = QDF_STATUS_SUCCESS;
4081 htc_pkt.pPktContext = (void *)nbuf;
4082 dp_htt_t2h_msg_handler(context, &htc_pkt);
4083
4084 return rc;
4085}
4086
/*
 * htt_htc_soc_attach() - Register SOC level HTT instance with HTC
 * @soc: HTT SOC handle
 *
 * Connects the HTT data message service to HTC, saves the resulting
 * endpoint, initializes the HTT event logger and registers the
 * high-priority T2H pipe callback.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
htt_htc_soc_attach(struct htt_soc *soc)
{
	struct htc_service_connect_req connect;
	struct htc_service_connect_resp response;
	QDF_STATUS status;
	struct dp_soc *dpsoc = soc->dp_soc;

	qdf_mem_zero(&connect, sizeof(connect));
	qdf_mem_zero(&response, sizeof(response));

	connect.pMetaData = NULL;
	connect.MetaDataLength = 0;
	connect.EpCallbacks.pContext = soc;
	connect.EpCallbacks.EpTxComplete = dp_htt_h2t_send_complete;
	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
	connect.EpCallbacks.EpRecv = dp_htt_t2h_msg_handler;

	/* rx buffers currently are provided by HIF, not by EpRecvRefill */
	connect.EpCallbacks.EpRecvRefill = NULL;

	/* N/A, fill is done by HIF */
	connect.EpCallbacks.RecvRefillWaterMark = 1;

	connect.EpCallbacks.EpSendFull = dp_htt_h2t_full;
	/*
	 * Specify how deep to let a queue get before htc_send_pkt will
	 * call the EpSendFull function due to excessive send queue depth.
	 */
	connect.MaxSendQueueDepth = DP_HTT_MAX_SEND_QUEUE_DEPTH;

	/* disable flow control for HTT data message service */
	connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;

	/* connect to control service */
	connect.service_id = HTT_DATA_MSG_SVC;

	status = htc_connect_service(soc->htc_soc, &connect, &response);

	if (status != QDF_STATUS_SUCCESS)
		return status;

	soc->htc_endpoint = response.Endpoint;

	/* let HIF know which endpoint carries HTT config traffic */
	hif_save_htc_htt_config_endpoint(dpsoc->hif_handle, soc->htc_endpoint);

	htt_interface_logging_init(&soc->htt_logger_handle);
	dp_hif_update_pipe_callback(soc->dp_soc, (void *)soc,
		dp_htt_hif_t2h_hp_callback, DP_HTT_T2H_HP_PIPE);

	return QDF_STATUS_SUCCESS; /* success */
}
4145
4146/*
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05304147 * htt_soc_initialize() - SOC level HTT initialization
4148 * @htt_soc: Opaque htt SOC handle
4149 * @ctrl_psoc: Opaque ctrl SOC handle
4150 * @htc_soc: SOC level HTC handle
4151 * @hal_soc: Opaque HAL SOC handle
4152 * @osdev: QDF device
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004153 *
4154 * Return: HTT handle on success; NULL on failure
4155 */
4156void *
Akshay Kosigieec6db92019-07-02 14:25:54 +05304157htt_soc_initialize(struct htt_soc *htt_soc,
4158 struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
Akshay Kosigia5c46a42019-06-27 12:43:01 +05304159 HTC_HANDLE htc_soc,
Akshay Kosigia870c612019-07-08 23:10:30 +05304160 hal_soc_handle_t hal_soc_hdl, qdf_device_t osdev)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004161{
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05304162 struct htt_soc *soc = (struct htt_soc *)htt_soc;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004163
4164 soc->osdev = osdev;
Sathyanarayanan Esakkiappan38c6f982017-12-05 12:00:31 +05304165 soc->ctrl_psoc = ctrl_psoc;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004166 soc->htc_soc = htc_soc;
Akshay Kosigia870c612019-07-08 23:10:30 +05304167 soc->hal_soc = hal_soc_hdl;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004168
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004169 if (htt_htc_soc_attach(soc))
4170 goto fail2;
4171
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05304172 return soc;
4173
4174fail2:
4175 return NULL;
4176}
4177
/*
 * htt_soc_htc_dealloc() - release HTC-related HTT SOC resources
 * @htt_handle: HTT SOC handle
 *
 * Tears down the HTT event logger and drains both the misc packet
 * pool and the HTC packet free list.  Counterpart of
 * htt_soc_htc_prealloc() / htt_htc_soc_attach().
 */
void htt_soc_htc_dealloc(struct htt_soc *htt_handle)
{
	htt_interface_logging_deinit(htt_handle->htt_logger_handle);
	htt_htc_misc_pkt_pool_free(htt_handle);
	htt_htc_pkt_pool_free(htt_handle);
}
4184
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05304185/*
4186 * htt_soc_htc_prealloc() - HTC memory prealloc
4187 * @htt_soc: SOC level HTT handle
4188 *
4189 * Return: QDF_STATUS_SUCCESS on Success or
4190 * QDF_STATUS_E_NOMEM on allocation failure
4191 */
Mohit Khanna40f76b52018-11-30 14:10:55 -08004192QDF_STATUS htt_soc_htc_prealloc(struct htt_soc *soc)
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05304193{
4194 int i;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004195
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05304196 soc->htt_htc_pkt_freelist = NULL;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004197 /* pre-allocate some HTC_PACKET objects */
4198 for (i = 0; i < HTT_HTC_PKT_POOL_INIT_SIZE; i++) {
4199 struct dp_htt_htc_pkt_union *pkt;
4200 pkt = qdf_mem_malloc(sizeof(*pkt));
4201 if (!pkt)
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05304202 return QDF_STATUS_E_NOMEM;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004203
4204 htt_htc_pkt_free(soc, &pkt->u.pkt);
4205 }
Anish Nataraje9d4c3b2018-11-24 22:24:56 +05304206 return QDF_STATUS_SUCCESS;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004207}
4208
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004209/*
Mohit Khanna40f76b52018-11-30 14:10:55 -08004210 * htt_soc_detach() - Free SOC level HTT handle
4211 * @htt_hdl: HTT SOC handle
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004212 */
Akshay Kosigia5c46a42019-06-27 12:43:01 +05304213void htt_soc_detach(struct htt_soc *htt_hdl)
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004214{
Ruben Columbus814e6cb2019-09-10 15:49:11 -07004215 int i;
Mohit Khanna40f76b52018-11-30 14:10:55 -08004216 struct htt_soc *htt_handle = (struct htt_soc *)htt_hdl;
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004217
Ruben Columbus814e6cb2019-09-10 15:49:11 -07004218 for (i = 0; i < MAX_PDEV_CNT; i++) {
4219 qdf_mem_free(htt_handle->pdevid_tt[i].umac_ttt);
4220 qdf_mem_free(htt_handle->pdevid_tt[i].lmac_ttt);
4221 }
4222
Mohit Khanna40f76b52018-11-30 14:10:55 -08004223 HTT_TX_MUTEX_DESTROY(&htt_handle->htt_tx_mutex);
4224 qdf_mem_free(htt_handle);
Ruben Columbus814e6cb2019-09-10 15:49:11 -07004225
Karunakar Dasineni9b814ce2016-09-01 15:00:09 -07004226}
4227
Ishank Jain6290a3c2017-03-21 10:49:39 +05304228/**
4229 * dp_h2t_ext_stats_msg_send(): function to contruct HTT message to pass to FW
4230 * @pdev: DP PDEV handle
4231 * @stats_type_upload_mask: stats type requested by user
4232 * @config_param_0: extra configuration parameters
4233 * @config_param_1: extra configuration parameters
4234 * @config_param_2: extra configuration parameters
4235 * @config_param_3: extra configuration parameters
Venkata Sharath Chandra Manchala4face242018-04-23 11:48:28 -07004236 * @mac_id: mac number
Ishank Jain6290a3c2017-03-21 10:49:39 +05304237 *
4238 * return: QDF STATUS
4239 */
4240QDF_STATUS dp_h2t_ext_stats_msg_send(struct dp_pdev *pdev,
4241 uint32_t stats_type_upload_mask, uint32_t config_param_0,
4242 uint32_t config_param_1, uint32_t config_param_2,
Venkata Sharath Chandra Manchalaac863c42018-02-19 22:47:07 -08004243 uint32_t config_param_3, int cookie_val, int cookie_msb,
Venkata Sharath Chandra Manchala4face242018-04-23 11:48:28 -07004244 uint8_t mac_id)
Ishank Jain6290a3c2017-03-21 10:49:39 +05304245{
4246 struct htt_soc *soc = pdev->soc->htt_handle;
4247 struct dp_htt_htc_pkt *pkt;
4248 qdf_nbuf_t msg;
4249 uint32_t *msg_word;
Venkata Sharath Chandra Manchalaac863c42018-02-19 22:47:07 -08004250 uint8_t pdev_mask = 0;
Ankit Kumar0ead45c2019-04-29 15:32:49 +05304251 uint8_t *htt_logger_bufp;
Ishank Jain6290a3c2017-03-21 10:49:39 +05304252
4253 msg = qdf_nbuf_alloc(
4254 soc->osdev,
4255 HTT_MSG_BUF_SIZE(HTT_H2T_EXT_STATS_REQ_MSG_SZ),
4256 HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
4257
4258 if (!msg)
4259 return QDF_STATUS_E_NOMEM;
4260
4261 /*TODO:Add support for SOC stats
4262 * Bit 0: SOC Stats
4263 * Bit 1: Pdev stats for pdev id 0
4264 * Bit 2: Pdev stats for pdev id 1
4265 * Bit 3: Pdev stats for pdev id 2
4266 */
Venkata Sharath Chandra Manchala4face242018-04-23 11:48:28 -07004267 mac_id = dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
Ishank Jain6290a3c2017-03-21 10:49:39 +05304268
Venkata Sharath Chandra Manchala4face242018-04-23 11:48:28 -07004269 pdev_mask = 1 << DP_SW2HW_MACID(mac_id);
Ishank Jain6290a3c2017-03-21 10:49:39 +05304270 /*
4271 * Set the length of the message.
4272 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
4273 * separately during the below call to qdf_nbuf_push_head.
4274 * The contribution from the HTC header is added separately inside HTC.
4275 */
4276 if (qdf_nbuf_put_tail(msg, HTT_H2T_EXT_STATS_REQ_MSG_SZ) == NULL) {
4277 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4278 "Failed to expand head for HTT_EXT_STATS");
4279 qdf_nbuf_free(msg);
4280 return QDF_STATUS_E_FAILURE;
4281 }
4282
Chaithanya Garrepalli30927c52017-11-22 14:31:47 +05304283 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4284 "-----%s:%d----\n cookie <-> %d\n config_param_0 %u\n"
4285 "config_param_1 %u\n config_param_2 %u\n"
Aditya Sathishded018e2018-07-02 16:25:21 +05304286 "config_param_4 %u\n -------------",
Chaithanya Garrepalli30927c52017-11-22 14:31:47 +05304287 __func__, __LINE__, cookie_val, config_param_0,
4288 config_param_1, config_param_2, config_param_3);
4289
Ishank Jain6290a3c2017-03-21 10:49:39 +05304290 msg_word = (uint32_t *) qdf_nbuf_data(msg);
4291
4292 qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
Ankit Kumar0ead45c2019-04-29 15:32:49 +05304293 htt_logger_bufp = (uint8_t *)msg_word;
Ishank Jain6290a3c2017-03-21 10:49:39 +05304294 *msg_word = 0;
4295 HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_EXT_STATS_REQ);
4296 HTT_H2T_EXT_STATS_REQ_PDEV_MASK_SET(*msg_word, pdev_mask);
4297 HTT_H2T_EXT_STATS_REQ_STATS_TYPE_SET(*msg_word, stats_type_upload_mask);
4298
4299 /* word 1 */
4300 msg_word++;
4301 *msg_word = 0;
4302 HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_0);
4303
4304 /* word 2 */
4305 msg_word++;
4306 *msg_word = 0;
4307 HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_1);
4308
4309 /* word 3 */
4310 msg_word++;
4311 *msg_word = 0;
4312 HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_2);
4313
4314 /* word 4 */
4315 msg_word++;
4316 *msg_word = 0;
4317 HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_3);
4318
4319 HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, 0);
Chaithanya Garrepalli30927c52017-11-22 14:31:47 +05304320
4321 /* word 5 */
4322 msg_word++;
4323
4324 /* word 6 */
4325 msg_word++;
4326 *msg_word = 0;
4327 HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_val);
4328
4329 /* word 7 */
4330 msg_word++;
4331 *msg_word = 0;
Prathyusha Guduri43bb0562018-02-12 18:30:54 +05304332 /*Using last 2 bits for pdev_id */
4333 cookie_msb = ((cookie_msb << 2) | pdev->pdev_id);
4334 HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_msb);
Chaithanya Garrepalli30927c52017-11-22 14:31:47 +05304335
Ishank Jain6290a3c2017-03-21 10:49:39 +05304336 pkt = htt_htc_pkt_alloc(soc);
4337 if (!pkt) {
4338 qdf_nbuf_free(msg);
4339 return QDF_STATUS_E_NOMEM;
4340 }
4341
4342 pkt->soc_ctxt = NULL; /* not used during send-done callback */
4343
4344 SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
4345 dp_htt_h2t_send_complete_free_netbuf,
4346 qdf_nbuf_data(msg), qdf_nbuf_len(msg),
4347 soc->htc_endpoint,
Yue Mae92fe022019-07-16 18:47:52 -07004348 /* tag for FW response msg not guaranteed */
4349 HTC_TX_PACKET_TAG_RUNTIME_PUT);
Ishank Jain6290a3c2017-03-21 10:49:39 +05304350
4351 SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
Ankit Kumar0ead45c2019-04-29 15:32:49 +05304352 DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_EXT_STATS_REQ,
4353 htt_logger_bufp);
Pramod Simhae0baa442017-06-27 15:21:39 -07004354 return 0;
Ishank Jain6290a3c2017-03-21 10:49:39 +05304355}
Keyur Parekhdb0fa142017-07-13 19:40:22 -07004356
/* This guard will be reverted once a proper HTT header definition for
 * HTT_H2T_MSG_TYPE_PPDU_STATS_CFG is available in htt.h.
 */
#if defined(WDI_EVENT_ENABLE)
/**
 * dp_h2t_cfg_stats_msg_send(): construct and send the HTT PPDU stats
 * configuration message to the target FW
 * @pdev: DP PDEV handle
 * @stats_type_upload_mask: bitmask of stats TLV types requested by user
 * @mac_id: Mac id number
 *
 * return: QDF STATUS (0 on success, QDF_STATUS_E_* on failure)
 */
QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev,
		uint32_t stats_type_upload_mask, uint8_t mac_id)
{
	struct htt_soc *htt_hdl = pdev->soc->htt_handle;
	struct dp_htt_htc_pkt *htt_pkt;
	qdf_nbuf_t netbuf;
	uint32_t *word;
	uint8_t mac_mask;

	netbuf = qdf_nbuf_alloc(
			htt_hdl->osdev,
			HTT_MSG_BUF_SIZE(HTT_H2T_PPDU_STATS_CFG_MSG_SZ),
			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, true);
	if (!netbuf) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Fail to allocate HTT_H2T_PPDU_STATS_CFG_MSG_SZ msg buffer");
		qdf_assert(0);
		return QDF_STATUS_E_NOMEM;
	}

	/*TODO:Add support for SOC stats
	 * Bit 0: SOC Stats
	 * Bit 1: Pdev stats for pdev id 0
	 * Bit 2: Pdev stats for pdev id 1
	 * Bit 3: Pdev stats for pdev id 2
	 */
	mac_mask = 1 << DP_SW2HW_MACID(mac_id);

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (!qdf_nbuf_put_tail(netbuf, HTT_H2T_PPDU_STATS_CFG_MSG_SZ)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Failed to expand head for HTT_CFG_STATS");
		qdf_nbuf_free(netbuf);
		return QDF_STATUS_E_FAILURE;
	}

	/* capture HTT payload start before rewinding for the HTC header */
	word = (uint32_t *)qdf_nbuf_data(netbuf);

	qdf_nbuf_push_head(netbuf, HTC_HDR_ALIGNMENT_PADDING);
	*word = 0;
	HTT_H2T_MSG_TYPE_SET(*word, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);
	HTT_H2T_PPDU_STATS_CFG_PDEV_MASK_SET(*word, mac_mask);
	HTT_H2T_PPDU_STATS_CFG_TLV_BITMASK_SET(*word,
					       stats_type_upload_mask);

	htt_pkt = htt_htc_pkt_alloc(htt_hdl);
	if (!htt_pkt) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Fail to allocate dp_htt_htc_pkt buffer");
		qdf_assert(0);
		qdf_nbuf_free(netbuf);
		return QDF_STATUS_E_NOMEM;
	}

	htt_pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(&htt_pkt->htc_pkt,
			       dp_htt_h2t_send_complete_free_netbuf,
			       qdf_nbuf_data(netbuf), qdf_nbuf_len(netbuf),
			       htt_hdl->htc_endpoint,
			       1); /* tag - not relevant here */

	SET_HTC_PACKET_NET_BUF_CONTEXT(&htt_pkt->htc_pkt, netbuf);
	DP_HTT_SEND_HTC_PKT(htt_hdl, htt_pkt, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG,
			    (uint8_t *)word);
	return 0;
}
#endif
Amir Patel1ea85d42019-01-09 15:19:10 +05304443
4444void
4445dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type,
4446 uint32_t *tag_buf)
4447{
4448 switch (tag_type) {
4449 case HTT_STATS_PEER_DETAILS_TAG:
4450 {
4451 htt_peer_details_tlv *dp_stats_buf =
4452 (htt_peer_details_tlv *)tag_buf;
4453
4454 pdev->fw_stats_peer_id = dp_stats_buf->sw_peer_id;
4455 }
4456 break;
4457 case HTT_STATS_PEER_STATS_CMN_TAG:
4458 {
4459 htt_peer_stats_cmn_tlv *dp_stats_buf =
4460 (htt_peer_stats_cmn_tlv *)tag_buf;
4461
4462 struct dp_peer *peer = dp_peer_find_by_id(pdev->soc,
4463 pdev->fw_stats_peer_id);
4464
4465 if (peer && !peer->bss_peer) {
4466 peer->stats.tx.inactive_time =
4467 dp_stats_buf->inactive_time;
4468 qdf_event_set(&pdev->fw_peer_stats_event);
4469 }
4470 if (peer)
4471 dp_peer_unref_del_find_by_id(peer);
4472 }
4473 break;
4474 default:
4475 qdf_err("Invalid tag_type");
4476 }
4477}
Sumeet Raoc4fa4df2019-07-05 02:11:19 -07004478
4479/**
4480 * dp_htt_rx_flow_fst_setup(): Send HTT Rx FST setup message to FW
4481 * @pdev: DP pdev handle
4482 * @fse_setup_info: FST setup parameters
4483 *
4484 * Return: Success when HTT message is sent, error on failure
4485 */
4486QDF_STATUS
4487dp_htt_rx_flow_fst_setup(struct dp_pdev *pdev,
4488 struct dp_htt_rx_flow_fst_setup *fse_setup_info)
4489{
4490 struct htt_soc *soc = pdev->soc->htt_handle;
4491 struct dp_htt_htc_pkt *pkt;
4492 qdf_nbuf_t msg;
4493 u_int32_t *msg_word;
4494 struct htt_h2t_msg_rx_fse_setup_t *fse_setup;
4495 uint8_t *htt_logger_bufp;
4496 u_int32_t *key;
4497
4498 msg = qdf_nbuf_alloc(
4499 soc->osdev,
4500 HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_rx_fse_setup_t)),
4501 /* reserve room for the HTC header */
4502 HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
4503
4504 if (!msg)
4505 return QDF_STATUS_E_NOMEM;
4506
4507 /*
4508 * Set the length of the message.
4509 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
4510 * separately during the below call to qdf_nbuf_push_head.
4511 * The contribution from the HTC header is added separately inside HTC.
4512 */
4513 if (!qdf_nbuf_put_tail(msg,
4514 sizeof(struct htt_h2t_msg_rx_fse_setup_t))) {
4515 qdf_err("Failed to expand head for HTT RX_FSE_SETUP msg");
4516 return QDF_STATUS_E_FAILURE;
4517 }
4518
4519 /* fill in the message contents */
4520 msg_word = (u_int32_t *)qdf_nbuf_data(msg);
4521
4522 memset(msg_word, 0, sizeof(struct htt_h2t_msg_rx_fse_setup_t));
4523 /* rewind beyond alignment pad to get to the HTC header reserved area */
4524 qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
4525 htt_logger_bufp = (uint8_t *)msg_word;
4526
4527 *msg_word = 0;
4528 HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FSE_SETUP_CFG);
4529
4530 fse_setup = (struct htt_h2t_msg_rx_fse_setup_t *)msg_word;
4531
4532 HTT_RX_FSE_SETUP_PDEV_ID_SET(*msg_word, fse_setup_info->pdev_id);
4533
4534 msg_word++;
4535 HTT_RX_FSE_SETUP_NUM_REC_SET(*msg_word, fse_setup_info->max_entries);
4536 HTT_RX_FSE_SETUP_MAX_SEARCH_SET(*msg_word, fse_setup_info->max_search);
4537 HTT_RX_FSE_SETUP_IP_DA_SA_PREFIX_SET(*msg_word,
4538 fse_setup_info->ip_da_sa_prefix);
4539
4540 msg_word++;
4541 HTT_RX_FSE_SETUP_BASE_ADDR_LO_SET(*msg_word,
4542 fse_setup_info->base_addr_lo);
4543 msg_word++;
4544 HTT_RX_FSE_SETUP_BASE_ADDR_HI_SET(*msg_word,
4545 fse_setup_info->base_addr_hi);
4546
4547 key = (u_int32_t *)fse_setup_info->hash_key;
4548 fse_setup->toeplitz31_0 = *key++;
4549 fse_setup->toeplitz63_32 = *key++;
4550 fse_setup->toeplitz95_64 = *key++;
4551 fse_setup->toeplitz127_96 = *key++;
4552 fse_setup->toeplitz159_128 = *key++;
4553 fse_setup->toeplitz191_160 = *key++;
4554 fse_setup->toeplitz223_192 = *key++;
4555 fse_setup->toeplitz255_224 = *key++;
4556 fse_setup->toeplitz287_256 = *key++;
4557 fse_setup->toeplitz314_288 = *key;
4558
4559 msg_word++;
4560 HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz31_0);
4561 msg_word++;
4562 HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz63_32);
4563 msg_word++;
4564 HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz95_64);
4565 msg_word++;
4566 HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz127_96);
4567 msg_word++;
4568 HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz159_128);
4569 msg_word++;
4570 HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz191_160);
4571 msg_word++;
4572 HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz223_192);
4573 msg_word++;
4574 HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz255_224);
4575 msg_word++;
4576 HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz287_256);
4577 msg_word++;
4578 HTT_RX_FSE_SETUP_HASH_314_288_SET(*msg_word,
4579 fse_setup->toeplitz314_288);
4580
4581 pkt = htt_htc_pkt_alloc(soc);
4582 if (!pkt) {
4583 qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
4584 qdf_assert(0);
4585 qdf_nbuf_free(msg);
4586 return QDF_STATUS_E_RESOURCES; /* failure */
4587 }
4588
4589 pkt->soc_ctxt = NULL; /* not used during send-done callback */
4590
4591 SET_HTC_PACKET_INFO_TX(
4592 &pkt->htc_pkt,
4593 dp_htt_h2t_send_complete_free_netbuf,
4594 qdf_nbuf_data(msg),
4595 qdf_nbuf_len(msg),
4596 soc->htc_endpoint,
4597 1); /* tag - not relevant here */
4598
4599 SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
4600
4601 DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_RX_FSE_SETUP_CFG,
4602 htt_logger_bufp);
4603
4604 qdf_info("HTT_H2T RX_FSE_SETUP sent to FW for pdev = %u",
4605 fse_setup_info->pdev_id);
4606 QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_ANY, QDF_TRACE_LEVEL_DEBUG,
4607 (void *)fse_setup_info->hash_key,
4608 fse_setup_info->hash_key_len);
4609
4610 return QDF_STATUS_SUCCESS;
4611}
4612
4613/**
4614 * dp_htt_rx_flow_fse_operation(): Send HTT Flow Search Entry msg to
4615 * add/del a flow in HW
4616 * @pdev: DP pdev handle
4617 * @fse_op_info: Flow entry parameters
4618 *
4619 * Return: Success when HTT message is sent, error on failure
4620 */
4621QDF_STATUS
4622dp_htt_rx_flow_fse_operation(struct dp_pdev *pdev,
4623 struct dp_htt_rx_flow_fst_operation *fse_op_info)
4624{
4625 struct htt_soc *soc = pdev->soc->htt_handle;
4626 struct dp_htt_htc_pkt *pkt;
4627 qdf_nbuf_t msg;
4628 u_int32_t *msg_word;
4629 struct htt_h2t_msg_rx_fse_operation_t *fse_operation;
4630 uint8_t *htt_logger_bufp;
4631
4632 msg = qdf_nbuf_alloc(
4633 soc->osdev,
4634 HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_rx_fse_operation_t)),
4635 /* reserve room for the HTC header */
4636 HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
4637 if (!msg)
4638 return QDF_STATUS_E_NOMEM;
4639
4640 /*
4641 * Set the length of the message.
4642 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
4643 * separately during the below call to qdf_nbuf_push_head.
4644 * The contribution from the HTC header is added separately inside HTC.
4645 */
4646 if (!qdf_nbuf_put_tail(msg,
4647 sizeof(struct htt_h2t_msg_rx_fse_operation_t))) {
4648 qdf_err("Failed to expand head for HTT_RX_FSE_OPERATION msg");
4649 return QDF_STATUS_E_FAILURE;
4650 }
4651
4652 /* fill in the message contents */
4653 msg_word = (u_int32_t *)qdf_nbuf_data(msg);
4654
4655 memset(msg_word, 0, sizeof(struct htt_h2t_msg_rx_fse_operation_t));
4656 /* rewind beyond alignment pad to get to the HTC header reserved area */
4657 qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
4658 htt_logger_bufp = (uint8_t *)msg_word;
4659
4660 *msg_word = 0;
4661 HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FSE_OPERATION_CFG);
4662
4663 fse_operation = (struct htt_h2t_msg_rx_fse_operation_t *)msg_word;
4664
4665 HTT_RX_FSE_OPERATION_PDEV_ID_SET(*msg_word, fse_op_info->pdev_id);
4666 msg_word++;
4667 HTT_RX_FSE_IPSEC_VALID_SET(*msg_word, false);
4668 if (fse_op_info->op_code == DP_HTT_FST_CACHE_INVALIDATE_ENTRY) {
4669 HTT_RX_FSE_OPERATION_SET(*msg_word,
4670 HTT_RX_FSE_CACHE_INVALIDATE_ENTRY);
4671 msg_word++;
4672 HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4673 *msg_word,
4674 qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_31_0));
4675 msg_word++;
4676 HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4677 *msg_word,
4678 qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_63_32));
4679 msg_word++;
4680 HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4681 *msg_word,
4682 qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_95_64));
4683 msg_word++;
4684 HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4685 *msg_word,
4686 qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_127_96));
4687 msg_word++;
4688 HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4689 *msg_word,
4690 qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_31_0));
4691 msg_word++;
4692 HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4693 *msg_word,
4694 qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_63_32));
4695 msg_word++;
4696 HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4697 *msg_word,
4698 qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_95_64));
4699 msg_word++;
4700 HTT_RX_FSE_OPERATION_IP_ADDR_SET(
4701 *msg_word,
4702 qdf_htonl(
4703 fse_op_info->rx_flow->flow_tuple_info.dest_ip_127_96));
4704 msg_word++;
4705 HTT_RX_FSE_SOURCEPORT_SET(
4706 *msg_word,
4707 fse_op_info->rx_flow->flow_tuple_info.src_port);
4708 HTT_RX_FSE_DESTPORT_SET(
4709 *msg_word,
4710 fse_op_info->rx_flow->flow_tuple_info.dest_port);
4711 msg_word++;
4712 HTT_RX_FSE_L4_PROTO_SET(
4713 *msg_word,
4714 fse_op_info->rx_flow->flow_tuple_info.l4_protocol);
4715 } else if (fse_op_info->op_code == DP_HTT_FST_CACHE_INVALIDATE_FULL) {
4716 HTT_RX_FSE_OPERATION_SET(*msg_word,
4717 HTT_RX_FSE_CACHE_INVALIDATE_FULL);
4718 } else if (fse_op_info->op_code == DP_HTT_FST_DISABLE) {
4719 HTT_RX_FSE_OPERATION_SET(*msg_word, HTT_RX_FSE_DISABLE);
4720 } else if (fse_op_info->op_code == DP_HTT_FST_ENABLE) {
4721 HTT_RX_FSE_OPERATION_SET(*msg_word, HTT_RX_FSE_ENABLE);
4722 }
4723
4724 pkt = htt_htc_pkt_alloc(soc);
4725 if (!pkt) {
4726 qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
4727 qdf_assert(0);
4728 qdf_nbuf_free(msg);
4729 return QDF_STATUS_E_RESOURCES; /* failure */
4730 }
4731
4732 pkt->soc_ctxt = NULL; /* not used during send-done callback */
4733
4734 SET_HTC_PACKET_INFO_TX(
4735 &pkt->htc_pkt,
4736 dp_htt_h2t_send_complete_free_netbuf,
4737 qdf_nbuf_data(msg),
4738 qdf_nbuf_len(msg),
4739 soc->htc_endpoint,
4740 1); /* tag - not relevant here */
4741
4742 SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
4743
4744 DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_RX_FSE_OPERATION_CFG,
4745 htt_logger_bufp);
4746
4747 qdf_info("HTT_H2T RX_FSE_OPERATION_CFG sent to FW for pdev = %u",
4748 fse_op_info->pdev_id);
4749
4750 return QDF_STATUS_SUCCESS;
4751}