/*
 * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "hal_rx.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#include "hal_api_mon.h"
#include "dp_rx_mon.h"
#include "dp_internal.h"
#include "qdf_mem.h"   /* qdf_mem_malloc,free */

#ifdef FEATURE_PERPKT_INFO
#include "dp_ratetable.h"
#endif

/**
* dp_rx_populate_cdp_indication_ppdu() - Populate cdp rx indication structure
* @pdev: pdev ctx
* @ppdu_info: ppdu info structure from ppdu ring
* @ppdu_nbuf: qdf nbuf abstraction for linux skb
*
* Return: none
*/
#ifdef FEATURE_PERPKT_INFO
static inline void
dp_rx_populate_cdp_indication_ppdu(struct dp_pdev *pdev,
				   struct hal_rx_ppdu_info *ppdu_info,
				   qdf_nbuf_t ppdu_nbuf)
{
	struct dp_peer *peer;
	struct dp_soc *soc = pdev->soc;
	struct dp_ast_entry *ast_entry;
	struct cdp_rx_indication_ppdu *cdp_rx_ppdu;
	uint32_t ast_index;

	cdp_rx_ppdu = (struct cdp_rx_indication_ppdu *)ppdu_nbuf->data;

	cdp_rx_ppdu->first_data_seq_ctrl =
		ppdu_info->rx_status.first_data_seq_ctrl;
	cdp_rx_ppdu->frame_ctrl =
		ppdu_info->rx_status.frame_control;
	cdp_rx_ppdu->ppdu_id = ppdu_info->com_info.ppdu_id;
	cdp_rx_ppdu->length = ppdu_info->rx_status.ppdu_len;
	cdp_rx_ppdu->duration = ppdu_info->rx_status.duration;
	cdp_rx_ppdu->u.bw = ppdu_info->rx_status.bw;
	cdp_rx_ppdu->tcp_msdu_count = ppdu_info->rx_status.tcp_msdu_count;
	cdp_rx_ppdu->udp_msdu_count = ppdu_info->rx_status.udp_msdu_count;
	cdp_rx_ppdu->other_msdu_count = ppdu_info->rx_status.other_msdu_count;
	cdp_rx_ppdu->u.nss = ppdu_info->rx_status.nss;
	cdp_rx_ppdu->u.mcs = ppdu_info->rx_status.mcs;
	if ((ppdu_info->rx_status.sgi == VHT_SGI_NYSM) &&
	    (ppdu_info->rx_status.preamble_type == HAL_RX_PKT_TYPE_11AC))
		cdp_rx_ppdu->u.gi = CDP_SGI_0_4_US;
	else
		cdp_rx_ppdu->u.gi = ppdu_info->rx_status.sgi;
	cdp_rx_ppdu->u.ldpc = ppdu_info->rx_status.ldpc;
	cdp_rx_ppdu->u.preamble = ppdu_info->rx_status.preamble_type;
	cdp_rx_ppdu->u.ppdu_type = ppdu_info->rx_status.reception_type;
	cdp_rx_ppdu->u.ltf_size = (ppdu_info->rx_status.he_data5 >>
				   QDF_MON_STATUS_HE_LTF_SIZE_SHIFT) & 0x3;
	cdp_rx_ppdu->num_mpdu = ppdu_info->com_info.mpdu_cnt_fcs_ok;
	cdp_rx_ppdu->rssi = ppdu_info->rx_status.rssi_comb;
	cdp_rx_ppdu->timestamp = ppdu_info->rx_status.tsft;
	cdp_rx_ppdu->channel = ppdu_info->rx_status.chan_num;
	cdp_rx_ppdu->beamformed = ppdu_info->rx_status.beamformed;
	cdp_rx_ppdu->num_msdu = (cdp_rx_ppdu->tcp_msdu_count +
				 cdp_rx_ppdu->udp_msdu_count +
				 cdp_rx_ppdu->other_msdu_count);

	if (ppdu_info->com_info.mpdu_cnt_fcs_ok > 1)
		cdp_rx_ppdu->is_ampdu = 1;
	else
		cdp_rx_ppdu->is_ampdu = 0;

	cdp_rx_ppdu->tid = ppdu_info->rx_status.tid;
	cdp_rx_ppdu->lsig_a = ppdu_info->rx_status.rate;

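	/*
	 * Resolve the transmitting peer from the AST index reported in
	 * the rx status; any lookup failure below marks the PPDU with
	 * HTT_INVALID_PEER.
	 */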
	ast_index = ppdu_info->rx_status.ast_index;
	if (ast_index >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)) {
		cdp_rx_ppdu->peer_id = HTT_INVALID_PEER;
		return;
	}

	ast_entry = soc->ast_table[ast_index];
	if (!ast_entry) {
		cdp_rx_ppdu->peer_id = HTT_INVALID_PEER;
		return;
	}
	peer = ast_entry->peer;
	if (!peer || peer->peer_ids[0] == HTT_INVALID_PEER) {
		cdp_rx_ppdu->peer_id = HTT_INVALID_PEER;
		return;
	}

	qdf_mem_copy(cdp_rx_ppdu->mac_addr,
		     peer->mac_addr.raw, DP_MAC_ADDR_LEN);
	cdp_rx_ppdu->peer_id = peer->peer_ids[0];
	cdp_rx_ppdu->vdev_id = peer->vdev->vdev_id;
}
#else
static inline void
dp_rx_populate_cdp_indication_ppdu(struct dp_pdev *pdev,
				   struct hal_rx_ppdu_info *ppdu_info,
				   qdf_nbuf_t ppdu_nbuf)
{
}
#endif
/**
 * dp_rx_stats_update() - Update per-peer statistics
 * @pdev: Datapath PDEV handle
 * @peer: Datapath peer handle
 * @ppdu: PPDU Descriptor
 *
 * Return: None
 */
#ifdef FEATURE_PERPKT_INFO
static inline void dp_rx_rate_stats_update(struct dp_peer *peer,
					   struct cdp_rx_indication_ppdu *ppdu)
{
	uint32_t ratekbps = 0;
	uint32_t ppdu_rx_rate = 0;
	uint32_t nss = 0;
	uint32_t rix;

	if (!peer || !ppdu)
		return;

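	/* dp_getrateindex() takes the spatial-stream count as a 0-based value */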
	if (ppdu->u.nss == 0)
		nss = 0;
	else
		nss = ppdu->u.nss - 1;

	ratekbps = dp_getrateindex(ppdu->u.gi,
				   ppdu->u.mcs,
				   nss,
				   ppdu->u.preamble,
				   ppdu->u.bw,
				   &rix);

	if (!ratekbps)
		return;

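	/*
	 * Record the instantaneous rate and fold it into the low-pass
	 * filtered average, from which the rounded average rate is derived.
	 */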
	DP_STATS_UPD(peer, rx.last_rx_rate, ratekbps);
	dp_ath_rate_lpf(peer->stats.rx.avg_rx_rate, ratekbps);
	ppdu_rx_rate = dp_ath_rate_out(peer->stats.rx.avg_rx_rate);
	DP_STATS_UPD(peer, rx.rnd_avg_rx_rate, ppdu_rx_rate);

	if (peer->vdev)
		peer->vdev->stats.rx.last_rx_rate = ratekbps;
}

static void dp_rx_stats_update(struct dp_pdev *pdev, struct dp_peer *peer,
			       struct cdp_rx_indication_ppdu *ppdu)
{
	struct dp_soc *soc = NULL;
	uint8_t mcs, preamble, ac = 0;
	uint16_t num_msdu;
	bool is_invalid_peer = false;

	mcs = ppdu->u.mcs;
	preamble = ppdu->u.preamble;
	num_msdu = ppdu->num_msdu;

	if (pdev)
		soc = pdev->soc;
	else
		return;

	if (!peer) {
		is_invalid_peer = true;
		peer = pdev->invalid_peer;
	}

	if (!soc || soc->process_rx_status)
		return;

	DP_STATS_UPD(peer, rx.rssi, ppdu->rssi);

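	/* Legacy 11a/11b receptions are always single spatial stream */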
	if ((preamble == DOT11_A) || (preamble == DOT11_B))
		ppdu->u.nss = 1;

	if (ppdu->u.nss)
		DP_STATS_INC(peer, rx.nss[ppdu->u.nss - 1], num_msdu);

	DP_STATS_INC(peer, rx.sgi_count[ppdu->u.gi], num_msdu);
	DP_STATS_INC(peer, rx.bw[ppdu->u.bw], num_msdu);
	DP_STATS_INC(peer, rx.reception_type[ppdu->u.ppdu_type], num_msdu);
	DP_STATS_INCC(peer, rx.ampdu_cnt, num_msdu, ppdu->is_ampdu);
	DP_STATS_INCC(peer, rx.non_ampdu_cnt, num_msdu, !(ppdu->is_ampdu));
	DP_STATS_UPD(peer, rx.rx_rate, mcs);
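	/*
	 * Per-preamble MCS histogram: MCS values beyond the range valid
	 * for the preamble are accumulated in the last (MAX_MCS - 1) bin.
	 */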
	DP_STATS_INCC(peer,
		      rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
		      ((mcs >= MAX_MCS_11A) && (preamble == DOT11_A)));
	DP_STATS_INCC(peer,
		      rx.pkt_type[preamble].mcs_count[mcs], num_msdu,
		      ((mcs < MAX_MCS_11A) && (preamble == DOT11_A)));
	DP_STATS_INCC(peer,
		      rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
		      ((mcs >= MAX_MCS_11B) && (preamble == DOT11_B)));
	DP_STATS_INCC(peer,
		      rx.pkt_type[preamble].mcs_count[mcs], num_msdu,
		      ((mcs < MAX_MCS_11B) && (preamble == DOT11_B)));
	DP_STATS_INCC(peer,
		      rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
		      ((mcs >= MAX_MCS_11A) && (preamble == DOT11_N)));
	DP_STATS_INCC(peer,
		      rx.pkt_type[preamble].mcs_count[mcs], num_msdu,
		      ((mcs < MAX_MCS_11A) && (preamble == DOT11_N)));
	DP_STATS_INCC(peer,
		      rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
		      ((mcs >= MAX_MCS_11AC) && (preamble == DOT11_AC)));
	DP_STATS_INCC(peer,
		      rx.pkt_type[preamble].mcs_count[mcs], num_msdu,
		      ((mcs < MAX_MCS_11AC) && (preamble == DOT11_AC)));
	DP_STATS_INCC(peer,
		      rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
		      ((mcs >= (MAX_MCS - 1)) && (preamble == DOT11_AX)));
	DP_STATS_INCC(peer,
		      rx.pkt_type[preamble].mcs_count[mcs], num_msdu,
		      ((mcs < (MAX_MCS - 1)) && (preamble == DOT11_AX)));
	/*
	 * If invalid TID, it could be a non-qos frame, hence do not update
	 * any AC counters
	 */
	ac = TID_TO_WME_AC(ppdu->tid);
	if (ppdu->tid != HAL_TID_INVALID)
		DP_STATS_INC(peer, rx.wme_ac_type[ac], num_msdu);
	dp_peer_stats_notify(peer);
	DP_STATS_UPD(peer, rx.last_rssi, ppdu->rssi);

	if (is_invalid_peer)
		return;

	dp_rx_rate_stats_update(peer, ppdu);

#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc,
			     &peer->stats, ppdu->peer_id,
			     UPDATE_PEER_STATS, pdev->pdev_id);
#endif
}
#endif

/**
 * dp_rx_handle_mcopy_mode() - Allocate and deliver first MSDU payload
 * @soc: core txrx main context
 * @pdev: pdev structure
 * @ppdu_info: structure for rx ppdu ring
 * @nbuf: qdf nbuf abstraction for linux skb
 *
 * Return: QDF_STATUS_SUCCESS - If nbuf to be freed by caller
 *         QDF_STATUS_E_ALREADY - If nbuf not to be freed by caller
 */
#ifdef FEATURE_PERPKT_INFO
static inline QDF_STATUS
dp_rx_handle_mcopy_mode(struct dp_soc *soc, struct dp_pdev *pdev,
			struct hal_rx_ppdu_info *ppdu_info, qdf_nbuf_t nbuf)
{
	uint8_t size = 0;
	struct ieee80211_frame *wh;
	uint32_t *nbuf_data;

	if (ppdu_info->msdu_info.first_msdu_payload == NULL)
		return QDF_STATUS_SUCCESS;

	if (pdev->m_copy_id.rx_ppdu_id == ppdu_info->com_info.ppdu_id)
		return QDF_STATUS_SUCCESS;

	pdev->m_copy_id.rx_ppdu_id = ppdu_info->com_info.ppdu_id;

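	/* The 802.11 header follows 4 bytes of phy_ppdu_id in the payload */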
	wh = (struct ieee80211_frame *)(ppdu_info->msdu_info.first_msdu_payload
					+ 4);
	size = (ppdu_info->msdu_info.first_msdu_payload -
		qdf_nbuf_data(nbuf));
	ppdu_info->msdu_info.first_msdu_payload = NULL;

	if (qdf_nbuf_pull_head(nbuf, size) == NULL)
		return QDF_STATUS_SUCCESS;

	if (((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
	     IEEE80211_FC0_TYPE_MGT) ||
	    ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
	     IEEE80211_FC0_TYPE_CTL)) {
		return QDF_STATUS_SUCCESS;
	}

	nbuf_data = (uint32_t *)qdf_nbuf_data(nbuf);
	*nbuf_data = pdev->ppdu_info.com_info.ppdu_id;
	/* only retain RX MSDU payload in the skb */
	qdf_nbuf_trim_tail(nbuf, qdf_nbuf_len(nbuf) -
			   ppdu_info->msdu_info.payload_len);
	dp_wdi_event_handler(WDI_EVENT_RX_DATA, soc,
			     nbuf, HTT_INVALID_PEER, WDI_NO_VAL, pdev->pdev_id);
	return QDF_STATUS_E_ALREADY;
}
#else
static inline QDF_STATUS
dp_rx_handle_mcopy_mode(struct dp_soc *soc, struct dp_pdev *pdev,
			struct hal_rx_ppdu_info *ppdu_info, qdf_nbuf_t nbuf)
{
	return QDF_STATUS_SUCCESS;
}
#endif
322
sumedh baikady59a2d332018-05-22 01:50:38 -0700323/**
324 * dp_rx_handle_smart_mesh_mode() - Deliver header for smart mesh
325 * @soc: Datapath SOC handle
326 * @pdev: Datapath PDEV handle
327 * @ppdu_info: Structure for rx ppdu info
328 * @nbuf: Qdf nbuf abstraction for linux skb
329 *
330 * Return: 0 on success, 1 on failure
331 */
332static inline int
333dp_rx_handle_smart_mesh_mode(struct dp_soc *soc, struct dp_pdev *pdev,
334 struct hal_rx_ppdu_info *ppdu_info,
335 qdf_nbuf_t nbuf)
336{
337 uint8_t size = 0;
338
339 if (!pdev->monitor_vdev) {
340 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
341 "[%s]:[%d] Monitor vdev is NULL !!",
342 __func__, __LINE__);
343 return 1;
344 }
345 if (ppdu_info->msdu_info.first_msdu_payload == NULL) {
346 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
347 "[%s]:[%d] First msdu payload not present",
348 __func__, __LINE__);
349 return 1;
350 }
351
sumedh baikadyda159202018-11-01 17:31:23 -0700352 /* Adding 4 bytes to get to start of 802.11 frame after phy_ppdu_id */
sumedh baikady59a2d332018-05-22 01:50:38 -0700353 size = (ppdu_info->msdu_info.first_msdu_payload -
sumedh baikadyda159202018-11-01 17:31:23 -0700354 qdf_nbuf_data(nbuf)) + 4;
sumedh baikady59a2d332018-05-22 01:50:38 -0700355 ppdu_info->msdu_info.first_msdu_payload = NULL;
356
357 if (qdf_nbuf_pull_head(nbuf, size) == NULL) {
358 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
359 "[%s]:[%d] No header present",
360 __func__, __LINE__);
361 return 1;
362 }
363
sumedh baikadyda159202018-11-01 17:31:23 -0700364 /* Only retain RX MSDU payload in the skb */
sumedh baikady59a2d332018-05-22 01:50:38 -0700365 qdf_nbuf_trim_tail(nbuf, qdf_nbuf_len(nbuf) -
366 ppdu_info->msdu_info.payload_len);
367 qdf_nbuf_update_radiotap(&(pdev->ppdu_info.rx_status),
368 nbuf, sizeof(struct rx_pkt_tlvs));
369 pdev->monitor_vdev->osif_rx_mon(pdev->monitor_vdev->osif_vdev,
370 nbuf, NULL);
sumedh baikadyda159202018-11-01 17:31:23 -0700371 pdev->ppdu_info.rx_status.monitor_direct_used = 0;
sumedh baikady59a2d332018-05-22 01:50:38 -0700372 return 0;
373}
Soumya Bhat6fee59c2017-10-31 13:12:37 +0530374
/**
* dp_rx_handle_ppdu_stats() - Allocate and deliver ppdu stats to cdp layer
* @soc: core txrx main context
* @pdev: pdev structure
* @ppdu_info: structure for rx ppdu ring
*
* Return: none
*/
#ifdef FEATURE_PERPKT_INFO
static inline void
dp_rx_handle_ppdu_stats(struct dp_soc *soc, struct dp_pdev *pdev,
			struct hal_rx_ppdu_info *ppdu_info)
{
	qdf_nbuf_t ppdu_nbuf;
	struct dp_peer *peer;
	struct cdp_rx_indication_ppdu *cdp_rx_ppdu;

	/*
	 * Do not allocate if fcs error,
	 * ast idx invalid / fctl invalid
	 */
	if (ppdu_info->com_info.mpdu_cnt_fcs_ok == 0)
		return;

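	/*
	 * For smart monitor mode, refresh the cached RSSI of any
	 * configured neighbour peer that transmitted this PPDU
	 * (matched on mac_addr2 of to-DS frames).
	 */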
	if (ppdu_info->nac_info.fc_valid &&
	    ppdu_info->nac_info.to_ds_flag &&
	    ppdu_info->nac_info.mac_addr2_valid) {
		struct dp_neighbour_peer *peer = NULL;
		uint8_t rssi = ppdu_info->rx_status.rssi_comb;

		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
		if (pdev->neighbour_peers_added) {
			TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
				      neighbour_peer_list_elem) {
				if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr,
						 &ppdu_info->nac_info.mac_addr2,
						 DP_MAC_ADDR_LEN)) {
					peer->rssi = rssi;
					break;
				}
			}
		}
		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
	}

	/*
	 * No need to generate a wdi event when mcopy mode and
	 * enhanced stats are both disabled
	 */
	if (!pdev->mcopy_mode && !pdev->enhanced_stats_en)
		return;

	if (!pdev->mcopy_mode) {
		if (!ppdu_info->rx_status.frame_control_info_valid)
			return;

		if (ppdu_info->rx_status.ast_index == HAL_AST_IDX_INVALID)
			return;
	}
	ppdu_nbuf = qdf_nbuf_alloc(soc->osdev,
				   sizeof(struct hal_rx_ppdu_info), 0, 0, FALSE);
	if (ppdu_nbuf) {
		dp_rx_populate_cdp_indication_ppdu(pdev, ppdu_info, ppdu_nbuf);
		qdf_nbuf_put_tail(ppdu_nbuf,
				  sizeof(struct cdp_rx_indication_ppdu));
		cdp_rx_ppdu = (struct cdp_rx_indication_ppdu *)ppdu_nbuf->data;
		peer = dp_peer_find_by_id(soc, cdp_rx_ppdu->peer_id);
		if (peer) {
			dp_rx_stats_update(pdev, peer, cdp_rx_ppdu);
			dp_peer_unref_del_find_by_id(peer);
		}
		if (cdp_rx_ppdu->peer_id != HTT_INVALID_PEER) {
			dp_wdi_event_handler(WDI_EVENT_RX_PPDU_DESC,
					     soc, ppdu_nbuf,
					     cdp_rx_ppdu->peer_id,
					     WDI_NO_VAL, pdev->pdev_id);
		} else if (pdev->mcopy_mode) {
			dp_wdi_event_handler(WDI_EVENT_RX_PPDU_DESC, soc,
					     ppdu_nbuf, HTT_INVALID_PEER,
					     WDI_NO_VAL, pdev->pdev_id);
		} else {
			qdf_nbuf_free(ppdu_nbuf);
		}
	}
}
#else
static inline void
dp_rx_handle_ppdu_stats(struct dp_soc *soc, struct dp_pdev *pdev,
			struct hal_rx_ppdu_info *ppdu_info)
{
}
#endif

/**
* dp_rx_process_peer_based_pktlog() - Process Rx pktlog if peer based
* filtering enabled
* @soc: core txrx main context
* @ppdu_info: Structure for rx ppdu info
* @status_nbuf: Qdf nbuf abstraction for linux skb
* @mac_id: mac_id/pdev_id correspondingly for MCL and WIN
*
* Return: none
*/
static inline void
dp_rx_process_peer_based_pktlog(struct dp_soc *soc,
				struct hal_rx_ppdu_info *ppdu_info,
				qdf_nbuf_t status_nbuf, uint32_t mac_id)
{
	struct dp_peer *peer;
	struct dp_ast_entry *ast_entry;
	uint32_t ast_index;

	ast_index = ppdu_info->rx_status.ast_index;
	if (ast_index < (WLAN_UMAC_PSOC_MAX_PEERS * 2)) {
		ast_entry = soc->ast_table[ast_index];
		if (ast_entry) {
			peer = ast_entry->peer;
			if (peer && (peer->peer_ids[0] != HTT_INVALID_PEER)) {
				if (peer->peer_based_pktlog_filter) {
					dp_wdi_event_handler(
						WDI_EVENT_RX_DESC, soc,
						status_nbuf,
						peer->peer_ids[0],
						WDI_NO_VAL, mac_id);
				}
			}
		}
	}
}

/**
* dp_rx_mon_status_process_tlv() - Process status TLV in status
* buffer on Rx status Queue posted by status SRNG processing.
* @soc: core txrx main context
* @mac_id: mac_id which is one of 3 mac_ids
* @quota: No. of ring entries that can be serviced in one shot.
*
* Return: none
*/
static inline void
dp_rx_mon_status_process_tlv(struct dp_soc *soc, uint32_t mac_id,
			     uint32_t quota)
{
	struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id);
	struct hal_rx_ppdu_info *ppdu_info;
	qdf_nbuf_t status_nbuf;
	uint8_t *rx_tlv;
	uint8_t *rx_tlv_start;
	uint32_t tlv_status = HAL_TLV_STATUS_BUF_DONE;
	QDF_STATUS m_copy_status = QDF_STATUS_SUCCESS;
	struct cdp_pdev_mon_stats *rx_mon_stats;
	int smart_mesh_status;
	enum WDI_EVENT pktlog_mode = WDI_NO_VAL;

	ppdu_info = &pdev->ppdu_info;
	rx_mon_stats = &pdev->rx_mon_stats;

	if (pdev->mon_ppdu_status != DP_PPDU_STATUS_START)
		return;

	while (!qdf_nbuf_is_queue_empty(&pdev->rx_status_q)) {

		status_nbuf = qdf_nbuf_queue_remove(&pdev->rx_status_q);

		rx_tlv = qdf_nbuf_data(status_nbuf);
		rx_tlv_start = rx_tlv;

		if ((pdev->monitor_vdev != NULL) || (pdev->enhanced_stats_en) ||
		    pdev->mcopy_mode) {

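			/*
			 * Walk the TLVs in the status buffer, accumulating
			 * them into ppdu_info, until a PPDU-done TLV or the
			 * end of the buffer is reached.
			 */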
			do {
				tlv_status = hal_rx_status_get_tlv_info(rx_tlv,
						ppdu_info, pdev->soc->hal_soc);

				dp_rx_mon_update_dbg_ppdu_stats(ppdu_info,
								rx_mon_stats);

				rx_tlv = hal_rx_status_get_next_tlv(rx_tlv);

				if ((rx_tlv - rx_tlv_start) >= RX_BUFFER_SIZE)
					break;

			} while (tlv_status == HAL_TLV_STATUS_PPDU_NOT_DONE);
		}
		if (pdev->dp_peer_based_pktlog) {
			dp_rx_process_peer_based_pktlog(soc, ppdu_info,
							status_nbuf, mac_id);
		} else {
			if (pdev->rx_pktlog_mode == DP_RX_PKTLOG_FULL)
				pktlog_mode = WDI_EVENT_RX_DESC;
			else if (pdev->rx_pktlog_mode == DP_RX_PKTLOG_LITE)
				pktlog_mode = WDI_EVENT_LITE_RX;

			if (pktlog_mode != WDI_NO_VAL)
				dp_wdi_event_handler(pktlog_mode, soc,
						     status_nbuf,
						     HTT_INVALID_PEER,
						     WDI_NO_VAL, mac_id);
		}

		/* smart monitor vap and m_copy cannot co-exist */
		if (ppdu_info->rx_status.monitor_direct_used &&
		    pdev->neighbour_peers_added && pdev->monitor_vdev) {
			smart_mesh_status = dp_rx_handle_smart_mesh_mode(soc,
						pdev, ppdu_info, status_nbuf);
			if (smart_mesh_status)
				qdf_nbuf_free(status_nbuf);
		} else if (pdev->mcopy_mode) {
			m_copy_status = dp_rx_handle_mcopy_mode(soc,
						pdev, ppdu_info, status_nbuf);
			if (m_copy_status == QDF_STATUS_SUCCESS)
				qdf_nbuf_free(status_nbuf);
		} else {
			qdf_nbuf_free(status_nbuf);
		}

		if (tlv_status == HAL_TLV_STATUS_PPDU_NON_STD_DONE) {
			dp_rx_mon_deliver_non_std(soc, mac_id);
		} else if (tlv_status == HAL_TLV_STATUS_PPDU_DONE) {
			rx_mon_stats->status_ppdu_done++;
			if (pdev->enhanced_stats_en ||
			    pdev->mcopy_mode || pdev->neighbour_peers_added)
				dp_rx_handle_ppdu_stats(soc, pdev, ppdu_info);

			pdev->mon_ppdu_status = DP_PPDU_STATUS_DONE;
			dp_rx_mon_dest_process(soc, mac_id, quota);
			pdev->mon_ppdu_status = DP_PPDU_STATUS_START;
		}
	}
	return;
}

/*
 * dp_rx_mon_status_srng_process() - Process monitor status ring
 * post the status ring buffer to Rx status Queue for later
 * processing when status ring is filled with status TLV.
 * Allocate a new buffer to status ring if the filled buffer
 * is posted.
 *
 * @soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @quota: No. of ring entries that can be serviced in one shot.
 *
 * Return: uint32_t: No. of ring entries that are processed.
 */
static inline uint32_t
dp_rx_mon_status_srng_process(struct dp_soc *soc, uint32_t mac_id,
			      uint32_t quota)
{
	struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id);
	void *hal_soc;
	void *mon_status_srng;
	void *rxdma_mon_status_ring_entry;
	QDF_STATUS status;
	uint32_t work_done = 0;
	int mac_for_pdev = dp_get_mac_id_for_mac(soc, mac_id);

	mon_status_srng = pdev->rxdma_mon_status_ring[mac_for_pdev].hal_srng;

	qdf_assert(mon_status_srng);
	if (!mon_status_srng || !hal_srng_initialized(mon_status_srng)) {

		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : HAL Monitor Status Ring Init Failed -- %pK",
			  __func__, __LINE__, mon_status_srng);
		return work_done;
	}

	hal_soc = soc->hal_soc;

	qdf_assert(hal_soc);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, mon_status_srng)))
		goto done;

	/* mon_status_ring_desc => WBM_BUFFER_RING STRUCT =>
	 * BUFFER_ADDR_INFO STRUCT
	 */
	while (qdf_likely((rxdma_mon_status_ring_entry =
		hal_srng_src_peek(hal_soc, mon_status_srng))
			&& quota--)) {
		uint32_t rx_buf_cookie;
		qdf_nbuf_t status_nbuf;
		struct dp_rx_desc *rx_desc;
		uint8_t *status_buf;
		qdf_dma_addr_t paddr;
		uint64_t buf_addr;

		buf_addr =
			(HAL_RX_BUFFER_ADDR_31_0_GET(
				rxdma_mon_status_ring_entry) |
			((uint64_t)(HAL_RX_BUFFER_ADDR_39_32_GET(
				rxdma_mon_status_ring_entry)) << 32));

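		/*
		 * A zero buffer address means this entry was left empty by
		 * an earlier allocation failure; take a fresh descriptor
		 * from the free list so the entry can be refilled below.
		 */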
		if (qdf_likely(buf_addr)) {

			rx_buf_cookie =
				HAL_RX_BUF_COOKIE_GET(
					rxdma_mon_status_ring_entry);
			rx_desc = dp_rx_cookie_2_va_mon_status(soc,
							       rx_buf_cookie);

			qdf_assert(rx_desc);

			status_nbuf = rx_desc->nbuf;

			qdf_nbuf_sync_for_cpu(soc->osdev, status_nbuf,
					      QDF_DMA_FROM_DEVICE);

			status_buf = qdf_nbuf_data(status_nbuf);

			status = hal_get_rx_status_done(status_buf);

			if (status != QDF_STATUS_SUCCESS) {
				uint32_t hp, tp;
				hal_get_sw_hptp(hal_soc, mon_status_srng,
						&tp, &hp);
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_ERROR,
					  "[%s][%d] status not done - hp:%u, tp:%u",
					  __func__, __LINE__, hp, tp);
				/* WAR for missing status: Skip status entry */
				hal_srng_src_get_next(hal_soc, mon_status_srng);
				continue;
			}
			qdf_nbuf_set_pktlen(status_nbuf, RX_BUFFER_SIZE);

			qdf_nbuf_unmap_single(soc->osdev, status_nbuf,
					      QDF_DMA_FROM_DEVICE);

			/* Put the status_nbuf to queue */
			qdf_nbuf_queue_add(&pdev->rx_status_q, status_nbuf);

		} else {
			union dp_rx_desc_list_elem_t *desc_list = NULL;
			union dp_rx_desc_list_elem_t *tail = NULL;
			struct rx_desc_pool *rx_desc_pool;
			uint32_t num_alloc_desc;

			rx_desc_pool = &soc->rx_desc_status[mac_id];

			num_alloc_desc = dp_rx_get_free_desc_list(soc, mac_id,
							rx_desc_pool,
							1,
							&desc_list,
							&tail);
			/*
			 * No free descriptors available
			 */
			if (qdf_unlikely(num_alloc_desc == 0)) {
				work_done++;
				break;
			}

			rx_desc = &desc_list->rx_desc;
		}

		status_nbuf = dp_rx_nbuf_prepare(soc, pdev);

		/*
		 * qdf_nbuf alloc or map failed,
		 * free the dp rx desc to free list,
		 * fill in NULL dma address at current HP entry,
		 * keep HP in mon_status_ring unchanged,
		 * wait next time dp_rx_mon_status_srng_process
		 * to fill in buffer at current HP.
		 */
		if (qdf_unlikely(status_nbuf == NULL)) {
			union dp_rx_desc_list_elem_t *desc_list = NULL;
			union dp_rx_desc_list_elem_t *tail = NULL;
			struct rx_desc_pool *rx_desc_pool;

			rx_desc_pool = &soc->rx_desc_status[mac_id];

			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "%s: fail to allocate or map qdf_nbuf",
				  __func__);
			dp_rx_add_to_free_desc_list(&desc_list,
						    &tail, rx_desc);
			dp_rx_add_desc_list_to_free_list(soc, &desc_list,
						&tail, mac_id, rx_desc_pool);

			hal_rxdma_buff_addr_info_set(
				rxdma_mon_status_ring_entry,
				0, 0, HAL_RX_BUF_RBM_SW3_BM);
			work_done++;
			break;
		}

		paddr = qdf_nbuf_get_frag_paddr(status_nbuf, 0);

		rx_desc->nbuf = status_nbuf;
		rx_desc->in_use = 1;

		hal_rxdma_buff_addr_info_set(rxdma_mon_status_ring_entry,
			paddr, rx_desc->cookie, HAL_RX_BUF_RBM_SW3_BM);

		hal_srng_src_get_next(hal_soc, mon_status_srng);
		work_done++;
	}
done:

	hal_srng_access_end(hal_soc, mon_status_srng);

	return work_done;

}
/*
 * dp_rx_mon_status_process() - Process monitor status ring and
 * TLV in status ring.
 *
 * @soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @quota: No. of ring entries that can be serviced in one shot.
 *
 * Return: uint32_t: No. of ring entries that are processed.
 */
static inline uint32_t
dp_rx_mon_status_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota) {
	uint32_t work_done;

	work_done = dp_rx_mon_status_srng_process(soc, mac_id, quota);
	quota -= work_done;
	dp_rx_mon_status_process_tlv(soc, mac_id, quota);

	return work_done;
}
/**
 * dp_mon_process() - Main monitor mode processing routine.
 * This calls the monitor status ring processing and then the
 * monitor destination ring processing.
 * Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
 * @soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @quota: No. of status ring entries that can be serviced in one shot.
 *
 * Return: uint32_t: No. of ring entries that are processed.
 */
uint32_t
dp_mon_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota) {
	return dp_rx_mon_status_process(soc, mac_id, quota);
}

/**
 * dp_rx_pdev_mon_status_detach() - detach dp rx for status ring
 * @pdev: core txrx pdev context
 * @mac_id: mac_id/pdev_id correspondingly for MCL and WIN
 *
 * This function will detach DP RX status ring from
 * main device context. It will free DP Rx resources for
 * the status ring.
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS
dp_rx_pdev_mon_status_detach(struct dp_pdev *pdev, int mac_id)
{
	struct dp_soc *soc = pdev->soc;
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_status[mac_id];
	if (rx_desc_pool->pool_size != 0) {
		if (!dp_is_soc_reinit(soc))
			dp_rx_desc_pool_free(soc, mac_id, rx_desc_pool);
		else
			dp_rx_desc_nbuf_pool_free(soc, rx_desc_pool);
	}

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_mon_status_buffers_replenish() - replenish monitor status ring with
 * rx nbufs, called during dp rx
 * monitor status ring initialization
 *
 * @dp_soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp monitor status circular ring
 * @rx_desc_pool: Pointer to Rx descriptor pool
 * @num_req_buffers: number of buffers to be replenished
 * @desc_list: list of descs if called from dp rx monitor status
 *             process or NULL during dp rx initialization or
 *             out of buffer interrupt
 * @tail: tail of descs list
 * @owner: who owns the nbuf (host, NSS etc...)
 *
 * Return: return success or failure
 */
static inline
QDF_STATUS dp_rx_mon_status_buffers_replenish(struct dp_soc *dp_soc,
					uint32_t mac_id,
					struct dp_srng *dp_rxdma_srng,
					struct rx_desc_pool *rx_desc_pool,
					uint32_t num_req_buffers,
					union dp_rx_desc_list_elem_t **desc_list,
					union dp_rx_desc_list_elem_t **tail,
					uint8_t owner)
{
	uint32_t num_alloc_desc;
	uint16_t num_desc_to_free = 0;
	uint32_t num_entries_avail;
	uint32_t count = 0;
	int sync_hw_ptr = 1;
	qdf_dma_addr_t paddr;
	qdf_nbuf_t rx_netbuf;
	void *rxdma_ring_entry;
	union dp_rx_desc_list_elem_t *next;
	void *rxdma_srng;
	struct dp_pdev *dp_pdev = dp_get_pdev_for_mac_id(dp_soc, mac_id);

	rxdma_srng = dp_rxdma_srng->hal_srng;

	qdf_assert(rxdma_srng);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "[%s][%d] requested %d buffers for replenish",
		  __func__, __LINE__, num_req_buffers);

	/*
	 * if desc_list is NULL, allocate the descs from freelist
	 */
	if (!(*desc_list)) {

		num_alloc_desc = dp_rx_get_free_desc_list(dp_soc, mac_id,
							  rx_desc_pool,
							  num_req_buffers,
							  desc_list,
							  tail);

		if (!num_alloc_desc) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "[%s][%d] no free rx_descs in freelist",
				  __func__, __LINE__);
			return QDF_STATUS_E_NOMEM;
		}

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "[%s][%d] %d rx desc allocated", __func__, __LINE__,
			  num_alloc_desc);

		num_req_buffers = num_alloc_desc;
	}

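	/* sync_hw_ptr = 1: read the HW head pointer for an exact entry count */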
	hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
	num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
						   rxdma_srng, sync_hw_ptr);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "[%s][%d] no of available entries in rxdma ring: %d",
		  __func__, __LINE__, num_entries_avail);

	if (num_entries_avail < num_req_buffers) {
		num_desc_to_free = num_req_buffers - num_entries_avail;
		num_req_buffers = num_entries_avail;
	}

	while (count < num_req_buffers) {
		rx_netbuf = dp_rx_nbuf_prepare(dp_soc, dp_pdev);

		/*
		 * qdf_nbuf alloc or map failed,
		 * keep HP in mon_status_ring unchanged,
		 * wait dp_rx_mon_status_srng_process
		 * to fill in buffer at current HP.
		 */
		if (qdf_unlikely(rx_netbuf == NULL)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "%s: qdf_nbuf allocate or map fail, count %d",
				  __func__, count);
			break;
		}

		paddr = qdf_nbuf_get_frag_paddr(rx_netbuf, 0);

		next = (*desc_list)->next;
		rxdma_ring_entry = hal_srng_src_get_next(dp_soc->hal_soc,
							 rxdma_srng);

		if (qdf_unlikely(rxdma_ring_entry == NULL)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "[%s][%d] rxdma_ring_entry is NULL, count - %d",
				  __func__, __LINE__, count);
			qdf_nbuf_unmap_single(dp_soc->osdev, rx_netbuf,
					      QDF_DMA_BIDIRECTIONAL);
			qdf_nbuf_free(rx_netbuf);
			break;
		}

		(*desc_list)->rx_desc.nbuf = rx_netbuf;
		(*desc_list)->rx_desc.in_use = 1;
		count++;

		hal_rxdma_buff_addr_info_set(rxdma_ring_entry, paddr,
					     (*desc_list)->rx_desc.cookie,
					     owner);

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "[%s][%d] rx_desc=%pK, cookie=%d, nbuf=%pK, \
			  paddr=%pK",
			  __func__, __LINE__, &(*desc_list)->rx_desc,
			  (*desc_list)->rx_desc.cookie, rx_netbuf,
			  (void *)paddr);

		*desc_list = next;
	}

	hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "successfully replenished %d buffers", num_req_buffers);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "%d rx desc added back to free list", num_desc_to_free);

	/*
	 * add any available free desc back to the free list
	 */
	if (*desc_list) {
		dp_rx_add_desc_list_to_free_list(dp_soc, desc_list, tail,
						 mac_id, rx_desc_pool);
	}

	return QDF_STATUS_SUCCESS;
}
/**
 * dp_rx_pdev_mon_status_attach() - attach DP RX monitor status ring
 * @pdev: core txrx pdev context
 * @ring_id: ring number
 *
 * This function will attach a DP RX monitor status ring into pDEV
 * and replenish monitor status ring with buffer.
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS
dp_rx_pdev_mon_status_attach(struct dp_pdev *pdev, int ring_id) {
	struct dp_soc *soc = pdev->soc;
	union dp_rx_desc_list_elem_t *desc_list = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	struct dp_srng *mon_status_ring;
	uint32_t num_entries;
	struct rx_desc_pool *rx_desc_pool;
	QDF_STATUS status;
	int mac_for_pdev = dp_get_mac_id_for_mac(soc, ring_id);

	mon_status_ring = &pdev->rxdma_mon_status_ring[mac_for_pdev];

	num_entries = mon_status_ring->num_entries;

	rx_desc_pool = &soc->rx_desc_status[ring_id];

	dp_info("Mon RX Status Pool[%d] entries=%d",
		ring_id, num_entries);

	status = dp_rx_desc_pool_alloc(soc, ring_id, num_entries + 1,
				       rx_desc_pool);
	if (!QDF_IS_STATUS_SUCCESS(status))
		return status;

	dp_debug("Mon RX Status Buffers Replenish ring_id=%d", ring_id);

	status = dp_rx_mon_status_buffers_replenish(soc, ring_id,
						    mon_status_ring,
						    rx_desc_pool,
						    num_entries,
						    &desc_list, &tail,
						    HAL_RX_BUF_RBM_SW3_BM);

	if (!QDF_IS_STATUS_SUCCESS(status))
		return status;

	qdf_nbuf_queue_init(&pdev->rx_status_q);

	pdev->mon_ppdu_status = DP_PPDU_STATUS_START;

	qdf_mem_zero(&(pdev->ppdu_info.rx_status),
		     sizeof(pdev->ppdu_info.rx_status));

	qdf_mem_zero(&pdev->rx_mon_stats,
		     sizeof(pdev->rx_mon_stats));

	dp_rx_mon_init_dbg_ppdu_stats(&pdev->ppdu_info,
				      &pdev->rx_mon_stats);

	return QDF_STATUS_SUCCESS;
}