blob: cc87364a6d818f37ba674c105ea6d9ed4179821c [file] [log] [blame]
/*
 * Copyright (c) 2017 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "hal_rx.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#include "hal_api_mon.h"
#include "ieee80211.h"
#include "dp_rx_mon.h"
#include "dp_internal.h"
#include "qdf_mem.h"   /* qdf_mem_malloc,free */
Kai Chen6eca1a62017-01-12 10:17:53 -080030
Kai Chen6eca1a62017-01-12 10:17:53 -080031/**
Anish Nataraj38a29562017-08-18 19:41:17 +053032* dp_rx_populate_cdp_indication_ppdu() - Populate cdp rx indication structure
33* @soc: core txrx main context
34* @ppdu_info: ppdu info structure from ppdu ring
35* @ppdu_nbuf: qdf nbuf abstraction for linux skb
36*
37* Return: none
38*/
39#ifdef FEATURE_PERPKT_INFO
40static inline void
41dp_rx_populate_cdp_indication_ppdu(struct dp_soc *soc,
42 struct hal_rx_ppdu_info *ppdu_info,
43 qdf_nbuf_t ppdu_nbuf)
44{
45 struct dp_peer *peer;
46 struct dp_ast_entry *ast_entry;
47 struct cdp_rx_indication_ppdu *cdp_rx_ppdu;
48 uint32_t ast_index;
49
50 cdp_rx_ppdu = (struct cdp_rx_indication_ppdu *)ppdu_nbuf->data;
51
52 ast_index = ppdu_info->rx_status.ast_index;
53 if (ast_index > (WLAN_UMAC_PSOC_MAX_PEERS * 2)) {
54 cdp_rx_ppdu->peer_id = HTT_INVALID_PEER;
55 return;
56 }
57
58 ast_entry = soc->ast_table[ast_index];
59 if (!ast_entry) {
60 cdp_rx_ppdu->peer_id = HTT_INVALID_PEER;
61 return;
62 }
63 peer = ast_entry->peer;
64 if (!peer || peer->peer_ids[0] == HTT_INVALID_PEER) {
65 cdp_rx_ppdu->peer_id = HTT_INVALID_PEER;
66 return;
67 }
68
69 cdp_rx_ppdu->peer_id = peer->peer_ids[0];
70 cdp_rx_ppdu->ppdu_id = ppdu_info->com_info.ppdu_id;
71 cdp_rx_ppdu->duration = ppdu_info->rx_status.duration;
72 cdp_rx_ppdu->u.bw = ppdu_info->rx_status.bw;
73 cdp_rx_ppdu->u.nss = ppdu_info->rx_status.nss;
74 cdp_rx_ppdu->u.mcs = ppdu_info->rx_status.mcs;
75 cdp_rx_ppdu->u.preamble = ppdu_info->rx_status.preamble_type;
76 cdp_rx_ppdu->rssi = ppdu_info->rx_status.rssi_comb;
77 cdp_rx_ppdu->timestamp = ppdu_info->com_info.ppdu_timestamp;
78 cdp_rx_ppdu->channel = ppdu_info->rx_status.chan_freq;
79
80}
81#else
82static inline void
83dp_rx_populate_cdp_indication_ppdu(struct dp_soc *soc,
84 struct hal_rx_ppdu_info *ppdu_info,
85 qdf_nbuf_t ppdu_nbuf)
86{
87}
88#endif
89
/**
 * dp_rx_handle_ppdu_stats() - Allocate and deliver ppdu stats to cdp layer
 * @soc: core txrx main context
 * @pdev: pdev structure
 * @ppdu_info: structure for rx ppdu ring
 *
 * Builds a cdp_rx_indication_ppdu from @ppdu_info and delivers it through
 * the WDI_EVENT_RX_PPDU_DESC event when the owning peer is valid; the
 * nbuf is freed otherwise. Ownership of the nbuf passes to the WDI
 * handler on the success path.
 *
 * Return: none
 */
#ifdef FEATURE_PERPKT_INFO
static inline void
dp_rx_handle_ppdu_stats(struct dp_soc *soc, struct dp_pdev *pdev,
	struct hal_rx_ppdu_info *ppdu_info)
{
	qdf_nbuf_t ppdu_nbuf;
	struct dp_peer *peer;
	struct cdp_rx_indication_ppdu *cdp_rx_ppdu;

	/* The nbuf payload is a struct cdp_rx_indication_ppdu (see the
	 * qdf_nbuf_put_tail() below), so allocate that size. The original
	 * allocated sizeof(struct hal_rx_ppdu_info), which overflows the
	 * buffer whenever the cdp indication structure is the larger one.
	 */
	ppdu_nbuf = qdf_nbuf_alloc(pdev->osif_pdev,
		sizeof(struct cdp_rx_indication_ppdu), 0, 0, FALSE);
	if (ppdu_nbuf) {
		dp_rx_populate_cdp_indication_ppdu(soc, ppdu_info, ppdu_nbuf);
		qdf_nbuf_put_tail(ppdu_nbuf,
			sizeof(struct cdp_rx_indication_ppdu));
		cdp_rx_ppdu = (struct cdp_rx_indication_ppdu *)ppdu_nbuf->data;

		/* Deliver only when the populate step resolved a peer */
		peer = dp_peer_find_by_id(soc, cdp_rx_ppdu->peer_id);
		if (peer && cdp_rx_ppdu->peer_id != HTT_INVALID_PEER) {
			dp_wdi_event_handler(WDI_EVENT_RX_PPDU_DESC, soc,
				ppdu_nbuf, cdp_rx_ppdu->peer_id,
				WDI_NO_VAL, pdev->pdev_id);
		} else
			qdf_nbuf_free(ppdu_nbuf);
	}
}
#else
static inline void
dp_rx_handle_ppdu_stats(struct dp_soc *soc, struct dp_pdev *pdev,
	struct hal_rx_ppdu_info *ppdu_info)
{
}
#endif
131
/**
 * dp_rx_mon_status_process_tlv() - Process status TLV in status
 * buffer on Rx status Queue posted by status SRNG processing.
 * @soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids _ring
 * @quota: quota passed through to destination-ring processing once a
 *	   complete PPDU has been parsed
 *
 * Drains pdev->rx_status_q, walking the TLVs inside each status buffer.
 * When the TLV parser reports a complete PPDU, per-PPDU stats are
 * (optionally) delivered and the monitor destination ring is processed.
 *
 * Return: none
 */
static inline void
dp_rx_mon_status_process_tlv(struct dp_soc *soc, uint32_t mac_id,
	uint32_t quota)
{
	struct dp_pdev *pdev = soc->pdev_list[mac_id];
	struct hal_rx_ppdu_info *ppdu_info;
	qdf_nbuf_t status_nbuf;
	uint8_t *rx_tlv;
	uint8_t *rx_tlv_start;
	/* DUMMY means "no TLV parsed yet"; stays DUMMY when there is no
	 * monitor vdev, so the PPDU_DONE branch below is skipped. */
	uint32_t tlv_status = HAL_TLV_STATUS_DUMMY;

	ppdu_info = &pdev->ppdu_info;

	/* Parse only while a PPDU is in progress; the state is flipped to
	 * DONE and back to START at the bottom of this function. */
	if (pdev->mon_ppdu_status != DP_PPDU_STATUS_START)
		return;

	while (!qdf_nbuf_is_queue_empty(&pdev->rx_status_q)) {

		status_nbuf = qdf_nbuf_queue_remove(&pdev->rx_status_q);
		rx_tlv = qdf_nbuf_data(status_nbuf);
		rx_tlv_start = rx_tlv;

#if defined(CONFIG_WIN) && WDI_EVENT_ENABLE
#ifndef REMOVE_PKT_LOG
		/* Hand the raw status buffer to pktlog subscribers before
		 * it is parsed (and freed) below. */
		dp_wdi_event_handler(WDI_EVENT_RX_DESC, soc,
			status_nbuf, HTT_INVALID_PEER, WDI_NO_VAL, mac_id);
#endif
#endif
		if (pdev->monitor_vdev != NULL) {

			/* Walk TLVs until the HAL reports end-of-PPDU or
			 * the cursor would run past the buffer. */
			do {
				tlv_status = hal_rx_status_get_tlv_info(rx_tlv,
						ppdu_info);
				rx_tlv = hal_rx_status_get_next_tlv(rx_tlv);

				if ((rx_tlv - rx_tlv_start) >= RX_BUFFER_SIZE)
					break;

			} while (tlv_status == HAL_TLV_STATUS_PPDU_NOT_DONE);
		}
		/* Status buffer contents have been consumed (or skipped) */
		qdf_nbuf_free(status_nbuf);

		if (tlv_status == HAL_TLV_STATUS_PPDU_DONE) {
			if (pdev->enhanced_stats_en)
				dp_rx_handle_ppdu_stats(soc, pdev, ppdu_info);
			/* Mark PPDU complete while the destination ring is
			 * drained, then re-arm for the next PPDU. */
			pdev->mon_ppdu_status = DP_PPDU_STATUS_DONE;
			dp_rx_mon_dest_process(soc, mac_id, quota);
			pdev->mon_ppdu_status = DP_PPDU_STATUS_START;
		}
	}
	return;
}
192
193/*
194 * dp_rx_mon_status_srng_process() - Process monitor status ring
195 * post the status ring buffer to Rx status Queue for later
196 * processing when status ring is filled with status TLV.
197 * Allocate a new buffer to status ring if the filled buffer
198 * is posted.
199 *
200 * @soc: core txrx main context
201 * @mac_id: mac_id which is one of 3 mac_ids
202 * @quota: No. of ring entry that can be serviced in one shot.
203
204 * Return: uint32_t: No. of ring entry that is processed.
205 */
206static inline uint32_t
207dp_rx_mon_status_srng_process(struct dp_soc *soc, uint32_t mac_id,
208 uint32_t quota)
209{
210 struct dp_pdev *pdev = soc->pdev_list[mac_id];
211 void *hal_soc;
212 void *mon_status_srng;
213 void *rxdma_mon_status_ring_entry;
214 QDF_STATUS status;
215 uint32_t work_done = 0;
216
Kai Chen6eca1a62017-01-12 10:17:53 -0800217 mon_status_srng = pdev->rxdma_mon_status_ring.hal_srng;
218
219 qdf_assert(mon_status_srng);
Houston Hoffman648a9182017-05-21 23:27:50 -0700220 if (!mon_status_srng || !hal_srng_initialized(mon_status_srng)) {
221
222 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
223 "%s %d : HAL Monitor Destination Ring Init Failed -- %p\n",
224 __func__, __LINE__, mon_status_srng);
225 return work_done;
226 }
Kai Chen6eca1a62017-01-12 10:17:53 -0800227
228 hal_soc = soc->hal_soc;
229
230 qdf_assert(hal_soc);
231
232 if (qdf_unlikely(hal_srng_access_start(hal_soc, mon_status_srng)))
233 goto done;
234
235 /* mon_status_ring_desc => WBM_BUFFER_RING STRUCT =>
236 * BUFFER_ADDR_INFO STRUCT
237 */
238 while (qdf_likely((rxdma_mon_status_ring_entry =
239 hal_srng_src_peek(hal_soc, mon_status_srng))
240 && quota--)) {
241 uint32_t rx_buf_cookie;
242 qdf_nbuf_t status_nbuf;
243 struct dp_rx_desc *rx_desc;
244 uint8_t *status_buf;
245 qdf_dma_addr_t paddr;
246 uint64_t buf_addr;
247
248 buf_addr =
249 (HAL_RX_BUFFER_ADDR_31_0_GET(
250 rxdma_mon_status_ring_entry) |
251 ((uint64_t)(HAL_RX_BUFFER_ADDR_39_32_GET(
252 rxdma_mon_status_ring_entry)) << 32));
253
254 if (qdf_likely(buf_addr)) {
255
256 rx_buf_cookie =
257 HAL_RX_BUF_COOKIE_GET(
258 rxdma_mon_status_ring_entry);
259 rx_desc = dp_rx_cookie_2_va_mon_status(soc,
260 rx_buf_cookie);
261
262 qdf_assert(rx_desc);
263
264 status_nbuf = rx_desc->nbuf;
265
266 qdf_nbuf_sync_for_cpu(soc->osdev, status_nbuf,
267 QDF_DMA_FROM_DEVICE);
268
269 status_buf = qdf_nbuf_data(status_nbuf);
270
271 status = hal_get_rx_status_done(status_buf);
272
273 if (status != QDF_STATUS_SUCCESS) {
274 QDF_TRACE(QDF_MODULE_ID_DP,
275 QDF_TRACE_LEVEL_WARN,
276 "[%s][%d] status not done",
277 __func__, __LINE__);
278 break;
279 }
280 qdf_nbuf_set_pktlen(status_nbuf, RX_BUFFER_SIZE);
281
282 qdf_nbuf_unmap_single(soc->osdev, status_nbuf,
283 QDF_DMA_FROM_DEVICE);
284
285 /* Put the status_nbuf to queue */
286 qdf_nbuf_queue_add(&pdev->rx_status_q, status_nbuf);
287
288 } else {
289 union dp_rx_desc_list_elem_t *desc_list = NULL;
290 union dp_rx_desc_list_elem_t *tail = NULL;
291 struct rx_desc_pool *rx_desc_pool;
292 uint32_t num_alloc_desc;
293
294 rx_desc_pool = &soc->rx_desc_status[mac_id];
295
296 num_alloc_desc = dp_rx_get_free_desc_list(soc, mac_id,
297 rx_desc_pool,
298 1,
299 &desc_list,
300 &tail);
301
302 rx_desc = &desc_list->rx_desc;
303 }
304
305 /* Allocate a new skb */
306 status_nbuf = qdf_nbuf_alloc(pdev->osif_pdev, RX_BUFFER_SIZE,
307 RX_BUFFER_RESERVATION, RX_BUFFER_ALIGNMENT, FALSE);
308
309 status_buf = qdf_nbuf_data(status_nbuf);
310
311 hal_clear_rx_status_done(status_buf);
312
313 qdf_nbuf_map_single(soc->osdev, status_nbuf,
314 QDF_DMA_BIDIRECTIONAL);
315 paddr = qdf_nbuf_get_frag_paddr(status_nbuf, 0);
316
317 rx_desc->nbuf = status_nbuf;
Pramod Simha59fcb312017-06-22 17:43:16 -0700318 rx_desc->in_use = 1;
Kai Chen6eca1a62017-01-12 10:17:53 -0800319
320 hal_rxdma_buff_addr_info_set(rxdma_mon_status_ring_entry,
321 paddr, rx_desc->cookie, HAL_RX_BUF_RBM_SW3_BM);
322
323 rxdma_mon_status_ring_entry =
324 hal_srng_src_get_next(hal_soc, mon_status_srng);
325 work_done++;
326 }
327done:
328
329 hal_srng_access_end(hal_soc, mon_status_srng);
330
331 return work_done;
332
333}
334/*
335 * dp_rx_mon_status_process() - Process monitor status ring and
336 * TLV in status ring.
337 *
338 * @soc: core txrx main context
339 * @mac_id: mac_id which is one of 3 mac_ids
340 * @quota: No. of ring entry that can be serviced in one shot.
341
342 * Return: uint32_t: No. of ring entry that is processed.
343 */
344static inline uint32_t
345dp_rx_mon_status_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota) {
346 uint32_t work_done;
347
348 work_done = dp_rx_mon_status_srng_process(soc, mac_id, quota);
Karunakar Dasineni40555682017-03-26 22:44:39 -0700349 quota -= work_done;
350 dp_rx_mon_status_process_tlv(soc, mac_id, quota);
Kai Chen6eca1a62017-01-12 10:17:53 -0800351
352 return work_done;
353}
354/**
355 * dp_mon_process() - Main monitor mode processing roution.
356 * This call monitor status ring process then monitor
357 * destination ring process.
358 * Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
359 * @soc: core txrx main context
360 * @mac_id: mac_id which is one of 3 mac_ids
361 * @quota: No. of status ring entry that can be serviced in one shot.
362
363 * Return: uint32_t: No. of ring entry that is processed.
364 */
365uint32_t
366dp_mon_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota) {
Karunakar Dasineni40555682017-03-26 22:44:39 -0700367 return dp_rx_mon_status_process(soc, mac_id, quota);
Kai Chen6eca1a62017-01-12 10:17:53 -0800368}
Karunakar Dasineni40555682017-03-26 22:44:39 -0700369
Kai Chen6eca1a62017-01-12 10:17:53 -0800370/**
371 * dp_rx_pdev_mon_detach() - detach dp rx for status ring
372 * @pdev: core txrx pdev context
373 *
374 * This function will detach DP RX status ring from
375 * main device context. will free DP Rx resources for
376 * status ring
377 *
378 * Return: QDF_STATUS_SUCCESS: success
379 * QDF_STATUS_E_RESOURCES: Error return
380 */
381QDF_STATUS
382dp_rx_pdev_mon_status_detach(struct dp_pdev *pdev)
383{
384 uint8_t pdev_id = pdev->pdev_id;
385 struct dp_soc *soc = pdev->soc;
386 struct rx_desc_pool *rx_desc_pool;
387
388 rx_desc_pool = &soc->rx_desc_status[pdev_id];
389 dp_rx_desc_pool_free(soc, pdev_id, rx_desc_pool);
390
391 return QDF_STATUS_SUCCESS;
392}
393
394/*
395 * dp_rx_buffers_replenish() - replenish monitor status ring with
396 * rx nbufs called during dp rx
397 * monitor status ring initialization
398 *
399 * @soc: core txrx main context
400 * @mac_id: mac_id which is one of 3 mac_ids
401 * @dp_rxdma_srng: dp monitor status circular ring
402 * @rx_desc_pool; Pointer to Rx descriptor pool
403 * @num_req_buffers: number of buffer to be replenished
404 * @desc_list: list of descs if called from dp rx monitor status
405 * process or NULL during dp rx initialization or
406 * out of buffer interrupt
407 * @tail: tail of descs list
408 * @owner: who owns the nbuf (host, NSS etc...)
409 * Return: return success or failure
410 */
411static inline
412QDF_STATUS dp_rx_mon_status_buffers_replenish(struct dp_soc *dp_soc,
413 uint32_t mac_id,
414 struct dp_srng *dp_rxdma_srng,
415 struct rx_desc_pool *rx_desc_pool,
416 uint32_t num_req_buffers,
417 union dp_rx_desc_list_elem_t **desc_list,
418 union dp_rx_desc_list_elem_t **tail,
419 uint8_t owner)
420{
421 uint32_t num_alloc_desc;
422 uint16_t num_desc_to_free = 0;
423 uint32_t num_entries_avail;
424 uint32_t count;
425 int sync_hw_ptr = 1;
426 qdf_dma_addr_t paddr;
427 qdf_nbuf_t rx_netbuf;
428 void *rxdma_ring_entry;
429 union dp_rx_desc_list_elem_t *next;
430 void *rxdma_srng;
431 uint8_t *status_buf;
432
433 rxdma_srng = dp_rxdma_srng->hal_srng;
434
435 qdf_assert(rxdma_srng);
436
Houston Hoffmanae850c62017-08-11 16:47:50 -0700437 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
Kai Chen6eca1a62017-01-12 10:17:53 -0800438 "[%s][%d] requested %d buffers for replenish\n",
439 __func__, __LINE__, num_req_buffers);
440
441 /*
442 * if desc_list is NULL, allocate the descs from freelist
443 */
444 if (!(*desc_list)) {
445
446 num_alloc_desc = dp_rx_get_free_desc_list(dp_soc, mac_id,
447 rx_desc_pool,
448 num_req_buffers,
449 desc_list,
450 tail);
451
452 if (!num_alloc_desc) {
453 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
454 "[%s][%d] no free rx_descs in freelist\n",
455 __func__, __LINE__);
456 return QDF_STATUS_E_NOMEM;
457 }
458
Houston Hoffmanae850c62017-08-11 16:47:50 -0700459 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
Kai Chen6eca1a62017-01-12 10:17:53 -0800460 "[%s][%d] %d rx desc allocated\n", __func__, __LINE__,
461 num_alloc_desc);
Houston Hoffmanae850c62017-08-11 16:47:50 -0700462
Kai Chen6eca1a62017-01-12 10:17:53 -0800463 num_req_buffers = num_alloc_desc;
464 }
465
466 hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
467 num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
468 rxdma_srng, sync_hw_ptr);
469
Houston Hoffmanae850c62017-08-11 16:47:50 -0700470 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
Kai Chen6eca1a62017-01-12 10:17:53 -0800471 "[%s][%d] no of availble entries in rxdma ring: %d\n",
472 __func__, __LINE__, num_entries_avail);
473
474 if (num_entries_avail < num_req_buffers) {
475 num_desc_to_free = num_req_buffers - num_entries_avail;
476 num_req_buffers = num_entries_avail;
477 }
478
479 for (count = 0; count < num_req_buffers; count++) {
480 rxdma_ring_entry = hal_srng_src_get_next(dp_soc->hal_soc,
481 rxdma_srng);
482
483 rx_netbuf = qdf_nbuf_alloc(dp_soc->osdev,
484 RX_BUFFER_SIZE,
485 RX_BUFFER_RESERVATION,
486 RX_BUFFER_ALIGNMENT,
487 FALSE);
488
489 status_buf = qdf_nbuf_data(rx_netbuf);
490 hal_clear_rx_status_done(status_buf);
491
492 memset(status_buf, 0, RX_BUFFER_SIZE);
493
494 qdf_nbuf_map_single(dp_soc->osdev, rx_netbuf,
495 QDF_DMA_BIDIRECTIONAL);
496
497 paddr = qdf_nbuf_get_frag_paddr(rx_netbuf, 0);
498
499 next = (*desc_list)->next;
500
501 (*desc_list)->rx_desc.nbuf = rx_netbuf;
Pramod Simha59fcb312017-06-22 17:43:16 -0700502 (*desc_list)->rx_desc.in_use = 1;
Kai Chen6eca1a62017-01-12 10:17:53 -0800503 hal_rxdma_buff_addr_info_set(rxdma_ring_entry, paddr,
504 (*desc_list)->rx_desc.cookie, owner);
505
Karunakar Dasineni40555682017-03-26 22:44:39 -0700506 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
Kai Chen6eca1a62017-01-12 10:17:53 -0800507 "[%s][%d] rx_desc=%p, cookie=%d, nbuf=%p, \
508 status_buf=%p paddr=%p\n",
509 __func__, __LINE__, &(*desc_list)->rx_desc,
510 (*desc_list)->rx_desc.cookie, rx_netbuf,
511 status_buf, (void *)paddr);
512
513 *desc_list = next;
514 }
515
516 hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
517
Houston Hoffmanae850c62017-08-11 16:47:50 -0700518 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
Kai Chen6eca1a62017-01-12 10:17:53 -0800519 "successfully replenished %d buffers\n", num_req_buffers);
520
Houston Hoffmanae850c62017-08-11 16:47:50 -0700521 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
Kai Chen6eca1a62017-01-12 10:17:53 -0800522 "%d rx desc added back to free list\n", num_desc_to_free);
523
Houston Hoffmanae850c62017-08-11 16:47:50 -0700524 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
Kai Chen6eca1a62017-01-12 10:17:53 -0800525 "[%s][%d] desc_list=%p, tail=%p rx_desc=%p, cookie=%d\n",
526 __func__, __LINE__, desc_list, tail, &(*desc_list)->rx_desc,
527 (*desc_list)->rx_desc.cookie);
528
529 /*
530 * add any available free desc back to the free list
531 */
532 if (*desc_list) {
533 dp_rx_add_desc_list_to_free_list(dp_soc, desc_list, tail,
534 mac_id, rx_desc_pool);
535 }
536
537 return QDF_STATUS_SUCCESS;
538}
539/**
540 * dp_rx_pdev_mon_status_attach() - attach DP RX monitor status ring
541 * @pdev: core txrx pdev context
542 *
543 * This function will attach a DP RX monitor status ring into pDEV
544 * and replenish monitor status ring with buffer.
545 *
546 * Return: QDF_STATUS_SUCCESS: success
547 * QDF_STATUS_E_RESOURCES: Error return
548 */
549QDF_STATUS
550dp_rx_pdev_mon_status_attach(struct dp_pdev *pdev) {
551 uint8_t pdev_id = pdev->pdev_id;
552 struct dp_soc *soc = pdev->soc;
553 union dp_rx_desc_list_elem_t *desc_list = NULL;
554 union dp_rx_desc_list_elem_t *tail = NULL;
555 struct dp_srng *rxdma_srng;
556 uint32_t rxdma_entries;
557 struct rx_desc_pool *rx_desc_pool;
Ravi Joshia9ebe0a2017-06-17 16:43:02 -0700558 QDF_STATUS status;
Kai Chen6eca1a62017-01-12 10:17:53 -0800559
560 rxdma_srng = &pdev->rxdma_mon_status_ring;
561
562 rxdma_entries = rxdma_srng->alloc_size/hal_srng_get_entrysize(
563 soc->hal_soc, RXDMA_MONITOR_STATUS);
564
565 rx_desc_pool = &soc->rx_desc_status[pdev_id];
566
567 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
568 "%s: Mon RX Status Pool[%d] allocation size=%d\n",
569 __func__, pdev_id, rxdma_entries);
570
Ravi Joshia9ebe0a2017-06-17 16:43:02 -0700571 status = dp_rx_desc_pool_alloc(soc, pdev_id, rxdma_entries+1,
572 rx_desc_pool);
Ravi Joshi73a05512017-07-10 13:53:32 -0700573 if (!QDF_IS_STATUS_SUCCESS(status)) {
Ravi Joshia9ebe0a2017-06-17 16:43:02 -0700574 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
575 "%s: dp_rx_desc_pool_alloc() failed \n", __func__);
576 return status;
577 }
Kai Chen6eca1a62017-01-12 10:17:53 -0800578
579 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
580 "%s: Mon RX Status Buffers Replenish pdev_id=%d\n",
581 __func__, pdev_id);
582
Ravi Joshia9ebe0a2017-06-17 16:43:02 -0700583 status = dp_rx_mon_status_buffers_replenish(soc, pdev_id, rxdma_srng,
584 rx_desc_pool, rxdma_entries, &desc_list, &tail,
585 HAL_RX_BUF_RBM_SW3_BM);
Ravi Joshi73a05512017-07-10 13:53:32 -0700586 if (!QDF_IS_STATUS_SUCCESS(status)) {
Ravi Joshia9ebe0a2017-06-17 16:43:02 -0700587 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
588 "%s: dp_rx_buffers_replenish() failed \n", __func__);
589 return status;
590 }
Kai Chen6eca1a62017-01-12 10:17:53 -0800591
592 qdf_nbuf_queue_init(&pdev->rx_status_q);
593
594 pdev->mon_ppdu_status = DP_PPDU_STATUS_START;
Karunakar Dasineni40555682017-03-26 22:44:39 -0700595 qdf_mem_zero(&(pdev->ppdu_info.rx_status),
596 sizeof(pdev->ppdu_info.rx_status));
Kai Chen6eca1a62017-01-12 10:17:53 -0800597
598 return QDF_STATUS_SUCCESS;
599}