/*
 * Copyright (c) 2017 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "hal_rx.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#include "hal_api_mon.h"
#include "ieee80211.h"
#include "dp_rx_mon.h"

/**
 * dp_rx_mon_status_process_tlv() - Process the status TLVs in the status
 * buffers on the Rx status queue posted by status SRNG processing.
 * @soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 *
 * Return: none
 */
static inline void
dp_rx_mon_status_process_tlv(struct dp_soc *soc, uint32_t mac_id) {
        struct dp_pdev *pdev = soc->pdev_list[mac_id];
        struct hal_rx_ppdu_info *ppdu_info;
        qdf_nbuf_t status_nbuf;
        uint8_t *rx_tlv;
        uint8_t *rx_tlv_start;
        uint32_t tlv_status;

#ifdef DP_INTR_POLL_BASED
        if (!pdev)
                return;
#endif

        ppdu_info = &pdev->ppdu_info;

        if (pdev->mon_ppdu_status != DP_PPDU_STATUS_START)
                return;

        while (!qdf_nbuf_is_queue_empty(&pdev->rx_status_q)) {

                status_nbuf = qdf_nbuf_queue_remove(&pdev->rx_status_q);
                rx_tlv = qdf_nbuf_data(status_nbuf);
                rx_tlv_start = rx_tlv;

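                /*
                 * Walk each TLV in this status buffer, accumulating the
                 * PPDU info, until the PPDU is complete or the end of
                 * the buffer is reached.
                 */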
                do {
                        tlv_status = hal_rx_status_get_tlv_info(rx_tlv,
                                        ppdu_info);
                        rx_tlv = hal_rx_status_get_next_tlv(rx_tlv);

                        if ((rx_tlv - rx_tlv_start) >= RX_BUFFER_SIZE)
                                break;

                } while (tlv_status == HAL_TLV_STATUS_PPDU_NOT_DONE);

                qdf_nbuf_free(status_nbuf);

                if (tlv_status == HAL_TLV_STATUS_PPDU_DONE) {

                        pdev->mon_ppdu_status = DP_PPDU_STATUS_DONE;
                        /* Temporary: immediately re-arm PPDU status
                         * tracking for the next PPDU.
                         */
                        pdev->mon_ppdu_status =
                                DP_PPDU_STATUS_START;
                        break;
                }
        }
        return;
}

/*
 * dp_rx_mon_status_srng_process() - Process the monitor status ring and
 * post filled status buffers to the Rx status queue for later TLV
 * processing. A new buffer is allocated and returned to the status
 * ring for each buffer that is posted.
 *
 * @soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @quota: Number of ring entries that can be serviced in one shot.
 *
 * Return: uint32_t: Number of ring entries processed.
 */
static inline uint32_t
dp_rx_mon_status_srng_process(struct dp_soc *soc, uint32_t mac_id,
        uint32_t quota)
{
        struct dp_pdev *pdev = soc->pdev_list[mac_id];
        void *hal_soc;
        void *mon_status_srng;
        void *rxdma_mon_status_ring_entry;
        QDF_STATUS status;
        uint32_t work_done = 0;

#ifdef DP_INTR_POLL_BASED
        if (!pdev)
                return work_done;
#endif

        mon_status_srng = pdev->rxdma_mon_status_ring.hal_srng;

        qdf_assert(mon_status_srng);

        hal_soc = soc->hal_soc;

        qdf_assert(hal_soc);

        if (qdf_unlikely(hal_srng_access_start(hal_soc, mon_status_srng)))
                goto done;

        /* mon_status_ring_desc => WBM_BUFFER_RING STRUCT =>
         * BUFFER_ADDR_INFO STRUCT
         */
        while (qdf_likely((rxdma_mon_status_ring_entry =
                hal_srng_src_peek(hal_soc, mon_status_srng))
                        && quota--)) {
                uint32_t rx_buf_cookie;
                qdf_nbuf_t status_nbuf;
                struct dp_rx_desc *rx_desc;
                uint8_t *status_buf;
                qdf_dma_addr_t paddr;
                uint64_t buf_addr;

                buf_addr =
                        (HAL_RX_BUFFER_ADDR_31_0_GET(
                                rxdma_mon_status_ring_entry) |
                        ((uint64_t)(HAL_RX_BUFFER_ADDR_39_32_GET(
                                rxdma_mon_status_ring_entry)) << 32));

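                /*
                 * A non-zero buffer address means this ring entry holds
                 * a filled status buffer; otherwise the entry is empty
                 * and a fresh descriptor must be taken from the free
                 * list before a replacement buffer can be attached.
                 */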
                if (qdf_likely(buf_addr)) {

                        rx_buf_cookie =
                                HAL_RX_BUF_COOKIE_GET(
                                        rxdma_mon_status_ring_entry);
                        rx_desc = dp_rx_cookie_2_va_mon_status(soc,
                                rx_buf_cookie);

                        qdf_assert(rx_desc);

                        status_nbuf = rx_desc->nbuf;

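                        /*
                         * Sync the buffer for CPU access so the DMA
                         * completion marker written by hardware can be
                         * checked before the buffer is consumed.
                         */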
                        qdf_nbuf_sync_for_cpu(soc->osdev, status_nbuf,
                                QDF_DMA_FROM_DEVICE);

                        status_buf = qdf_nbuf_data(status_nbuf);

                        status = hal_get_rx_status_done(status_buf);

                        if (status != QDF_STATUS_SUCCESS) {
                                QDF_TRACE(QDF_MODULE_ID_DP,
                                        QDF_TRACE_LEVEL_WARN,
                                        "[%s][%d] status not done",
                                        __func__, __LINE__);
                                break;
                        }
                        qdf_nbuf_set_pktlen(status_nbuf, RX_BUFFER_SIZE);

                        qdf_nbuf_unmap_single(soc->osdev, status_nbuf,
                                QDF_DMA_FROM_DEVICE);

                        /* Put the status_nbuf to queue */
                        qdf_nbuf_queue_add(&pdev->rx_status_q, status_nbuf);

                } else {
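                        /*
                         * No buffer is attached to this ring entry yet,
                         * so reserve a descriptor from the status
                         * descriptor free list for the new buffer.
                         */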
                        union dp_rx_desc_list_elem_t *desc_list = NULL;
                        union dp_rx_desc_list_elem_t *tail = NULL;
                        struct rx_desc_pool *rx_desc_pool;
                        uint32_t num_alloc_desc;

                        rx_desc_pool = &soc->rx_desc_status[mac_id];

                        num_alloc_desc = dp_rx_get_free_desc_list(soc, mac_id,
                                rx_desc_pool,
                                1,
                                &desc_list,
                                &tail);

                        rx_desc = &desc_list->rx_desc;
                }

                /* Allocate a new skb */
                status_nbuf = qdf_nbuf_alloc(pdev->osif_pdev, RX_BUFFER_SIZE,
                        RX_BUFFER_RESERVATION, RX_BUFFER_ALIGNMENT, FALSE);

                status_buf = qdf_nbuf_data(status_nbuf);

                hal_clear_rx_status_done(status_buf);

                qdf_nbuf_map_single(soc->osdev, status_nbuf,
                        QDF_DMA_BIDIRECTIONAL);
                paddr = qdf_nbuf_get_frag_paddr(status_nbuf, 0);

                rx_desc->nbuf = status_nbuf;

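                /*
                 * Hand the new buffer back to hardware by writing its
                 * physical address and cookie into the ring entry.
                 */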
                hal_rxdma_buff_addr_info_set(rxdma_mon_status_ring_entry,
                        paddr, rx_desc->cookie, HAL_RX_BUF_RBM_SW3_BM);

                rxdma_mon_status_ring_entry =
                        hal_srng_src_get_next(hal_soc, mon_status_srng);
                work_done++;
        }
done:

        hal_srng_access_end(hal_soc, mon_status_srng);

        return work_done;

}
/*
 * dp_rx_mon_status_process() - Process the monitor status ring and the
 * TLVs in the posted status buffers.
 *
 * @soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @quota: Number of ring entries that can be serviced in one shot.
 *
 * Return: uint32_t: Number of ring entries processed.
 */
static inline uint32_t
dp_rx_mon_status_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota) {
        uint32_t work_done;

        work_done = dp_rx_mon_status_srng_process(soc, mac_id, quota);

        dp_rx_mon_status_process_tlv(soc, mac_id);

        return work_done;
}
/**
 * dp_mon_process() - Main monitor mode processing routine.
 * Runs the monitor status ring processing followed by the monitor
 * destination ring processing.
 * Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
 * @soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @quota: Number of status ring entries that can be serviced in one shot.
 *
 * Return: uint32_t: Number of ring entries processed.
 */
uint32_t
dp_mon_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota) {
        uint32_t work_done;

        work_done = dp_rx_mon_status_process(soc, mac_id, quota);

        dp_rx_mon_dest_process(soc, mac_id, quota);

        return work_done;
}
/**
 * dp_rx_pdev_mon_status_detach() - detach dp rx for status ring
 * @pdev: core txrx pdev context
 *
 * This function detaches the DP RX monitor status ring from the
 * main device context and frees the DP Rx resources held for the
 * status ring.
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS
dp_rx_pdev_mon_status_detach(struct dp_pdev *pdev)
{
        uint8_t pdev_id = pdev->pdev_id;
        struct dp_soc *soc = pdev->soc;
        struct rx_desc_pool *rx_desc_pool;

        rx_desc_pool = &soc->rx_desc_status[pdev_id];
        dp_rx_desc_pool_free(soc, pdev_id, rx_desc_pool);

        return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_mon_status_buffers_replenish() - replenish the monitor status
 * ring with rx nbufs; called during dp rx monitor status ring
 * initialization
 *
 * @dp_soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp monitor status circular ring
 * @rx_desc_pool: Pointer to Rx descriptor pool
 * @num_req_buffers: number of buffers to be replenished
 * @desc_list: list of descs if called from dp rx monitor status
 *             process or NULL during dp rx initialization or
 *             out of buffer interrupt
 * @tail: tail of descs list
 * @owner: who owns the nbuf (host, NSS etc...)
 *
 * Return: return success or failure
 */
static inline
QDF_STATUS dp_rx_mon_status_buffers_replenish(struct dp_soc *dp_soc,
        uint32_t mac_id,
        struct dp_srng *dp_rxdma_srng,
        struct rx_desc_pool *rx_desc_pool,
        uint32_t num_req_buffers,
        union dp_rx_desc_list_elem_t **desc_list,
        union dp_rx_desc_list_elem_t **tail,
        uint8_t owner)
{
        uint32_t num_alloc_desc;
        uint16_t num_desc_to_free = 0;
        uint32_t num_entries_avail;
        uint32_t count;
        int sync_hw_ptr = 1;
        qdf_dma_addr_t paddr;
        qdf_nbuf_t rx_netbuf;
        void *rxdma_ring_entry;
        union dp_rx_desc_list_elem_t *next;
        void *rxdma_srng;
        uint8_t *status_buf;

        rxdma_srng = dp_rxdma_srng->hal_srng;

        qdf_assert(rxdma_srng);

        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
                "[%s][%d] requested %d buffers for replenish\n",
                __func__, __LINE__, num_req_buffers);

        /*
         * if desc_list is NULL, allocate the descs from freelist
         */
        if (!(*desc_list)) {

                num_alloc_desc = dp_rx_get_free_desc_list(dp_soc, mac_id,
                        rx_desc_pool,
                        num_req_buffers,
                        desc_list,
                        tail);

                if (!num_alloc_desc) {
                        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                                "[%s][%d] no free rx_descs in freelist\n",
                                __func__, __LINE__);
                        return QDF_STATUS_E_NOMEM;
                }

                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
                        "[%s][%d] %d rx desc allocated\n", __func__, __LINE__,
                        num_alloc_desc);
                num_req_buffers = num_alloc_desc;
        }

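        /*
         * Start SRNG access and check how many ring entries are
         * actually free before filling them.
         */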
        hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
        num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
                rxdma_srng, sync_hw_ptr);

        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
                "[%s][%d] number of available entries in rxdma ring: %d\n",
                __func__, __LINE__, num_entries_avail);

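        /*
         * If the ring cannot hold all requested buffers, fill only what
         * fits; the unused descriptors are returned to the free list at
         * the end of this function.
         */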
        if (num_entries_avail < num_req_buffers) {
                num_desc_to_free = num_req_buffers - num_entries_avail;
                num_req_buffers = num_entries_avail;
        }

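        /*
         * For each ring entry: allocate a status nbuf, clear its
         * DMA-done marker, map it for DMA, link it to an rx descriptor
         * and program its address and cookie into the ring entry.
         */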
        for (count = 0; count < num_req_buffers; count++) {
                rxdma_ring_entry = hal_srng_src_get_next(dp_soc->hal_soc,
                        rxdma_srng);

                rx_netbuf = qdf_nbuf_alloc(dp_soc->osdev,
                        RX_BUFFER_SIZE,
                        RX_BUFFER_RESERVATION,
                        RX_BUFFER_ALIGNMENT,
                        FALSE);

                status_buf = qdf_nbuf_data(rx_netbuf);
                hal_clear_rx_status_done(status_buf);

                memset(status_buf, 0, RX_BUFFER_SIZE);

                qdf_nbuf_map_single(dp_soc->osdev, rx_netbuf,
                        QDF_DMA_BIDIRECTIONAL);

                paddr = qdf_nbuf_get_frag_paddr(rx_netbuf, 0);

                next = (*desc_list)->next;

                (*desc_list)->rx_desc.nbuf = rx_netbuf;
                hal_rxdma_buff_addr_info_set(rxdma_ring_entry, paddr,
                        (*desc_list)->rx_desc.cookie, owner);

                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
                        "[%s][%d] rx_desc=%p, cookie=%d, nbuf=%p, "
                        "status_buf=%p paddr=%p\n",
                        __func__, __LINE__, &(*desc_list)->rx_desc,
                        (*desc_list)->rx_desc.cookie, rx_netbuf,
                        status_buf, (void *)paddr);

                *desc_list = next;
        }

        hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);

        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
                "successfully replenished %d buffers\n", num_req_buffers);

        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
                "%d rx desc added back to free list\n", num_desc_to_free);

        /*
         * add any remaining free descs back to the free list
         */
        if (*desc_list) {
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
                        "[%s][%d] desc_list=%p, tail=%p rx_desc=%p, cookie=%d\n",
                        __func__, __LINE__, desc_list, tail,
                        &(*desc_list)->rx_desc,
                        (*desc_list)->rx_desc.cookie);

                dp_rx_add_desc_list_to_free_list(dp_soc, desc_list, tail,
                        mac_id, rx_desc_pool);
        }

        return QDF_STATUS_SUCCESS;
}
/**
 * dp_rx_pdev_mon_status_attach() - attach DP RX monitor status ring
 * @pdev: core txrx pdev context
 *
 * This function attaches a DP RX monitor status ring to the pdev
 * and replenishes the monitor status ring with buffers.
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS
dp_rx_pdev_mon_status_attach(struct dp_pdev *pdev) {
        uint8_t pdev_id = pdev->pdev_id;
        struct dp_soc *soc = pdev->soc;
        union dp_rx_desc_list_elem_t *desc_list = NULL;
        union dp_rx_desc_list_elem_t *tail = NULL;
        struct dp_srng *rxdma_srng;
        uint32_t rxdma_entries;
        struct rx_desc_pool *rx_desc_pool;

        rxdma_srng = &pdev->rxdma_mon_status_ring;

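        /*
         * Derive the number of ring entries from the allocated ring
         * size and the per-entry size reported by HAL.
         */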
        rxdma_entries = rxdma_srng->alloc_size / hal_srng_get_entrysize(
                soc->hal_soc, RXDMA_MONITOR_STATUS);

        rx_desc_pool = &soc->rx_desc_status[pdev_id];

        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
                "%s: Mon RX Status Pool[%d] allocation size=%d\n",
                __func__, pdev_id, rxdma_entries);

        dp_rx_desc_pool_alloc(soc, pdev_id, rxdma_entries + 1, rx_desc_pool);

        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
                "%s: Mon RX Status Buffers Replenish pdev_id=%d\n",
                __func__, pdev_id);

        dp_rx_mon_status_buffers_replenish(soc, pdev_id, rxdma_srng,
                rx_desc_pool, rxdma_entries, &desc_list, &tail,
                HAL_RX_BUF_RBM_SW3_BM);

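        /*
         * Initialize the Rx status queue that holds filled status
         * buffers and arm PPDU status tracking.
         */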
        qdf_nbuf_queue_init(&pdev->rx_status_q);

        pdev->mon_ppdu_status = DP_PPDU_STATUS_START;

        return QDF_STATUS_SUCCESS;
}