/*
 * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <dp_txrx.h>
#include "dp_peer.h"
#include "dp_internal.h"
#include <cdp_txrx_cmn_struct.h>
#include <cdp_txrx_peer_ops.h>
#include <cds_sched.h>

/* Timeout in ms to wait for a DP rx thread */
#define DP_RX_THREAD_WAIT_TIMEOUT 200

#define DP_RX_TM_DEBUG 0
#if DP_RX_TM_DEBUG
/**
 * dp_rx_tm_walk_skb_list() - Walk skb list and print members
 * @nbuf_list - nbuf list to print
 *
 * Returns: None
 */
static inline void dp_rx_tm_walk_skb_list(qdf_nbuf_t nbuf_list)
{
	qdf_nbuf_t nbuf;
	int i = 0;

	nbuf = nbuf_list;
	while (nbuf) {
		dp_debug("%d nbuf:%pK nbuf->next:%pK nbuf->data:%pK", i,
			 nbuf, qdf_nbuf_next(nbuf), qdf_nbuf_data(nbuf));
		nbuf = qdf_nbuf_next(nbuf);
		i++;
	}
}
#else
static inline void dp_rx_tm_walk_skb_list(qdf_nbuf_t nbuf_list)
{ }
#endif /* DP_RX_TM_DEBUG */

/**
 * dp_rx_tm_get_soc_handle() - get soc handle from struct dp_rx_tm_handle_cmn
 * @rx_tm_handle_cmn - rx thread manager cmn handle
 *
 * Returns: ol_txrx_soc_handle on success, NULL on failure.
 */
static inline
ol_txrx_soc_handle dp_rx_tm_get_soc_handle(struct dp_rx_tm_handle_cmn *rx_tm_handle_cmn)
{
	struct dp_txrx_handle_cmn *txrx_handle_cmn;
	ol_txrx_soc_handle soc;

	txrx_handle_cmn =
		dp_rx_thread_get_txrx_handle(rx_tm_handle_cmn);

	soc = dp_txrx_get_soc_from_ext_handle(txrx_handle_cmn);
	return soc;
}

/**
 * dp_rx_tm_thread_dump_stats() - display stats for a rx_thread
 * @rx_thread - rx_thread pointer for which the stats need to be
 *              displayed
 *
 * Returns: None
 */
static void dp_rx_tm_thread_dump_stats(struct dp_rx_thread *rx_thread)
{
	uint8_t reo_ring_num;
	uint32_t off = 0;
	char nbuf_queued_string[100];
	uint32_t total_queued = 0;
	uint32_t temp = 0;

	qdf_mem_zero(nbuf_queued_string, sizeof(nbuf_queued_string));

	for (reo_ring_num = 0; reo_ring_num < DP_RX_TM_MAX_REO_RINGS;
	     reo_ring_num++) {
		temp = rx_thread->stats.nbuf_queued[reo_ring_num];
		if (!temp)
			continue;
		total_queued += temp;
		if (off >= sizeof(nbuf_queued_string))
			continue;
		off += qdf_scnprintf(&nbuf_queued_string[off],
				     sizeof(nbuf_queued_string) - off,
				     "reo[%u]:%u ", reo_ring_num, temp);
	}

	if (!total_queued)
		return;

	dp_info("thread:%u - qlen:%u queued:(total:%u %s) dequeued:%u stack:%u gro_flushes: %u gro_flushes_by_vdev_del: %u rx_flushes: %u max_len:%u invalid(peer:%u vdev:%u rx-handle:%u others:%u enq fail:%u)",
		rx_thread->id,
		qdf_nbuf_queue_head_qlen(&rx_thread->nbuf_queue),
		total_queued,
		nbuf_queued_string,
		rx_thread->stats.nbuf_dequeued,
		rx_thread->stats.nbuf_sent_to_stack,
		rx_thread->stats.gro_flushes,
		rx_thread->stats.gro_flushes_by_vdev_del,
		rx_thread->stats.rx_flushed,
		rx_thread->stats.nbufq_max_len,
		rx_thread->stats.dropped_invalid_peer,
		rx_thread->stats.dropped_invalid_vdev,
		rx_thread->stats.dropped_invalid_os_rx_handles,
		rx_thread->stats.dropped_others,
		rx_thread->stats.dropped_enq_fail);
}

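/**
 * dp_rx_tm_dump_stats() - dump stats for all threads tracked by the
 * RX thread manager
 * @rx_tm_hdl: dp_rx_tm_handle containing the overall thread
 * infrastructure
 *
 * Returns: QDF_STATUS_SUCCESS
 */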
QDF_STATUS dp_rx_tm_dump_stats(struct dp_rx_tm_handle *rx_tm_hdl)
{
	int i;

	for (i = 0; i < rx_tm_hdl->num_dp_rx_threads; i++) {
		if (!rx_tm_hdl->rx_thread[i])
			continue;
		dp_rx_tm_thread_dump_stats(rx_tm_hdl->rx_thread[i]);
	}
	return QDF_STATUS_SUCCESS;
}

#ifdef FEATURE_ALLOW_PKT_DROPPING
/*
 * dp_check_and_update_pending() - Check and Set RX Pending flag
 * @tm_handle_cmn - rx thread manager common handle
 *
 * Returns: QDF_STATUS_SUCCESS on success or qdf error code on
 * failure
 */
static inline
QDF_STATUS dp_check_and_update_pending(struct dp_rx_tm_handle_cmn
				       *tm_handle_cmn)
{
	struct dp_txrx_handle_cmn *txrx_handle_cmn;
	struct dp_rx_tm_handle *rx_tm_hdl =
			(struct dp_rx_tm_handle *)tm_handle_cmn;
	struct dp_soc *dp_soc;
	uint32_t rx_pending_hl_threshold;
	uint32_t rx_pending_lo_threshold;
	uint32_t nbuf_queued_total = 0;
	uint32_t nbuf_dequeued_total = 0;
	uint32_t pending = 0;
	int i;

	txrx_handle_cmn =
		dp_rx_thread_get_txrx_handle(tm_handle_cmn);
	if (!txrx_handle_cmn) {
		dp_err("invalid txrx_handle_cmn!");
		QDF_BUG(0);
		return QDF_STATUS_E_FAILURE;
	}

	dp_soc = (struct dp_soc *)dp_txrx_get_soc_from_ext_handle(
					txrx_handle_cmn);
	if (!dp_soc) {
		dp_err("invalid soc!");
		QDF_BUG(0);
		return QDF_STATUS_E_FAILURE;
	}

	rx_pending_hl_threshold = wlan_cfg_rx_pending_hl_threshold(
				  dp_soc->wlan_cfg_ctx);
	rx_pending_lo_threshold = wlan_cfg_rx_pending_lo_threshold(
				  dp_soc->wlan_cfg_ctx);

	for (i = 0; i < rx_tm_hdl->num_dp_rx_threads; i++) {
		if (likely(rx_tm_hdl->rx_thread[i])) {
			nbuf_queued_total +=
				rx_tm_hdl->rx_thread[i]->stats.nbuf_queued_total;
			nbuf_dequeued_total +=
				rx_tm_hdl->rx_thread[i]->stats.nbuf_dequeued;
		}
	}

	if (nbuf_queued_total > nbuf_dequeued_total)
		pending = nbuf_queued_total - nbuf_dequeued_total;

	if (unlikely(pending > rx_pending_hl_threshold))
		qdf_atomic_set(&rx_tm_hdl->allow_dropping, 1);
	else if (pending < rx_pending_lo_threshold)
		qdf_atomic_set(&rx_tm_hdl->allow_dropping, 0);

	return QDF_STATUS_SUCCESS;
}

#else
static inline
QDF_STATUS dp_check_and_update_pending(struct dp_rx_tm_handle_cmn
				       *tm_handle_cmn)
{
	return QDF_STATUS_SUCCESS;
}
#endif

/**
 * dp_rx_tm_thread_enqueue() - enqueue nbuf list into rx_thread
 * @rx_thread - rx_thread in which the nbuf needs to be queued
 * @nbuf_list - list of packets to be queued into the thread
 *
 * Enqueue packet into rx_thread and wake it up. The function
 * moves the next pointer of the nbuf_list into the ext list of
 * the first nbuf for storage into the thread. Only the first
 * nbuf is queued into the thread nbuf queue. The reverse is
 * done at the time of dequeue.
 *
 * Returns: QDF_STATUS_SUCCESS on success or qdf error code on
 * failure
 */
static QDF_STATUS dp_rx_tm_thread_enqueue(struct dp_rx_thread *rx_thread,
					  qdf_nbuf_t nbuf_list)
{
	qdf_nbuf_t head_ptr, next_ptr_list;
	uint32_t temp_qlen;
	uint32_t num_elements_in_nbuf;
	uint32_t nbuf_queued;
	struct dp_rx_tm_handle_cmn *tm_handle_cmn;
	uint8_t reo_ring_num = QDF_NBUF_CB_RX_CTX_ID(nbuf_list);
	qdf_wait_queue_head_t *wait_q_ptr;
	uint8_t allow_dropping;

	tm_handle_cmn = rx_thread->rtm_handle_cmn;

	if (!tm_handle_cmn) {
		dp_alert("tm_handle_cmn is null!");
		QDF_BUG(0);
		return QDF_STATUS_E_FAILURE;
	}

	wait_q_ptr = &rx_thread->wait_q;

	if (reo_ring_num >= DP_RX_TM_MAX_REO_RINGS) {
		dp_alert("incorrect ring %u", reo_ring_num);
		QDF_BUG(0);
		return QDF_STATUS_E_FAILURE;
	}

	num_elements_in_nbuf = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf_list);
	nbuf_queued = num_elements_in_nbuf;

	allow_dropping = qdf_atomic_read(
		&((struct dp_rx_tm_handle *)tm_handle_cmn)->allow_dropping);
	if (unlikely(allow_dropping)) {
		qdf_nbuf_list_free(nbuf_list);
		rx_thread->stats.dropped_enq_fail += num_elements_in_nbuf;
		nbuf_queued = 0;
		goto enq_done;
	}

	dp_rx_tm_walk_skb_list(nbuf_list);

	head_ptr = nbuf_list;

	/* Ensure head doesn't have an ext list */
	while (qdf_unlikely(head_ptr && qdf_nbuf_get_ext_list(head_ptr))) {
		QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head_ptr) = 1;
		num_elements_in_nbuf--;
		next_ptr_list = head_ptr->next;
		qdf_nbuf_set_next(head_ptr, NULL);
		qdf_nbuf_queue_head_enqueue_tail(&rx_thread->nbuf_queue,
						 head_ptr);
		head_ptr = next_ptr_list;
	}

	if (!head_ptr)
		goto enq_done;

	QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head_ptr) = num_elements_in_nbuf;

	next_ptr_list = head_ptr->next;

	if (next_ptr_list) {
		/* move ->next pointer to ext list */
		qdf_nbuf_append_ext_list(head_ptr, next_ptr_list, 0);
		dp_debug("appended next_ptr_list %pK to nbuf %pK ext list %pK",
			 qdf_nbuf_next(nbuf_list), nbuf_list,
			 qdf_nbuf_get_ext_list(nbuf_list));
	}
	qdf_nbuf_set_next(head_ptr, NULL);

	qdf_nbuf_queue_head_enqueue_tail(&rx_thread->nbuf_queue, head_ptr);

enq_done:
	temp_qlen = qdf_nbuf_queue_head_qlen(&rx_thread->nbuf_queue);

	rx_thread->stats.nbuf_queued[reo_ring_num] += nbuf_queued;
	rx_thread->stats.nbuf_queued_total += nbuf_queued;

	dp_check_and_update_pending(tm_handle_cmn);

	if (temp_qlen > rx_thread->stats.nbufq_max_len)
		rx_thread->stats.nbufq_max_len = temp_qlen;

	dp_debug("enqueue packet thread %pK wait queue %pK qlen %u",
		 rx_thread, wait_q_ptr,
		 qdf_nbuf_queue_head_qlen(&rx_thread->nbuf_queue));

	qdf_set_bit(RX_POST_EVENT, &rx_thread->event_flag);
	qdf_wake_up_interruptible(wait_q_ptr);

	return QDF_STATUS_SUCCESS;
}

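/**
 * dp_rx_tm_thread_gro_flush_ind() - indicate a pending GRO flush to rx_thread
 * @rx_thread - rx_thread in which the flush needs to be handled
 *
 * Sets the gro_flush_ind flag of the thread and wakes it up so that the
 * flush is performed in the thread context.
 *
 * Returns: QDF_STATUS_SUCCESS
 */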
static QDF_STATUS dp_rx_tm_thread_gro_flush_ind(struct dp_rx_thread *rx_thread)
{
	struct dp_rx_tm_handle_cmn *tm_handle_cmn;
	qdf_wait_queue_head_t *wait_q_ptr;

	tm_handle_cmn = rx_thread->rtm_handle_cmn;
	wait_q_ptr = &rx_thread->wait_q;

	qdf_atomic_set(&rx_thread->gro_flush_ind, 1);

	dp_debug("Flush indication received");

	qdf_set_bit(RX_POST_EVENT, &rx_thread->event_flag);
	qdf_wake_up_interruptible(wait_q_ptr);
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_rx_thread_adjust_nbuf_list() - create an nbuf list from the frag list
 * @head - nbuf list to be created
 *
 * Returns: void
 */
static void dp_rx_thread_adjust_nbuf_list(qdf_nbuf_t head)
{
	qdf_nbuf_t next_ptr_list, nbuf_list;

	nbuf_list = head;
	if (head && QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head) > 1) {
		/* move ext list to ->next pointer */
		next_ptr_list = qdf_nbuf_get_ext_list(head);
		qdf_nbuf_append_ext_list(head, NULL, 0);
		qdf_nbuf_set_next(nbuf_list, next_ptr_list);
		dp_rx_tm_walk_skb_list(nbuf_list);
	}
}

/**
 * dp_rx_tm_thread_dequeue() - dequeue nbuf list from rx_thread
 * @rx_thread - rx_thread from which the nbuf needs to be dequeued
 *
 * Returns: nbuf or nbuf_list dequeued from rx_thread
 */
static qdf_nbuf_t dp_rx_tm_thread_dequeue(struct dp_rx_thread *rx_thread)
{
	qdf_nbuf_t head;

	head = qdf_nbuf_queue_head_dequeue(&rx_thread->nbuf_queue);
	dp_rx_thread_adjust_nbuf_list(head);

	dp_debug("Dequeued %pK nbuf_list", head);
	return head;
}

/**
 * dp_rx_thread_process_nbufq() - process nbuf queue of a thread
 * @rx_thread - rx_thread whose nbuf queue needs to be processed
 *
 * Returns: 0 on success, error code on failure
 */
static int dp_rx_thread_process_nbufq(struct dp_rx_thread *rx_thread)
{
	qdf_nbuf_t nbuf_list;
	uint8_t vdev_id;
	ol_txrx_rx_fp stack_fn;
	ol_osif_vdev_handle osif_vdev;
	ol_txrx_soc_handle soc;
	uint32_t num_list_elements = 0;

	struct dp_txrx_handle_cmn *txrx_handle_cmn;

	txrx_handle_cmn =
		dp_rx_thread_get_txrx_handle(rx_thread->rtm_handle_cmn);

	soc = dp_txrx_get_soc_from_ext_handle(txrx_handle_cmn);
	if (!soc) {
		dp_err("invalid soc!");
		QDF_BUG(0);
		return -EFAULT;
	}

	dp_debug("enter: qlen %u",
		 qdf_nbuf_queue_head_qlen(&rx_thread->nbuf_queue));

	nbuf_list = dp_rx_tm_thread_dequeue(rx_thread);
	while (nbuf_list) {
		num_list_elements =
			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf_list);
		rx_thread->stats.nbuf_dequeued += num_list_elements;

		vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf_list);
		cdp_get_os_rx_handles_from_vdev(soc, vdev_id, &stack_fn,
						&osif_vdev);
		dp_debug("rx_thread %pK sending packet %pK to stack",
			 rx_thread, nbuf_list);
		if (!stack_fn || !osif_vdev ||
		    QDF_STATUS_SUCCESS != stack_fn(osif_vdev, nbuf_list)) {
			rx_thread->stats.dropped_invalid_os_rx_handles +=
				num_list_elements;
			qdf_nbuf_list_free(nbuf_list);
		} else {
			rx_thread->stats.nbuf_sent_to_stack +=
				num_list_elements;
		}
		nbuf_list = dp_rx_tm_thread_dequeue(rx_thread);
	}

	dp_debug("exit: qlen %u",
		 qdf_nbuf_queue_head_qlen(&rx_thread->nbuf_queue));

	return 0;
}

/**
 * dp_rx_thread_gro_flush() - flush GRO packets for the RX thread
 * @rx_thread - rx_thread to be processed
 *
 * Returns: void
 */
static void dp_rx_thread_gro_flush(struct dp_rx_thread *rx_thread)
{
	dp_debug("flushing packets for thread %u", rx_thread->id);

	local_bh_disable();
	napi_gro_flush(&rx_thread->napi, false);
	local_bh_enable();

	rx_thread->stats.gro_flushes++;
}

/**
 * dp_rx_thread_sub_loop() - rx thread subloop
 * @rx_thread - rx_thread to be processed
 * @shutdown - pointer to shutdown variable
 *
 * The function handles shutdown and suspend events from other
 * threads and processes nbuf queue of a rx thread. In case a
 * shutdown event is received from some other wlan thread, the
 * function sets the shutdown pointer to true and returns
 *
 * Returns: 0 on success, error code on failure
 */
static int dp_rx_thread_sub_loop(struct dp_rx_thread *rx_thread, bool *shutdown)
{
	while (true) {
		if (qdf_atomic_test_and_clear_bit(RX_SHUTDOWN_EVENT,
						  &rx_thread->event_flag)) {
			if (qdf_atomic_test_and_clear_bit(RX_SUSPEND_EVENT,
							  &rx_thread->event_flag)) {
				qdf_event_set(&rx_thread->suspend_event);
			}
			dp_debug("shutting down (%s) id %d pid %d",
				 qdf_get_current_comm(), rx_thread->id,
				 qdf_get_current_pid());
			*shutdown = true;
			break;
		}

		dp_rx_thread_process_nbufq(rx_thread);

		if (qdf_atomic_read(&rx_thread->gro_flush_ind) |
		    qdf_atomic_test_bit(RX_VDEV_DEL_EVENT,
					&rx_thread->event_flag)) {
			dp_rx_thread_gro_flush(rx_thread);
			qdf_atomic_set(&rx_thread->gro_flush_ind, 0);
		}

		if (qdf_atomic_test_and_clear_bit(RX_VDEV_DEL_EVENT,
						  &rx_thread->event_flag)) {
			rx_thread->stats.gro_flushes_by_vdev_del++;
			qdf_event_set(&rx_thread->vdev_del_event);
		}

		if (qdf_atomic_test_and_clear_bit(RX_SUSPEND_EVENT,
						  &rx_thread->event_flag)) {
			dp_debug("received suspend ind (%s) id %d pid %d",
				 qdf_get_current_comm(), rx_thread->id,
				 qdf_get_current_pid());
			qdf_event_set(&rx_thread->suspend_event);
			dp_debug("waiting for resume (%s) id %d pid %d",
				 qdf_get_current_comm(), rx_thread->id,
				 qdf_get_current_pid());
			qdf_wait_single_event(&rx_thread->resume_event, 0);
		}
		break;
	}
	return 0;
}

/**
 * dp_rx_thread_loop() - main dp rx thread loop
 * @arg: pointer to dp_rx_thread structure for the rx thread
 *
 * Return: thread exit code
 */
static int dp_rx_thread_loop(void *arg)
{
	struct dp_rx_thread *rx_thread = arg;
	bool shutdown = false;
	int status;
	struct dp_rx_tm_handle_cmn *tm_handle_cmn;

	tm_handle_cmn = rx_thread->rtm_handle_cmn;

	if (!arg) {
		dp_err("bad Args passed");
		return 0;
	}

	qdf_set_user_nice(qdf_get_current_task(), -1);
	qdf_set_wake_up_idle(true);

	qdf_event_set(&rx_thread->start_event);
	dp_info("starting rx_thread (%s) id %d pid %d", qdf_get_current_comm(),
		rx_thread->id, qdf_get_current_pid());
	while (!shutdown) {
		/* This implements the execution model algorithm */
		dp_debug("sleeping");
		status =
		    qdf_wait_queue_interruptible
				(rx_thread->wait_q,
				 qdf_atomic_test_bit(RX_POST_EVENT,
						     &rx_thread->event_flag) ||
				 qdf_atomic_test_bit(RX_SUSPEND_EVENT,
						     &rx_thread->event_flag) ||
				 qdf_atomic_test_bit(RX_VDEV_DEL_EVENT,
						     &rx_thread->event_flag));
		dp_debug("woken up");

		if (status == -ERESTARTSYS) {
			QDF_DEBUG_PANIC("wait_event_interruptible returned -ERESTARTSYS");
			break;
		}
		qdf_atomic_clear_bit(RX_POST_EVENT, &rx_thread->event_flag);
		dp_rx_thread_sub_loop(rx_thread, &shutdown);
	}

	/* If we get here the scheduler thread must exit */
	dp_info("exiting (%s) id %d pid %d", qdf_get_current_comm(),
		rx_thread->id, qdf_get_current_pid());
	qdf_event_set(&rx_thread->shutdown_event);
	qdf_exit_thread(QDF_STATUS_SUCCESS);

	return 0;
}

/**
 * dp_rx_tm_thread_napi_poll() - dummy napi poll for rx_thread NAPI
 * @napi: pointer to DP rx_thread NAPI
 * @budget: NAPI BUDGET
 *
 * Return: 0 as it is not supposed to be polled at all as it is not scheduled.
 */
static int dp_rx_tm_thread_napi_poll(struct napi_struct *napi, int budget)
{
	QDF_DEBUG_PANIC("this napi_poll should not be polled as we don't schedule it");

	return 0;
}

/**
 * dp_rx_tm_thread_napi_init() - Initialize dummy rx_thread NAPI
 * @rx_thread: dp_rx_thread structure containing dummy napi and netdev
 *
 * Return: None
 */
static void dp_rx_tm_thread_napi_init(struct dp_rx_thread *rx_thread)
{
	/* Todo - optimize to use only one dummy netdev for all thread napis */
	init_dummy_netdev(&rx_thread->netdev);
	netif_napi_add(&rx_thread->netdev, &rx_thread->napi,
		       dp_rx_tm_thread_napi_poll, 64);
	napi_enable(&rx_thread->napi);
}

/**
 * dp_rx_tm_thread_napi_deinit() - De-initialize dummy rx_thread NAPI
 * @rx_thread: dp_rx_thread handle containing dummy napi and netdev
 *
 * Return: None
 */
static void dp_rx_tm_thread_napi_deinit(struct dp_rx_thread *rx_thread)
{
	netif_napi_del(&rx_thread->napi);
}

/*
 * dp_rx_tm_thread_init() - Initialize dp_rx_thread structure and thread
 *
 * @rx_thread: dp_rx_thread structure to be initialized
 * @id: id of the thread to be initialized
 *
 * Return: QDF_STATUS on success, QDF error code on failure
 */
static QDF_STATUS dp_rx_tm_thread_init(struct dp_rx_thread *rx_thread,
				       uint8_t id)
{
	char thread_name[15];
	QDF_STATUS qdf_status;

	qdf_mem_zero(thread_name, sizeof(thread_name));

	if (!rx_thread) {
		dp_err("rx_thread is null!");
		return QDF_STATUS_E_FAULT;
	}
	rx_thread->id = id;
	rx_thread->event_flag = 0;
	qdf_nbuf_queue_head_init(&rx_thread->nbuf_queue);
	qdf_event_create(&rx_thread->start_event);
	qdf_event_create(&rx_thread->suspend_event);
	qdf_event_create(&rx_thread->resume_event);
	qdf_event_create(&rx_thread->shutdown_event);
	qdf_event_create(&rx_thread->vdev_del_event);
	qdf_atomic_init(&rx_thread->gro_flush_ind);
	qdf_init_waitqueue_head(&rx_thread->wait_q);
	qdf_scnprintf(thread_name, sizeof(thread_name), "dp_rx_thread_%u", id);
	dp_info("%s %u", thread_name, id);

	if (cdp_cfg_get(dp_rx_tm_get_soc_handle(rx_thread->rtm_handle_cmn),
			cfg_dp_gro_enable))
		dp_rx_tm_thread_napi_init(rx_thread);

	rx_thread->task = qdf_create_thread(dp_rx_thread_loop,
					    rx_thread, thread_name);
	if (!rx_thread->task) {
		dp_err("could not create dp_rx_thread %d", id);
		return QDF_STATUS_E_FAILURE;
	}

	qdf_wake_up_process(rx_thread->task);
	qdf_status = qdf_wait_single_event(&rx_thread->start_event, 0);

	if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
		dp_err("failed waiting for thread creation id %d", id);
		return QDF_STATUS_E_FAILURE;
	}
	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_tm_thread_deinit() - De-Initialize dp_rx_thread structure and thread
 * @rx_thread: dp_rx_thread structure to be de-initialized
 *
 * Return: QDF_STATUS_SUCCESS
 */
static QDF_STATUS dp_rx_tm_thread_deinit(struct dp_rx_thread *rx_thread)
{
	qdf_event_destroy(&rx_thread->start_event);
	qdf_event_destroy(&rx_thread->suspend_event);
	qdf_event_destroy(&rx_thread->resume_event);
	qdf_event_destroy(&rx_thread->shutdown_event);
	qdf_event_destroy(&rx_thread->vdev_del_event);

	if (cdp_cfg_get(dp_rx_tm_get_soc_handle(rx_thread->rtm_handle_cmn),
			cfg_dp_gro_enable))
		dp_rx_tm_thread_napi_deinit(rx_thread);

	return QDF_STATUS_SUCCESS;
}

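/**
 * dp_rx_tm_init() - initialize DP RX thread infrastructure
 * @rx_tm_hdl: dp_rx_tm_handle containing the overall thread
 * infrastructure
 * @num_dp_rx_threads: number of DP RX threads to be initialized
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF error code on failure
 */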
QDF_STATUS dp_rx_tm_init(struct dp_rx_tm_handle *rx_tm_hdl,
			 uint8_t num_dp_rx_threads)
{
	int i;
	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;

	if (num_dp_rx_threads > DP_MAX_RX_THREADS) {
		dp_err("unable to initialize %u number of threads. MAX %u",
		       num_dp_rx_threads, DP_MAX_RX_THREADS);
		return QDF_STATUS_E_INVAL;
	}

	rx_tm_hdl->num_dp_rx_threads = num_dp_rx_threads;
	rx_tm_hdl->state = DP_RX_THREADS_INVALID;

	dp_info("initializing %u threads", num_dp_rx_threads);

	/* allocate an array to contain the DP RX thread pointers */
	rx_tm_hdl->rx_thread = qdf_mem_malloc(num_dp_rx_threads *
					      sizeof(struct dp_rx_thread *));

	if (qdf_unlikely(!rx_tm_hdl->rx_thread)) {
		qdf_status = QDF_STATUS_E_NOMEM;
		goto ret;
	}

	for (i = 0; i < rx_tm_hdl->num_dp_rx_threads; i++) {
		rx_tm_hdl->rx_thread[i] =
			(struct dp_rx_thread *)
			qdf_mem_malloc(sizeof(struct dp_rx_thread));
		if (qdf_unlikely(!rx_tm_hdl->rx_thread[i])) {
			QDF_ASSERT(0);
			qdf_status = QDF_STATUS_E_NOMEM;
			goto ret;
		}
		rx_tm_hdl->rx_thread[i]->rtm_handle_cmn =
			(struct dp_rx_tm_handle_cmn *)rx_tm_hdl;
		qdf_status =
			dp_rx_tm_thread_init(rx_tm_hdl->rx_thread[i], i);
		if (!QDF_IS_STATUS_SUCCESS(qdf_status))
			break;
	}
ret:
	if (!QDF_IS_STATUS_SUCCESS(qdf_status))
		dp_rx_tm_deinit(rx_tm_hdl);
	else
		rx_tm_hdl->state = DP_RX_THREADS_RUNNING;

	return qdf_status;
}

/**
 * dp_rx_tm_suspend() - suspend DP RX threads
 * @rx_tm_hdl: dp_rx_tm_handle containing the overall thread
 * infrastructure
 *
 * Return: Success/Failure
 */
QDF_STATUS dp_rx_tm_suspend(struct dp_rx_tm_handle *rx_tm_hdl)
{
	int i;
	QDF_STATUS qdf_status;
	struct dp_rx_thread *rx_thread;

	if (rx_tm_hdl->state == DP_RX_THREADS_SUSPENDED) {
		dp_info("already in suspend state! Ignoring.");
		return QDF_STATUS_E_INVAL;
	}

	rx_tm_hdl->state = DP_RX_THREADS_SUSPENDING;

	for (i = 0; i < rx_tm_hdl->num_dp_rx_threads; i++) {
		if (!rx_tm_hdl->rx_thread[i])
			continue;
		qdf_event_reset(&rx_tm_hdl->rx_thread[i]->resume_event);
		qdf_event_reset(&rx_tm_hdl->rx_thread[i]->suspend_event);
		qdf_set_bit(RX_SUSPEND_EVENT,
			    &rx_tm_hdl->rx_thread[i]->event_flag);
		qdf_wake_up_interruptible(&rx_tm_hdl->rx_thread[i]->wait_q);
	}

	for (i = 0; i < rx_tm_hdl->num_dp_rx_threads; i++) {
		rx_thread = rx_tm_hdl->rx_thread[i];
		if (!rx_thread)
			continue;
		dp_debug("thread %d", i);
		qdf_status = qdf_wait_single_event(&rx_thread->suspend_event,
						   DP_RX_THREAD_WAIT_TIMEOUT);
		if (QDF_IS_STATUS_SUCCESS(qdf_status))
			dp_debug("thread:%d suspended", rx_thread->id);
		else
			goto suspend_fail;
	}
	rx_tm_hdl->state = DP_RX_THREADS_SUSPENDED;

	return QDF_STATUS_SUCCESS;

suspend_fail:
	dp_err("thread:%d %s(%d) while waiting for suspend",
	       rx_thread->id,
	       qdf_status == QDF_STATUS_E_TIMEOUT ? "timed out" : "failed",
	       qdf_status);

	dp_rx_tm_resume(rx_tm_hdl);

	return qdf_status;
}

/**
 * dp_rx_thread_flush_by_vdev_id() - flush rx packets by vdev_id in
 *                                   a particular rx thread queue
 * @rx_thread - rx_thread pointer of the queue from which packets are
 *              to be flushed out
 * @vdev_id: vdev id for which packets are to be flushed
 *
 * The function will flush the RX packets by vdev_id in a particular
 * RX thread queue. It will also notify and wait for the RX thread to
 * flush the packets in the NAPI RX GRO hash list.
 *
 * Return: void
 */
static inline
void dp_rx_thread_flush_by_vdev_id(struct dp_rx_thread *rx_thread,
				   uint8_t vdev_id)
{
	qdf_nbuf_t nbuf_list, tmp_nbuf_list;
	uint32_t num_list_elements = 0;
	QDF_STATUS qdf_status;

	qdf_nbuf_queue_head_lock(&rx_thread->nbuf_queue);
	QDF_NBUF_QUEUE_WALK_SAFE(&rx_thread->nbuf_queue, nbuf_list,
				 tmp_nbuf_list) {
		if (QDF_NBUF_CB_RX_VDEV_ID(nbuf_list) == vdev_id) {
			qdf_nbuf_unlink_no_lock(nbuf_list,
						&rx_thread->nbuf_queue);
			dp_rx_thread_adjust_nbuf_list(nbuf_list);
			num_list_elements =
				QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf_list);
			rx_thread->stats.rx_flushed += num_list_elements;
			qdf_nbuf_list_free(nbuf_list);
		}
	}
	qdf_nbuf_queue_head_unlock(&rx_thread->nbuf_queue);

	qdf_set_bit(RX_VDEV_DEL_EVENT, &rx_thread->event_flag);
	qdf_wake_up_interruptible(&rx_thread->wait_q);

	qdf_status = qdf_wait_single_event(&rx_thread->vdev_del_event,
					   DP_RX_THREAD_WAIT_TIMEOUT);
	if (QDF_IS_STATUS_SUCCESS(qdf_status))
		dp_debug("thread:%d napi gro flushed successfully",
			 rx_thread->id);
	else if (qdf_status == QDF_STATUS_E_TIMEOUT)
		dp_err("thread:%d timed out waiting for napi gro flush",
		       rx_thread->id);
	else
		dp_err("thread:%d failed while waiting for napi gro flush",
		       rx_thread->id);
}

/**
 * dp_rx_tm_flush_by_vdev_id() - flush rx packets by vdev_id in all
 *                               rx thread queues
 * @rx_tm_hdl: dp_rx_tm_handle containing the overall thread
 * infrastructure
 * @vdev_id: vdev id for which packets are to be flushed
 *
 * Return: QDF_STATUS_SUCCESS
 */
QDF_STATUS dp_rx_tm_flush_by_vdev_id(struct dp_rx_tm_handle *rx_tm_hdl,
				     uint8_t vdev_id)
{
	struct dp_rx_thread *rx_thread;
	int i;

	for (i = 0; i < rx_tm_hdl->num_dp_rx_threads; i++) {
		rx_thread = rx_tm_hdl->rx_thread[i];
		if (!rx_thread)
			continue;

		dp_debug("thread %d", i);
		dp_rx_thread_flush_by_vdev_id(rx_thread, vdev_id);
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_rx_tm_resume() - resume DP RX threads
 * @rx_tm_hdl: dp_rx_tm_handle containing the overall thread
 * infrastructure
 *
 * Return: QDF_STATUS_SUCCESS on resume success. QDF error otherwise.
 */
QDF_STATUS dp_rx_tm_resume(struct dp_rx_tm_handle *rx_tm_hdl)
{
	int i;

	if (rx_tm_hdl->state != DP_RX_THREADS_SUSPENDED &&
	    rx_tm_hdl->state != DP_RX_THREADS_SUSPENDING) {
		dp_info("resume callback received w/o suspend! Ignoring.");
		return QDF_STATUS_E_INVAL;
	}

	for (i = 0; i < rx_tm_hdl->num_dp_rx_threads; i++) {
		if (!rx_tm_hdl->rx_thread[i])
			continue;
		dp_debug("calling thread %d to resume", i);

		/* positively reset event_flag for DP_RX_THREADS_SUSPENDING
		 * state
		 */
		qdf_clear_bit(RX_SUSPEND_EVENT,
			      &rx_tm_hdl->rx_thread[i]->event_flag);
		qdf_event_set(&rx_tm_hdl->rx_thread[i]->resume_event);
	}

	rx_tm_hdl->state = DP_RX_THREADS_RUNNING;

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_rx_tm_shutdown() - shutdown all DP RX threads
 * @rx_tm_hdl: dp_rx_tm_handle containing the overall thread infrastructure
 *
 * Return: QDF_STATUS_SUCCESS
 */
static QDF_STATUS dp_rx_tm_shutdown(struct dp_rx_tm_handle *rx_tm_hdl)
{
	int i;

	for (i = 0; i < rx_tm_hdl->num_dp_rx_threads; i++) {
		if (!rx_tm_hdl->rx_thread[i])
			continue;
		qdf_set_bit(RX_SHUTDOWN_EVENT,
			    &rx_tm_hdl->rx_thread[i]->event_flag);
		qdf_set_bit(RX_POST_EVENT,
			    &rx_tm_hdl->rx_thread[i]->event_flag);
		qdf_wake_up_interruptible(&rx_tm_hdl->rx_thread[i]->wait_q);
	}

	for (i = 0; i < rx_tm_hdl->num_dp_rx_threads; i++) {
		if (!rx_tm_hdl->rx_thread[i])
			continue;
		dp_debug("waiting for shutdown of thread %d", i);
		qdf_wait_single_event(&rx_tm_hdl->rx_thread[i]->shutdown_event,
				      0);
	}
	rx_tm_hdl->state = DP_RX_THREADS_INVALID;
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_rx_tm_deinit() - de-initialize RX thread infrastructure
 * @rx_tm_hdl: dp_rx_tm_handle containing the overall thread
 * infrastructure
 *
 * Return: QDF_STATUS_SUCCESS
 */
QDF_STATUS dp_rx_tm_deinit(struct dp_rx_tm_handle *rx_tm_hdl)
{
	int i = 0;

	if (!rx_tm_hdl->rx_thread) {
		dp_err("rx_tm_hdl->rx_thread not initialized!");
		return QDF_STATUS_SUCCESS;
	}

	dp_rx_tm_shutdown(rx_tm_hdl);

	for (i = 0; i < rx_tm_hdl->num_dp_rx_threads; i++) {
		if (!rx_tm_hdl->rx_thread[i])
			continue;
		dp_rx_tm_thread_deinit(rx_tm_hdl->rx_thread[i]);
		qdf_mem_free(rx_tm_hdl->rx_thread[i]);
	}

	/* free the array of RX thread pointers */
	qdf_mem_free(rx_tm_hdl->rx_thread);
	rx_tm_hdl->rx_thread = NULL;

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_rx_tm_select_thread() - select a DP RX thread for a nbuf
 * @rx_tm_hdl: dp_rx_tm_handle containing the overall thread
 * infrastructure
 * @reo_ring_num: REO ring number corresponding to the thread
 *
 * The function relies on the presence of QDF_NBUF_CB_RX_CTX_ID passed to it
 * from the nbuf list. Depending on the RX_CTX (copy engine or reo
 * ring) on which the packet was received, the function selects
 * a corresponding rx_thread.
 *
 * Return: rx thread ID selected for the nbuf
 */
static uint8_t dp_rx_tm_select_thread(struct dp_rx_tm_handle *rx_tm_hdl,
				      uint8_t reo_ring_num)
{
	uint8_t selected_rx_thread;

	if (reo_ring_num >= rx_tm_hdl->num_dp_rx_threads) {
		dp_err_rl("unexpected ring number");
		QDF_BUG(0);
		return 0;
	}

	selected_rx_thread = reo_ring_num;
	dp_debug("selected thread %u", selected_rx_thread);
	return selected_rx_thread;
}

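/**
 * dp_rx_tm_enqueue_pkt() - enqueue a packet (nbuf list) into the selected
 * DP RX thread
 * @rx_tm_hdl: dp_rx_tm_handle containing the overall thread
 * infrastructure
 * @nbuf_list: list of packets to be queued, with the RX context id
 * recorded in QDF_NBUF_CB_RX_CTX_ID of the first nbuf
 *
 * Return: QDF_STATUS_SUCCESS
 */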
QDF_STATUS dp_rx_tm_enqueue_pkt(struct dp_rx_tm_handle *rx_tm_hdl,
				qdf_nbuf_t nbuf_list)
{
	uint8_t selected_thread_id;

	selected_thread_id =
		dp_rx_tm_select_thread(rx_tm_hdl,
				       QDF_NBUF_CB_RX_CTX_ID(nbuf_list));
	dp_rx_tm_thread_enqueue(rx_tm_hdl->rx_thread[selected_thread_id],
				nbuf_list);
	return QDF_STATUS_SUCCESS;
}

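/**
 * dp_rx_tm_gro_flush_ind() - indicate a GRO flush to the thread serving
 * the given RX context
 * @rx_tm_hdl: dp_rx_tm_handle containing the overall thread
 * infrastructure
 * @rx_ctx_id: RX context id (REO ring number) whose thread is to be flushed
 *
 * Return: QDF_STATUS_SUCCESS
 */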
QDF_STATUS
dp_rx_tm_gro_flush_ind(struct dp_rx_tm_handle *rx_tm_hdl, int rx_ctx_id)
{
	uint8_t selected_thread_id;

	selected_thread_id = dp_rx_tm_select_thread(rx_tm_hdl, rx_ctx_id);
	dp_rx_tm_thread_gro_flush_ind(rx_tm_hdl->rx_thread[selected_thread_id]);

	return QDF_STATUS_SUCCESS;
}

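/**
 * dp_rx_tm_get_napi_context() - get NAPI context of the RX thread serving
 * the given RX context
 * @rx_tm_hdl: dp_rx_tm_handle containing the overall thread
 * infrastructure
 * @rx_ctx_id: RX context id (REO ring number)
 *
 * Return: pointer to the napi_struct of the selected thread, NULL if the
 * RX context id is out of range
 */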
struct napi_struct *dp_rx_tm_get_napi_context(struct dp_rx_tm_handle *rx_tm_hdl,
					      uint8_t rx_ctx_id)
{
	if (rx_ctx_id >= rx_tm_hdl->num_dp_rx_threads) {
		dp_err_rl("unexpected rx_ctx_id %u", rx_ctx_id);
		QDF_BUG(0);
		return NULL;
	}

	return &rx_tm_hdl->rx_thread[rx_ctx_id]->napi;
}

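/**
 * dp_rx_tm_set_cpu_mask() - set CPU affinity mask for all DP RX threads
 * @rx_tm_hdl: dp_rx_tm_handle containing the overall thread
 * infrastructure
 * @new_mask: CPU mask to be applied to each RX thread task
 *
 * Return: QDF_STATUS_SUCCESS
 */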
QDF_STATUS dp_rx_tm_set_cpu_mask(struct dp_rx_tm_handle *rx_tm_hdl,
				 qdf_cpu_mask *new_mask)
{
	int i = 0;

	for (i = 0; i < rx_tm_hdl->num_dp_rx_threads; i++) {
		if (!rx_tm_hdl->rx_thread[i])
			continue;
		qdf_thread_set_cpus_allowed_mask(rx_tm_hdl->rx_thread[i]->task,
						 new_mask);
	}
	return QDF_STATUS_SUCCESS;
}