/*
 * Copyright (c) 2014-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <dp_txrx.h>
#include <cdp_txrx_cmn_struct.h>
#include <cdp_txrx_peer_ops.h>
#include <cds_sched.h>

/* Timeout in ms to wait for a DP rx thread */
#define DP_RX_THREAD_WAIT_TIMEOUT 200

#define DP_RX_TM_DEBUG 0
#if DP_RX_TM_DEBUG
/**
 * dp_rx_tm_walk_skb_list() - Walk skb list and print members
 * @nbuf_list: nbuf list to print
 *
 * Returns: None
 */
static inline void dp_rx_tm_walk_skb_list(qdf_nbuf_t nbuf_list)
{
        qdf_nbuf_t nbuf;
        int i = 0;

        nbuf = nbuf_list;
        while (nbuf) {
                dp_debug("%d nbuf:%pK nbuf->next:%pK nbuf->data:%pK", i,
                         nbuf, qdf_nbuf_next(nbuf), qdf_nbuf_data(nbuf));
                nbuf = qdf_nbuf_next(nbuf);
                i++;
        }
}
#else
static inline void dp_rx_tm_walk_skb_list(qdf_nbuf_t nbuf_list)
{ }
#endif /* DP_RX_TM_DEBUG */

/**
 * dp_rx_tm_get_soc_handle() - get soc handle from struct dp_rx_tm_handle_cmn
 * @rx_tm_handle_cmn: rx thread manager cmn handle
 *
 * Returns: ol_txrx_soc_handle on success, NULL on failure.
 */
static inline ol_txrx_soc_handle
dp_rx_tm_get_soc_handle(struct dp_rx_tm_handle_cmn *rx_tm_handle_cmn)
{
        struct dp_txrx_handle_cmn *txrx_handle_cmn;
        ol_txrx_soc_handle soc;

        txrx_handle_cmn =
                dp_rx_thread_get_txrx_handle(rx_tm_handle_cmn);

        soc = dp_txrx_get_soc_from_ext_handle(txrx_handle_cmn);
        return soc;
}

/**
 * dp_rx_tm_thread_dump_stats() - display stats for a rx_thread
 * @rx_thread: rx_thread pointer for which the stats need to be
 *             displayed
 *
 * Returns: None
 */
static void dp_rx_tm_thread_dump_stats(struct dp_rx_thread *rx_thread)
{
        uint8_t reo_ring_num;
        uint32_t off = 0;
        char nbuf_queued_string[100];
        uint32_t total_queued = 0;
        uint32_t temp = 0;

        qdf_mem_zero(nbuf_queued_string, sizeof(nbuf_queued_string));

        for (reo_ring_num = 0; reo_ring_num < DP_RX_TM_MAX_REO_RINGS;
             reo_ring_num++) {
                temp = rx_thread->stats.nbuf_queued[reo_ring_num];
                if (!temp)
                        continue;
                total_queued += temp;
                if (off >= sizeof(nbuf_queued_string))
                        continue;
                off += qdf_scnprintf(&nbuf_queued_string[off],
                                     sizeof(nbuf_queued_string) - off,
                                     "reo[%u]:%u ", reo_ring_num, temp);
        }

        if (!total_queued)
                return;

        dp_info("thread:%u - qlen:%u queued:(total:%u %s) dequeued:%u stack:%u max_len:%u invalid(peer:%u vdev:%u rx-handle:%u others:%u)",
                rx_thread->id,
                qdf_nbuf_queue_head_qlen(&rx_thread->nbuf_queue),
                total_queued,
                nbuf_queued_string,
                rx_thread->stats.nbuf_dequeued,
                rx_thread->stats.nbuf_sent_to_stack,
                rx_thread->stats.nbufq_max_len,
                rx_thread->stats.dropped_invalid_peer,
                rx_thread->stats.dropped_invalid_vdev,
                rx_thread->stats.dropped_invalid_os_rx_handles,
                rx_thread->stats.dropped_others);
}
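
/*
 * Example of a stats line emitted above, reconstructed from the format
 * string (the values themselves are illustrative only); it is printed
 * as a single line:
 * thread:1 - qlen:0 queued:(total:2048 reo[1]:2048 ) dequeued:2048
 * stack:2048 max_len:128 invalid(peer:0 vdev:0 rx-handle:0 others:0)
 */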

QDF_STATUS dp_rx_tm_dump_stats(struct dp_rx_tm_handle *rx_tm_hdl)
{
        int i;

        for (i = 0; i < rx_tm_hdl->num_dp_rx_threads; i++) {
                if (!rx_tm_hdl->rx_thread[i])
                        continue;
                dp_rx_tm_thread_dump_stats(rx_tm_hdl->rx_thread[i]);
        }
        return QDF_STATUS_SUCCESS;
}

/**
 * dp_rx_tm_thread_enqueue() - enqueue nbuf list into rx_thread
 * @rx_thread: rx_thread in which the nbuf needs to be queued
 * @nbuf_list: list of packets to be queued into the thread
 *
 * Enqueue packets into the rx_thread and wake it up. The function
 * moves the ->next pointer chain of the nbuf_list into the ext list
 * of the first nbuf, so that only the first nbuf needs to be queued
 * into the thread nbuf queue. The reverse is done at dequeue time.
 *
 * Returns: QDF_STATUS_SUCCESS on success or QDF error code on
 * failure
 */
static QDF_STATUS dp_rx_tm_thread_enqueue(struct dp_rx_thread *rx_thread,
                                          qdf_nbuf_t nbuf_list)
{
        qdf_nbuf_t head_ptr, next_ptr_list;
        uint32_t temp_qlen;
        uint32_t num_elements_in_nbuf;
        struct dp_rx_tm_handle_cmn *tm_handle_cmn;
        uint8_t reo_ring_num = QDF_NBUF_CB_RX_CTX_ID(nbuf_list);
        qdf_wait_queue_head_t *wait_q_ptr;

        tm_handle_cmn = rx_thread->rtm_handle_cmn;

        if (!tm_handle_cmn) {
                dp_alert("tm_handle_cmn is null!");
                QDF_BUG(0);
                return QDF_STATUS_E_FAILURE;
        }

        wait_q_ptr = dp_rx_thread_get_wait_queue(tm_handle_cmn);

        if (reo_ring_num >= DP_RX_TM_MAX_REO_RINGS) {
                dp_alert("incorrect ring %u", reo_ring_num);
                QDF_BUG(0);
                return QDF_STATUS_E_FAILURE;
        }

        num_elements_in_nbuf = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf_list);

        dp_rx_tm_walk_skb_list(nbuf_list);

        head_ptr = nbuf_list;

        /* Ensure head doesn't have an ext list */
        while (qdf_unlikely(head_ptr && qdf_nbuf_get_ext_list(head_ptr))) {
                QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head_ptr) = 1;
                num_elements_in_nbuf--;
                next_ptr_list = head_ptr->next;
                qdf_nbuf_set_next(head_ptr, NULL);
                qdf_nbuf_queue_head_enqueue_tail(&rx_thread->nbuf_queue,
                                                 head_ptr);
                head_ptr = next_ptr_list;
        }

        if (!head_ptr)
                goto enq_done;

        next_ptr_list = head_ptr->next;

        if (next_ptr_list) {
                /* move ->next pointer to ext list */
                qdf_nbuf_append_ext_list(head_ptr, next_ptr_list, 0);
                dp_debug("appended next_ptr_list %pK to nbuf %pK ext list %pK",
                         qdf_nbuf_next(nbuf_list), nbuf_list,
                         qdf_nbuf_get_ext_list(nbuf_list));
        }
        qdf_nbuf_set_next(head_ptr, NULL);

        qdf_nbuf_queue_head_enqueue_tail(&rx_thread->nbuf_queue, head_ptr);

enq_done:
        temp_qlen = qdf_nbuf_queue_head_qlen(&rx_thread->nbuf_queue);

        rx_thread->stats.nbuf_queued[reo_ring_num] += num_elements_in_nbuf;

        if (temp_qlen > rx_thread->stats.nbufq_max_len)
                rx_thread->stats.nbufq_max_len = temp_qlen;

        qdf_set_bit(RX_POST_EVENT, &rx_thread->event_flag);
        qdf_wake_up_interruptible(wait_q_ptr);

        return QDF_STATUS_SUCCESS;
}
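
/*
 * Illustration (comment only): how a chain of four nbufs is packed by
 * dp_rx_tm_thread_enqueue() and restored by dp_rx_tm_thread_dequeue().
 *
 *   before enqueue: A -> B -> C -> D            (linked via ->next)
 *   in the queue:   A [ext_list: B -> C -> D]   (A->next == NULL)
 *   after dequeue:  A -> B -> C -> D            (ext list moved back)
 *
 * Only A is pushed onto rx_thread->nbuf_queue, so the queue length
 * counts lists, not individual packets.
 */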

/**
 * dp_rx_tm_thread_dequeue() - dequeue nbuf list from rx_thread
 * @rx_thread: rx_thread from which the nbuf needs to be dequeued
 *
 * Returns: nbuf or nbuf_list dequeued from rx_thread
 */
static qdf_nbuf_t dp_rx_tm_thread_dequeue(struct dp_rx_thread *rx_thread)
{
        qdf_nbuf_t head, next_ptr_list, nbuf_list;

        head = qdf_nbuf_queue_head_dequeue(&rx_thread->nbuf_queue);
        nbuf_list = head;
        if (head && QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head) > 1) {
                /* move ext list to ->next pointer */
                next_ptr_list = qdf_nbuf_get_ext_list(head);
                qdf_nbuf_append_ext_list(head, NULL, 0);
                qdf_nbuf_set_next(nbuf_list, next_ptr_list);
                dp_rx_tm_walk_skb_list(nbuf_list);
        }
        return nbuf_list;
}

/**
 * dp_rx_thread_process_nbufq() - process nbuf queue of a thread
 * @rx_thread: rx_thread whose nbuf queue needs to be processed
 *
 * Returns: 0 on success, error code on failure
 */
static int dp_rx_thread_process_nbufq(struct dp_rx_thread *rx_thread)
{
        qdf_nbuf_t nbuf_list;
        uint32_t peer_local_id;
        void *peer;
        struct cdp_vdev *vdev;
        ol_txrx_rx_fp stack_fn;
        ol_osif_vdev_handle osif_vdev;
        ol_txrx_soc_handle soc;
        uint32_t num_list_elements = 0;
        struct cdp_pdev *pdev;
        struct dp_txrx_handle_cmn *txrx_handle_cmn;

        txrx_handle_cmn =
                dp_rx_thread_get_txrx_handle(rx_thread->rtm_handle_cmn);

        soc = dp_txrx_get_soc_from_ext_handle(txrx_handle_cmn);
        pdev = dp_txrx_get_pdev_from_ext_handle(txrx_handle_cmn);

        if (!soc || !pdev) {
                dp_err("invalid soc or pdev!");
                QDF_BUG(0);
                return -EFAULT;
        }

        nbuf_list = dp_rx_tm_thread_dequeue(rx_thread);
        while (nbuf_list) {
                num_list_elements =
                        QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf_list);
                rx_thread->stats.nbuf_dequeued += num_list_elements;

                peer_local_id = QDF_NBUF_CB_RX_PEER_LOCAL_ID(nbuf_list);
                peer = cdp_peer_find_by_local_id(soc, pdev, peer_local_id);

                if (!peer) {
                        rx_thread->stats.dropped_invalid_peer +=
                                num_list_elements;
                        dp_err("peer not found for local_id %u!",
                               peer_local_id);
                        qdf_nbuf_list_free(nbuf_list);
                        goto dequeue_rx_thread;
                }

                vdev = cdp_peer_get_vdev(soc, peer);
                if (!vdev) {
                        rx_thread->stats.dropped_invalid_vdev +=
                                num_list_elements;
                        dp_err("vdev not found for local_id %u!, pkt dropped",
                               peer_local_id);
                        qdf_nbuf_list_free(nbuf_list);
                        goto dequeue_rx_thread;
                }

                cdp_get_os_rx_handles_from_vdev(soc, vdev, &stack_fn,
                                                &osif_vdev);
                if (!stack_fn || !osif_vdev) {
                        rx_thread->stats.dropped_invalid_os_rx_handles +=
                                num_list_elements;
                        qdf_nbuf_list_free(nbuf_list);
                        goto dequeue_rx_thread;
                }
                stack_fn(osif_vdev, nbuf_list);
                rx_thread->stats.nbuf_sent_to_stack += num_list_elements;

dequeue_rx_thread:
                nbuf_list = dp_rx_tm_thread_dequeue(rx_thread);
        }

        return 0;
}

/**
 * dp_rx_thread_sub_loop() - rx thread subloop
 * @rx_thread: rx_thread to be processed
 * @shutdown: pointer to shutdown variable
 *
 * The function handles shutdown and suspend events from other
 * threads and processes the nbuf queue of the rx thread. If a
 * shutdown event is received from some other wlan thread, the
 * function sets the shutdown pointer to true and returns.
 *
 * Returns: 0 on success, error code on failure
 */
static int dp_rx_thread_sub_loop(struct dp_rx_thread *rx_thread, bool *shutdown)
{
        while (true) {
                if (qdf_atomic_test_and_clear_bit(RX_SHUTDOWN_EVENT,
                                                  &rx_thread->event_flag)) {
                        if (qdf_atomic_test_and_clear_bit(RX_SUSPEND_EVENT,
                                                          &rx_thread->event_flag)) {
                                qdf_event_set(&rx_thread->suspend_event);
                        }
                        dp_debug("shutting down (%s) id %d pid %d",
                                 qdf_get_current_comm(), rx_thread->id,
                                 qdf_get_current_pid());
                        *shutdown = true;
                        break;
                }

                dp_rx_thread_process_nbufq(rx_thread);

                if (qdf_atomic_test_and_clear_bit(RX_SUSPEND_EVENT,
                                                  &rx_thread->event_flag)) {
                        dp_debug("received suspend ind (%s) id %d pid %d",
                                 qdf_get_current_comm(), rx_thread->id,
                                 qdf_get_current_pid());
                        qdf_event_reset(&rx_thread->resume_event);
                        qdf_event_set(&rx_thread->suspend_event);
                        dp_debug("waiting for resume (%s) id %d pid %d",
                                 qdf_get_current_comm(), rx_thread->id,
                                 qdf_get_current_pid());
                        qdf_wait_single_event(&rx_thread->resume_event, 0);
                }
                /* single pass per wakeup; the main loop re-arms the wait */
                break;
        }
        return 0;
}

/**
 * dp_rx_thread_loop() - main dp rx thread loop
 * @arg: pointer to dp_rx_thread structure for the rx thread
 *
 * Return: thread exit code
 */
static int dp_rx_thread_loop(void *arg)
{
        struct dp_rx_thread *rx_thread = arg;
        bool shutdown = false;
        int status;
        struct dp_rx_tm_handle_cmn *tm_handle_cmn;

        if (!arg) {
                dp_err("bad args passed");
                return 0;
        }

        tm_handle_cmn = rx_thread->rtm_handle_cmn;

        qdf_set_user_nice(qdf_get_current_task(), -1);
        qdf_set_wake_up_idle(true);

        qdf_event_set(&rx_thread->start_event);
        dp_info("starting rx_thread (%s) id %d pid %d", qdf_get_current_comm(),
                rx_thread->id, qdf_get_current_pid());
        while (!shutdown) {
                /* This implements the execution model algorithm */
                dp_debug("sleeping");
                status =
                    qdf_wait_queue_interruptible
                        (DP_RX_THREAD_GET_WAIT_QUEUE_OBJ(tm_handle_cmn),
                         qdf_atomic_test_bit(RX_POST_EVENT,
                                             &rx_thread->event_flag) ||
                         qdf_atomic_test_bit(RX_SUSPEND_EVENT,
                                             &rx_thread->event_flag));
                dp_debug("woken up");

                if (status == -ERESTARTSYS) {
                        QDF_DEBUG_PANIC("wait_event_interruptible returned -ERESTARTSYS");
                        break;
                }
                qdf_atomic_clear_bit(RX_POST_EVENT, &rx_thread->event_flag);
                dp_rx_thread_sub_loop(rx_thread, &shutdown);
        }

        /* If we get here the rx thread must exit */
        dp_info("exiting (%s) id %d pid %d", qdf_get_current_comm(),
                rx_thread->id, qdf_get_current_pid());
        qdf_event_set(&rx_thread->shutdown_event);
        qdf_exit_thread(QDF_STATUS_SUCCESS);

        return 0;
}
419
Mohit Khanna81418772018-10-30 14:14:46 -0700420/**
421 * dp_rx_tm_thread_napi_poll() - dummy napi poll for rx_thread NAPI
422 * @napi: pointer to DP rx_thread NAPI
423 * @budget: NAPI BUDGET
424 *
425 * Return: 0 as it is not supposed to be polled at all as it is not scheduled.
426 */
427static int dp_rx_tm_thread_napi_poll(struct napi_struct *napi, int budget)
428{
Dustin Brown207286b2019-02-22 10:52:33 -0800429 QDF_DEBUG_PANIC("this napi_poll should not be polled as we don't schedule it");
430
Mohit Khanna81418772018-10-30 14:14:46 -0700431 return 0;
432}
433
434/**
435 * dp_rx_tm_thread_napi_init() - Initialize dummy rx_thread NAPI
436 * @rx_thread: dp_rx_thread structure containing dummy napi and netdev
437 *
438 * Return: None
439 */
440static void dp_rx_tm_thread_napi_init(struct dp_rx_thread *rx_thread)
441{
442 /* Todo - optimize to use only one dummy netdev for all thread napis */
443 init_dummy_netdev(&rx_thread->netdev);
444 netif_napi_add(&rx_thread->netdev, &rx_thread->napi,
445 dp_rx_tm_thread_napi_poll, 64);
446 napi_enable(&rx_thread->napi);
447}
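
/*
 * Note: the NAPI registered above is never scheduled, and its poll
 * handler panics if invoked. The instance exists only so that a valid
 * napi_struct can be handed out via dp_rx_tm_get_napi_context() when
 * cfg_dp_gro_enable is set -- presumably so the rx delivery path can
 * run GRO-style aggregation against a per-thread NAPI context.
 */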

/**
 * dp_rx_tm_thread_napi_deinit() - De-initialize dummy rx_thread NAPI
 * @rx_thread: dp_rx_thread handle containing dummy napi and netdev
 *
 * Return: None
 */
static void dp_rx_tm_thread_napi_deinit(struct dp_rx_thread *rx_thread)
{
        netif_napi_del(&rx_thread->napi);
}

/**
 * dp_rx_tm_thread_init() - Initialize dp_rx_thread structure and thread
 * @rx_thread: dp_rx_thread structure to be initialized
 * @id: id of the thread to be initialized
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF error code on failure
 */
static QDF_STATUS dp_rx_tm_thread_init(struct dp_rx_thread *rx_thread,
                                       uint8_t id)
{
        char thread_name[15];
        QDF_STATUS qdf_status;

        qdf_mem_zero(thread_name, sizeof(thread_name));

        if (!rx_thread) {
                dp_err("rx_thread is null!");
                return QDF_STATUS_E_FAULT;
        }

        rx_thread->id = id;
        rx_thread->event_flag = 0;
        qdf_nbuf_queue_head_init(&rx_thread->nbuf_queue);
        qdf_event_create(&rx_thread->start_event);
        qdf_event_create(&rx_thread->suspend_event);
        qdf_event_create(&rx_thread->resume_event);
        qdf_event_create(&rx_thread->shutdown_event);
        qdf_scnprintf(thread_name, sizeof(thread_name), "dp_rx_thread_%u", id);
        dp_info("%s %u", thread_name, id);

        if (cdp_cfg_get(dp_rx_tm_get_soc_handle(rx_thread->rtm_handle_cmn),
                        cfg_dp_gro_enable))
                dp_rx_tm_thread_napi_init(rx_thread);

        rx_thread->task = qdf_create_thread(dp_rx_thread_loop,
                                            rx_thread, thread_name);
        if (!rx_thread->task) {
                dp_err("could not create dp_rx_thread %d", id);
                return QDF_STATUS_E_FAILURE;
        }

        qdf_wake_up_process(rx_thread->task);
        qdf_status = qdf_wait_single_event(&rx_thread->start_event, 0);

        if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
                dp_err("failed waiting for thread creation id %d", id);
                return QDF_STATUS_E_FAILURE;
        }
        return QDF_STATUS_SUCCESS;
}

/**
 * dp_rx_tm_thread_deinit() - De-initialize dp_rx_thread structure and thread
 * @rx_thread: dp_rx_thread structure to be de-initialized
 *
 * Return: QDF_STATUS_SUCCESS
 */
static QDF_STATUS dp_rx_tm_thread_deinit(struct dp_rx_thread *rx_thread)
{
        qdf_event_destroy(&rx_thread->start_event);
        qdf_event_destroy(&rx_thread->suspend_event);
        qdf_event_destroy(&rx_thread->resume_event);
        qdf_event_destroy(&rx_thread->shutdown_event);

        if (cdp_cfg_get(dp_rx_tm_get_soc_handle(rx_thread->rtm_handle_cmn),
                        cfg_dp_gro_enable))
                dp_rx_tm_thread_napi_deinit(rx_thread);

        return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_rx_tm_init(struct dp_rx_tm_handle *rx_tm_hdl,
                         uint8_t num_dp_rx_threads)
{
        int i;
        QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;

        if (num_dp_rx_threads > DP_MAX_RX_THREADS) {
                dp_err("unable to initialize %u number of threads. MAX %u",
                       num_dp_rx_threads, DP_MAX_RX_THREADS);
                return QDF_STATUS_E_INVAL;
        }

        rx_tm_hdl->num_dp_rx_threads = num_dp_rx_threads;

        dp_info("initializing %u threads", num_dp_rx_threads);

        /* allocate an array to contain the DP RX thread pointers */
        rx_tm_hdl->rx_thread = qdf_mem_malloc(num_dp_rx_threads *
                                              sizeof(struct dp_rx_thread *));

        if (qdf_unlikely(!rx_tm_hdl->rx_thread)) {
                qdf_status = QDF_STATUS_E_NOMEM;
                goto ret;
        }

        qdf_init_waitqueue_head(&rx_tm_hdl->wait_q);

        for (i = 0; i < rx_tm_hdl->num_dp_rx_threads; i++) {
                rx_tm_hdl->rx_thread[i] =
                        (struct dp_rx_thread *)
                        qdf_mem_malloc(sizeof(struct dp_rx_thread));
                if (qdf_unlikely(!rx_tm_hdl->rx_thread[i])) {
                        QDF_ASSERT(0);
                        qdf_status = QDF_STATUS_E_NOMEM;
                        goto ret;
                }
                rx_tm_hdl->rx_thread[i]->rtm_handle_cmn =
                        (struct dp_rx_tm_handle_cmn *)rx_tm_hdl;
                qdf_status =
                        dp_rx_tm_thread_init(rx_tm_hdl->rx_thread[i], i);
                if (!QDF_IS_STATUS_SUCCESS(qdf_status))
                        break;
        }
ret:
        if (!QDF_IS_STATUS_SUCCESS(qdf_status))
                dp_rx_tm_deinit(rx_tm_hdl);

        return qdf_status;
}
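
/*
 * Usage sketch (illustrative only, kept out of the build): bring-up and
 * tear-down of the RX thread manager by a hypothetical caller. "my_rx_tm"
 * and the thread count are assumptions of this example, not symbols
 * defined by this file.
 */
#if 0
static struct dp_rx_tm_handle my_rx_tm;

static void example_rx_tm_lifecycle(void)
{
        /* one thread per REO ring in use; must not exceed DP_MAX_RX_THREADS */
        if (dp_rx_tm_init(&my_rx_tm, 3) != QDF_STATUS_SUCCESS)
                return;

        /* ... rx path hands packet lists to dp_rx_tm_enqueue_pkt() ... */

        dp_rx_tm_deinit(&my_rx_tm);
}
#endif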

/**
 * dp_rx_tm_suspend() - suspend DP RX threads
 * @rx_tm_hdl: dp_rx_tm_handle containing the overall thread
 *             infrastructure
 *
 * Return: QDF_STATUS_SUCCESS
 */
QDF_STATUS dp_rx_tm_suspend(struct dp_rx_tm_handle *rx_tm_hdl)
{
        int i;
        QDF_STATUS qdf_status;
        struct dp_rx_thread *rx_thread;

        for (i = 0; i < rx_tm_hdl->num_dp_rx_threads; i++) {
                if (!rx_tm_hdl->rx_thread[i])
                        continue;
                qdf_set_bit(RX_SUSPEND_EVENT,
                            &rx_tm_hdl->rx_thread[i]->event_flag);
        }

        qdf_wake_up_interruptible(&rx_tm_hdl->wait_q);

        for (i = 0; i < rx_tm_hdl->num_dp_rx_threads; i++) {
                rx_thread = rx_tm_hdl->rx_thread[i];
                if (!rx_thread)
                        continue;
                dp_debug("thread %d", i);
                qdf_status = qdf_wait_single_event(&rx_thread->suspend_event,
                                                   DP_RX_THREAD_WAIT_TIMEOUT);
                if (QDF_IS_STATUS_SUCCESS(qdf_status))
                        dp_debug("thread:%d suspended", rx_thread->id);
                else if (qdf_status == QDF_STATUS_E_TIMEOUT)
                        dp_err("thread:%d timed out waiting for suspend",
                               rx_thread->id);
                else
                        dp_err("thread:%d failed while waiting for suspend",
                               rx_thread->id);
        }
        rx_tm_hdl->state = DP_RX_THREAD_SUSPENDED;

        return QDF_STATUS_SUCCESS;
}

/**
 * dp_rx_tm_resume() - resume DP RX threads
 * @rx_tm_hdl: dp_rx_tm_handle containing the overall thread
 *             infrastructure
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF error code on failure
 */
QDF_STATUS dp_rx_tm_resume(struct dp_rx_tm_handle *rx_tm_hdl)
{
        int i;

        if (rx_tm_hdl->state != DP_RX_THREAD_SUSPENDED) {
                dp_err("resume callback received without suspend");
                return QDF_STATUS_E_FAULT;
        }

        for (i = 0; i < rx_tm_hdl->num_dp_rx_threads; i++) {
                if (!rx_tm_hdl->rx_thread[i])
                        continue;
                dp_debug("calling thread %d to resume", i);
                qdf_event_set(&rx_tm_hdl->rx_thread[i]->resume_event);
        }

        return QDF_STATUS_SUCCESS;
}
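
/*
 * Usage sketch (illustrative only, kept out of the build): suspend and
 * resume must be paired; dp_rx_tm_resume() rejects a resume that was not
 * preceded by a successful suspend. The caller below is hypothetical.
 */
#if 0
static void example_rx_tm_pm(struct dp_rx_tm_handle *tm)
{
        /* each thread acks within DP_RX_THREAD_WAIT_TIMEOUT ms */
        dp_rx_tm_suspend(tm);

        /* ... system suspend/resume happens here ... */

        dp_rx_tm_resume(tm);    /* sets every thread's resume_event */
}
#endif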

/**
 * dp_rx_tm_shutdown() - shutdown all DP RX threads
 * @rx_tm_hdl: dp_rx_tm_handle containing the overall thread infrastructure
 *
 * Return: QDF_STATUS_SUCCESS
 */
static QDF_STATUS dp_rx_tm_shutdown(struct dp_rx_tm_handle *rx_tm_hdl)
{
        int i;

        for (i = 0; i < rx_tm_hdl->num_dp_rx_threads; i++) {
                if (!rx_tm_hdl->rx_thread[i])
                        continue;
                qdf_set_bit(RX_SHUTDOWN_EVENT,
                            &rx_tm_hdl->rx_thread[i]->event_flag);
                qdf_set_bit(RX_POST_EVENT,
                            &rx_tm_hdl->rx_thread[i]->event_flag);
        }

        qdf_wake_up_interruptible(&rx_tm_hdl->wait_q);

        for (i = 0; i < rx_tm_hdl->num_dp_rx_threads; i++) {
                if (!rx_tm_hdl->rx_thread[i])
                        continue;
                dp_debug("waiting for shutdown of thread %d", i);
                qdf_wait_single_event(&rx_tm_hdl->rx_thread[i]->shutdown_event,
                                      0);
        }
        return QDF_STATUS_SUCCESS;
}

/**
 * dp_rx_tm_deinit() - de-initialize RX thread infrastructure
 * @rx_tm_hdl: dp_rx_tm_handle containing the overall thread
 *             infrastructure
 *
 * Return: QDF_STATUS_SUCCESS
 */
QDF_STATUS dp_rx_tm_deinit(struct dp_rx_tm_handle *rx_tm_hdl)
{
        int i = 0;

        if (!rx_tm_hdl->rx_thread) {
                dp_err("rx_tm_hdl->rx_thread not initialized!");
                return QDF_STATUS_SUCCESS;
        }

        dp_rx_tm_shutdown(rx_tm_hdl);

        for (i = 0; i < rx_tm_hdl->num_dp_rx_threads; i++) {
                if (!rx_tm_hdl->rx_thread[i])
                        continue;
                dp_rx_tm_thread_deinit(rx_tm_hdl->rx_thread[i]);
                qdf_mem_free(rx_tm_hdl->rx_thread[i]);
        }

        /* free the array of RX thread pointers */
        qdf_mem_free(rx_tm_hdl->rx_thread);
        rx_tm_hdl->rx_thread = NULL;

        return QDF_STATUS_SUCCESS;
}

/**
 * dp_rx_tm_select_thread() - select a DP RX thread for a nbuf
 * @rx_tm_hdl: dp_rx_tm_handle containing the overall thread
 *             infrastructure
 * @nbuf_list: list of nbufs to be enqueued into the thread
 *
 * The function relies on the presence of QDF_NBUF_CB_RX_CTX_ID
 * in the nbuf list. Depending on the RX_CTX (copy engine or reo
 * ring) on which the packet was received, the function selects
 * a corresponding rx_thread.
 *
 * Return: rx thread ID selected for the nbuf
 */
static uint8_t dp_rx_tm_select_thread(struct dp_rx_tm_handle *rx_tm_hdl,
                                      qdf_nbuf_t nbuf_list)
{
        uint8_t selected_rx_thread;
        uint8_t reo_ring_num = QDF_NBUF_CB_RX_CTX_ID(nbuf_list);

        if (reo_ring_num >= rx_tm_hdl->num_dp_rx_threads) {
                dp_err_rl("unexpected ring number");
                QDF_BUG(0);
                return 0;
        }

        selected_rx_thread = reo_ring_num;
        return selected_rx_thread;
}

QDF_STATUS dp_rx_tm_enqueue_pkt(struct dp_rx_tm_handle *rx_tm_hdl,
                                qdf_nbuf_t nbuf_list)
{
        uint8_t selected_thread_id;

        selected_thread_id = dp_rx_tm_select_thread(rx_tm_hdl, nbuf_list);
        dp_rx_tm_thread_enqueue(rx_tm_hdl->rx_thread[selected_thread_id],
                                nbuf_list);
        return QDF_STATUS_SUCCESS;
}
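
/*
 * Usage sketch (illustrative only, kept out of the build): thread
 * selection is a 1:1 map from the rx context (REO ring) id, so the rx
 * path must stamp QDF_NBUF_CB_RX_CTX_ID() on the head nbuf before
 * enqueueing. The caller below is hypothetical.
 */
#if 0
static void example_deliver(struct dp_rx_tm_handle *tm, qdf_nbuf_t list)
{
        /* assumes QDF_NBUF_CB_RX_CTX_ID(list) was set when received */
        dp_rx_tm_enqueue_pkt(tm, list);
}
#endif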

struct napi_struct *dp_rx_tm_get_napi_context(struct dp_rx_tm_handle *rx_tm_hdl,
                                              uint8_t rx_ctx_id)
{
        if (rx_ctx_id >= rx_tm_hdl->num_dp_rx_threads) {
                dp_err_rl("unexpected rx_ctx_id %u", rx_ctx_id);
                QDF_BUG(0);
                return NULL;
        }

        return &rx_tm_hdl->rx_thread[rx_ctx_id]->napi;
}
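
/*
 * Usage sketch (illustrative only, kept out of the build): a hypothetical
 * rx path fetching the per-context dummy NAPI to run GRO before handing
 * packets up. napi_gro_receive() is the standard kernel API; whether the
 * real caller uses it this way is an assumption of this example.
 */
#if 0
static void example_gro(struct dp_rx_tm_handle *tm, uint8_t rx_ctx_id,
                        struct sk_buff *skb)
{
        struct napi_struct *napi = dp_rx_tm_get_napi_context(tm, rx_ctx_id);

        if (napi)
                napi_gro_receive(napi, skb);
}
#endif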

QDF_STATUS dp_rx_tm_set_cpu_mask(struct dp_rx_tm_handle *rx_tm_hdl,
                                 qdf_cpu_mask *new_mask)
{
        int i = 0;

        for (i = 0; i < rx_tm_hdl->num_dp_rx_threads; i++) {
                if (!rx_tm_hdl->rx_thread[i])
                        continue;
                qdf_thread_set_cpus_allowed_mask(rx_tm_hdl->rx_thread[i]->task,
                                                 new_mask);
        }
        return QDF_STATUS_SUCCESS;
}
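
/*
 * Usage sketch (illustrative only, kept out of the build): pin all DP RX
 * threads to CPUs 1-2. qdf_cpumask_clear()/qdf_cpumask_set_cpu() are
 * assumed to be the usual QDF wrappers around the kernel cpumask helpers.
 */
#if 0
static void example_set_affinity(struct dp_rx_tm_handle *tm)
{
        qdf_cpu_mask mask;

        qdf_cpumask_clear(&mask);
        qdf_cpumask_set_cpu(1, &mask);
        qdf_cpumask_set_cpu(2, &mask);
        dp_rx_tm_set_cpu_mask(tm, &mask);
}
#endif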
777}