/*
 * Copyright (c) 2014-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <dp_txrx.h>
#include <cdp_txrx_cmn_struct.h>
#include <cdp_txrx_peer_ops.h>
#include <cds_sched.h>

/* Timeout in ms to wait for a DP rx thread */
#define DP_RX_THREAD_WAIT_TIMEOUT 200

#define DP_RX_TM_DEBUG 0
#if DP_RX_TM_DEBUG
/**
 * dp_rx_tm_walk_skb_list() - Walk skb list and print members
 * @nbuf_list: nbuf list to print
 *
 * Returns: None
 */
static inline void dp_rx_tm_walk_skb_list(qdf_nbuf_t nbuf_list)
{
	qdf_nbuf_t nbuf;
	int i = 0;

	nbuf = nbuf_list;
	while (nbuf) {
		dp_debug("%d nbuf:%pK nbuf->next:%pK nbuf->data:%pK", i,
			 nbuf, qdf_nbuf_next(nbuf), qdf_nbuf_data(nbuf));
		nbuf = qdf_nbuf_next(nbuf);
		i++;
	}
}
#else
static inline void dp_rx_tm_walk_skb_list(qdf_nbuf_t nbuf_list)
{ }
#endif /* DP_RX_TM_DEBUG */

/**
 * dp_rx_tm_get_soc_handle() - get soc handle from struct dp_rx_tm_handle_cmn
 * @rx_tm_handle_cmn: rx thread manager cmn handle
 *
 * Returns: ol_txrx_soc_handle on success, NULL on failure.
 */
static inline ol_txrx_soc_handle
dp_rx_tm_get_soc_handle(struct dp_rx_tm_handle_cmn *rx_tm_handle_cmn)
{
	struct dp_txrx_handle_cmn *txrx_handle_cmn;
	ol_txrx_soc_handle soc;

	txrx_handle_cmn =
		dp_rx_thread_get_txrx_handle(rx_tm_handle_cmn);

	soc = dp_txrx_get_soc_from_ext_handle(txrx_handle_cmn);
	return soc;
}

/**
 * dp_rx_tm_thread_dump_stats() - display stats for a rx_thread
 * @rx_thread: rx_thread pointer for which the stats need to be
 *             displayed
 *
 * Returns: None
 */
static void dp_rx_tm_thread_dump_stats(struct dp_rx_thread *rx_thread)
{
	uint8_t reo_ring_num;
	uint32_t off = 0;
	char nbuf_queued_string[100];
	uint32_t total_queued = 0;
	uint32_t temp = 0;

	qdf_mem_set(nbuf_queued_string, 0, sizeof(nbuf_queued_string));

	for (reo_ring_num = 0; reo_ring_num < DP_RX_TM_MAX_REO_RINGS;
	     reo_ring_num++) {
		temp = rx_thread->stats.nbuf_queued[reo_ring_num];
		if (!temp)
			continue;
		total_queued += temp;
		if (off >= sizeof(nbuf_queued_string))
			continue;
		off += qdf_scnprintf(&nbuf_queued_string[off],
				     sizeof(nbuf_queued_string) - off,
				     "reo[%u]:%u ", reo_ring_num, temp);
	}

	if (!total_queued)
		return;

	dp_info("thread:%u - qlen:%u queued:(total:%u %s) dequeued:%u stack:%u max_len:%u invalid(peer:%u vdev:%u others:%u)",
		rx_thread->id,
		qdf_nbuf_queue_head_qlen(&rx_thread->nbuf_queue),
		total_queued,
		nbuf_queued_string,
		rx_thread->stats.nbuf_dequeued,
		rx_thread->stats.nbuf_sent_to_stack,
		rx_thread->stats.nbufq_max_len,
		rx_thread->stats.dropped_invalid_peer,
		rx_thread->stats.dropped_invalid_vdev,
		rx_thread->stats.dropped_others);
}

QDF_STATUS dp_rx_tm_dump_stats(struct dp_rx_tm_handle *rx_tm_hdl)
{
	int i;

	for (i = 0; i < rx_tm_hdl->num_dp_rx_threads; i++) {
		if (!rx_tm_hdl->rx_thread[i])
			continue;
		dp_rx_tm_thread_dump_stats(rx_tm_hdl->rx_thread[i]);
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_rx_tm_thread_enqueue() - enqueue nbuf list into rx_thread
 * @rx_thread: rx_thread in which the nbuf needs to be queued
 * @nbuf_list: list of packets to be queued into the thread
 *
 * Enqueue packet into rx_thread and wake it up. The function
 * moves the next pointer of the nbuf_list into the ext list of
 * the first nbuf for storage into the thread. Only the first
 * nbuf is queued into the thread nbuf queue. The reverse is
 * done at the time of dequeue.
 *
 * Returns: QDF_STATUS_SUCCESS on success or qdf error code on
 * failure
 */
static QDF_STATUS dp_rx_tm_thread_enqueue(struct dp_rx_thread *rx_thread,
					  qdf_nbuf_t nbuf_list)
{
	qdf_nbuf_t head_ptr, next_ptr_list;
	uint32_t temp_qlen;
	uint32_t num_elements_in_nbuf;
	struct dp_rx_tm_handle_cmn *tm_handle_cmn;
	uint8_t reo_ring_num = QDF_NBUF_CB_RX_CTX_ID(nbuf_list);
	qdf_wait_queue_head_t *wait_q_ptr;

	tm_handle_cmn = rx_thread->rtm_handle_cmn;

	if (!tm_handle_cmn) {
		dp_alert("tm_handle_cmn is null!");
		QDF_BUG(0);
		return QDF_STATUS_E_FAILURE;
	}

	wait_q_ptr = dp_rx_thread_get_wait_queue(tm_handle_cmn);

	if (reo_ring_num >= DP_RX_TM_MAX_REO_RINGS) {
		dp_alert("incorrect ring %u", reo_ring_num);
		QDF_BUG(0);
		return QDF_STATUS_E_FAILURE;
	}

	num_elements_in_nbuf = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf_list);

	dp_rx_tm_walk_skb_list(nbuf_list);

	head_ptr = nbuf_list;

	/* Ensure head doesn't have an ext list */
	while (qdf_unlikely(head_ptr && qdf_nbuf_get_ext_list(head_ptr))) {
		QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head_ptr) = 1;
		num_elements_in_nbuf--;
		next_ptr_list = head_ptr->next;
		qdf_nbuf_set_next(head_ptr, NULL);
		qdf_nbuf_queue_head_enqueue_tail(&rx_thread->nbuf_queue,
						 head_ptr);
		head_ptr = next_ptr_list;
	}

	if (!head_ptr)
		goto enq_done;

	next_ptr_list = head_ptr->next;

	if (next_ptr_list) {
		/* move ->next pointer to ext list */
		qdf_nbuf_append_ext_list(head_ptr, next_ptr_list, 0);
		dp_debug("appended next_ptr_list %pK to nbuf %pK ext list %pK",
			 qdf_nbuf_next(nbuf_list), nbuf_list,
			 qdf_nbuf_get_ext_list(nbuf_list));
	}
	qdf_nbuf_set_next(head_ptr, NULL);

	qdf_nbuf_queue_head_enqueue_tail(&rx_thread->nbuf_queue, head_ptr);

enq_done:
	temp_qlen = qdf_nbuf_queue_head_qlen(&rx_thread->nbuf_queue);

	rx_thread->stats.nbuf_queued[reo_ring_num] += num_elements_in_nbuf;

	if (temp_qlen > rx_thread->stats.nbufq_max_len)
		rx_thread->stats.nbufq_max_len = temp_qlen;

	qdf_set_bit(RX_POST_EVENT, &rx_thread->event_flag);
	qdf_wake_up_interruptible(wait_q_ptr);

	return QDF_STATUS_SUCCESS;
}
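
/*
 * Illustration of the enqueue packing scheme above, for a hypothetical
 * chain of three nbufs A, B and C handed in by the caller:
 *
 *	input chain:		A ->next-> B ->next-> C
 *	stored in nbuf_queue:	A (next = NULL, ext list = B ->next-> C)
 *
 * dp_rx_tm_thread_dequeue() below reverses this, moving the ext list
 * back to A's next pointer before the chain is processed further.
 */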

/**
 * dp_rx_tm_thread_dequeue() - dequeue nbuf list from rx_thread
 * @rx_thread: rx_thread from which the nbuf needs to be dequeued
 *
 * Returns: nbuf or nbuf_list dequeued from rx_thread
 */
static qdf_nbuf_t dp_rx_tm_thread_dequeue(struct dp_rx_thread *rx_thread)
{
	qdf_nbuf_t head, next_ptr_list, nbuf_list;

	head = qdf_nbuf_queue_head_dequeue(&rx_thread->nbuf_queue);
	nbuf_list = head;
	if (head && QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head) > 1) {
		/* move ext list to ->next pointer */
		next_ptr_list = qdf_nbuf_get_ext_list(head);
		qdf_nbuf_append_ext_list(head, NULL, 0);
		qdf_nbuf_set_next(nbuf_list, next_ptr_list);
		dp_rx_tm_walk_skb_list(nbuf_list);
	}
	return nbuf_list;
}

/**
 * dp_rx_thread_process_nbufq() - process nbuf queue of a thread
 * @rx_thread: rx_thread whose nbuf queue needs to be processed
 *
 * Returns: 0 on success, error code on failure
 */
static int dp_rx_thread_process_nbufq(struct dp_rx_thread *rx_thread)
{
	qdf_nbuf_t nbuf_list;
	uint32_t peer_local_id;
	void *peer;
	struct cdp_vdev *vdev;
	ol_txrx_rx_fp stack_fn;
	ol_osif_vdev_handle osif_vdev;
	ol_txrx_soc_handle soc;
	uint32_t num_list_elements = 0;
	struct cdp_pdev *pdev;
	struct dp_txrx_handle_cmn *txrx_handle_cmn;

	txrx_handle_cmn =
		dp_rx_thread_get_txrx_handle(rx_thread->rtm_handle_cmn);

	soc = dp_txrx_get_soc_from_ext_handle(txrx_handle_cmn);
	pdev = dp_txrx_get_pdev_from_ext_handle(txrx_handle_cmn);

	if (!soc || !pdev) {
		dp_err("invalid soc or pdev!");
		QDF_BUG(0);
		return -EFAULT;
	}

	nbuf_list = dp_rx_tm_thread_dequeue(rx_thread);
	while (nbuf_list) {
		num_list_elements =
			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf_list);
		rx_thread->stats.nbuf_dequeued += num_list_elements;

		peer_local_id = QDF_NBUF_CB_RX_PEER_LOCAL_ID(nbuf_list);
		peer = cdp_peer_find_by_local_id(soc, pdev, peer_local_id);

		if (!peer) {
			rx_thread->stats.dropped_invalid_peer +=
							num_list_elements;
			dp_err("peer not found for local_id %u!",
			       peer_local_id);
			qdf_nbuf_list_free(nbuf_list);
			goto dequeue_rx_thread;
		}

		vdev = cdp_peer_get_vdev(soc, peer);
		if (!vdev) {
			rx_thread->stats.dropped_invalid_vdev +=
							num_list_elements;
			dp_err("vdev not found for local_id %u!, pkt dropped",
			       peer_local_id);
			qdf_nbuf_list_free(nbuf_list);
			goto dequeue_rx_thread;
		}

		cdp_get_os_rx_handles_from_vdev(soc, vdev, &stack_fn,
						&osif_vdev);
		if (!stack_fn || !osif_vdev) {
			dp_alert("stack_fn or osif_vdev is null, pkt dropped!");
			QDF_BUG(0);
			rx_thread->stats.dropped_others +=
							num_list_elements;
			qdf_nbuf_list_free(nbuf_list);
			goto dequeue_rx_thread;
		}
		stack_fn(osif_vdev, nbuf_list);
		rx_thread->stats.nbuf_sent_to_stack += num_list_elements;

dequeue_rx_thread:
		nbuf_list = dp_rx_tm_thread_dequeue(rx_thread);
	}

	return 0;
}
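
/*
 * Per-chain flow in dp_rx_thread_process_nbufq() above: each dequeued
 * chain is mapped from its peer local id to a peer, then to a vdev,
 * and finally to the OS rx handler registered for that vdev, which
 * delivers it to the network stack. A chain failing any of these
 * lookups is freed and accounted in the thread's drop stats.
 */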

/**
 * dp_rx_thread_sub_loop() - rx thread subloop
 * @rx_thread: rx_thread to be processed
 * @shutdown: pointer to shutdown variable
 *
 * The function handles shutdown and suspend events from other
 * threads and processes the nbuf queue of a rx thread. In case a
 * shutdown event is received from some other wlan thread, the
 * function sets the shutdown pointer to true and returns.
 *
 * Returns: 0 on success, error code on failure
 */
static int dp_rx_thread_sub_loop(struct dp_rx_thread *rx_thread, bool *shutdown)
{
	while (true) {
		if (qdf_atomic_test_and_clear_bit(RX_SHUTDOWN_EVENT,
						  &rx_thread->event_flag)) {
			if (qdf_atomic_test_and_clear_bit(RX_SUSPEND_EVENT,
							  &rx_thread->event_flag)) {
				qdf_event_set(&rx_thread->suspend_event);
			}
			dp_debug("shutting down (%s) id %d pid %d",
				 qdf_get_current_comm(), rx_thread->id,
				 qdf_get_current_pid());
			*shutdown = true;
			break;
		}

		dp_rx_thread_process_nbufq(rx_thread);

		if (qdf_atomic_test_and_clear_bit(RX_SUSPEND_EVENT,
						  &rx_thread->event_flag)) {
			dp_debug("received suspend ind (%s) id %d pid %d",
				 qdf_get_current_comm(), rx_thread->id,
				 qdf_get_current_pid());
			qdf_event_reset(&rx_thread->resume_event);
			qdf_event_set(&rx_thread->suspend_event);
			dp_debug("waiting for resume (%s) id %d pid %d",
				 qdf_get_current_comm(), rx_thread->id,
				 qdf_get_current_pid());
			qdf_wait_single_event(&rx_thread->resume_event, 0);
		}
		break;
	}
	return 0;
}
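
/*
 * Event precedence in the sub loop above: RX_SHUTDOWN_EVENT wins over
 * everything else, with a pending RX_SUSPEND_EVENT acknowledged first
 * so a suspend caller is not left waiting. Otherwise the nbuf queue
 * is drained once and, if RX_SUSPEND_EVENT is set, the thread signals
 * suspend_event and blocks until dp_rx_tm_resume() sets resume_event.
 */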

/**
 * dp_rx_thread_loop() - main dp rx thread loop
 * @arg: pointer to dp_rx_thread structure for the rx thread
 *
 * Return: thread exit code
 */
static int dp_rx_thread_loop(void *arg)
{
	struct dp_rx_thread *rx_thread = arg;
	bool shutdown = false;
	int status;
	struct dp_rx_tm_handle_cmn *tm_handle_cmn;

	if (!arg) {
		dp_err("bad Args passed");
		return 0;
	}

	/* validate arg before dereferencing it for the handle */
	tm_handle_cmn = rx_thread->rtm_handle_cmn;

	qdf_set_user_nice(qdf_get_current_task(), -1);
	qdf_set_wake_up_idle(true);

	qdf_event_set(&rx_thread->start_event);
	dp_info("starting rx_thread (%s) id %d pid %d", qdf_get_current_comm(),
		rx_thread->id, qdf_get_current_pid());
	while (!shutdown) {
		/* sleep until a packet is posted or a suspend is requested */
		dp_debug("sleeping");
		status =
		    qdf_wait_queue_interruptible
				(DP_RX_THREAD_GET_WAIT_QUEUE_OBJ(tm_handle_cmn),
				 qdf_atomic_test_bit(RX_POST_EVENT,
						     &rx_thread->event_flag) ||
				 qdf_atomic_test_bit(RX_SUSPEND_EVENT,
						     &rx_thread->event_flag));
		dp_debug("woken up");

		if (status == -ERESTARTSYS) {
			dp_err("wait_event_interruptible returned -ERESTARTSYS");
			QDF_DEBUG_PANIC();
			break;
		}
		qdf_atomic_clear_bit(RX_POST_EVENT, &rx_thread->event_flag);
		dp_rx_thread_sub_loop(rx_thread, &shutdown);
	}

	/* If we get here the rx thread must exit */
	dp_info("exiting (%s) id %d pid %d", qdf_get_current_comm(),
		rx_thread->id, qdf_get_current_pid());
	qdf_event_set(&rx_thread->shutdown_event);
	qdf_exit_thread(QDF_STATUS_SUCCESS);

	return 0;
}

/**
 * dp_rx_tm_thread_napi_poll() - dummy napi poll for rx_thread NAPI
 * @napi: pointer to DP rx_thread NAPI
 * @budget: NAPI budget
 *
 * Return: 0 as it is not supposed to be polled at all as it is not scheduled.
 */
static int dp_rx_tm_thread_napi_poll(struct napi_struct *napi, int budget)
{
	dp_err("this napi_poll should not be polled as we don't schedule it");
	QDF_BUG(0);
	return 0;
}

/**
 * dp_rx_tm_thread_napi_init() - Initialize dummy rx_thread NAPI
 * @rx_thread: dp_rx_thread structure containing dummy napi and netdev
 *
 * Return: None
 */
static void dp_rx_tm_thread_napi_init(struct dp_rx_thread *rx_thread)
{
	/* Todo - optimize to use only one dummy netdev for all thread napis */
	init_dummy_netdev(&rx_thread->netdev);
	netif_napi_add(&rx_thread->netdev, &rx_thread->napi,
		       dp_rx_tm_thread_napi_poll, 64);
	napi_enable(&rx_thread->napi);
}

/**
 * dp_rx_tm_thread_napi_deinit() - De-initialize dummy rx_thread NAPI
 * @rx_thread: dp_rx_thread handle containing dummy napi and netdev
 *
 * Return: None
 */
static void dp_rx_tm_thread_napi_deinit(struct dp_rx_thread *rx_thread)
{
	netif_napi_del(&rx_thread->napi);
}
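
/*
 * Note on the dummy NAPI above: when GRO is enabled, each rx thread
 * carries a napi context only so the GRO layer has a struct to hold
 * its state (it is handed out via dp_rx_tm_get_napi_context()). The
 * napi is never scheduled, hence the poll callback asserting if it
 * ever runs.
 */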

/**
 * dp_rx_tm_thread_init() - Initialize dp_rx_thread structure and thread
 * @rx_thread: dp_rx_thread structure to be initialized
 * @id: id of the thread to be initialized
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF error code on failure
 */
static QDF_STATUS dp_rx_tm_thread_init(struct dp_rx_thread *rx_thread,
				       uint8_t id)
{
	char thread_name[15];
	QDF_STATUS qdf_status;

	qdf_mem_set(thread_name, 0, sizeof(thread_name));

	if (!rx_thread) {
		dp_err("rx_thread is null!");
		return QDF_STATUS_E_FAULT;
	}
	rx_thread->id = id;
	rx_thread->event_flag = 0;
	qdf_nbuf_queue_head_init(&rx_thread->nbuf_queue);
	qdf_event_create(&rx_thread->start_event);
	qdf_event_create(&rx_thread->suspend_event);
	qdf_event_create(&rx_thread->resume_event);
	qdf_event_create(&rx_thread->shutdown_event);
	qdf_scnprintf(thread_name, sizeof(thread_name), "dp_rx_thread_%u", id);
	dp_info("%s %u", thread_name, id);

	if (cdp_cfg_get(dp_rx_tm_get_soc_handle(rx_thread->rtm_handle_cmn),
			cfg_dp_gro_enable))
		dp_rx_tm_thread_napi_init(rx_thread);

	rx_thread->task = qdf_create_thread(dp_rx_thread_loop,
					    rx_thread, thread_name);
	if (!rx_thread->task) {
		dp_err("could not create dp_rx_thread %d", id);
		return QDF_STATUS_E_FAILURE;
	}

	qdf_wake_up_process(rx_thread->task);
	qdf_status = qdf_wait_single_event(&rx_thread->start_event, 0);

	if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
		dp_err("failed waiting for thread creation id %d", id);
		return QDF_STATUS_E_FAILURE;
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_rx_tm_thread_deinit() - De-initialize dp_rx_thread structure and thread
 * @rx_thread: dp_rx_thread structure to be de-initialized
 *
 * Return: QDF_STATUS_SUCCESS
 */
static QDF_STATUS dp_rx_tm_thread_deinit(struct dp_rx_thread *rx_thread)
{
	qdf_event_destroy(&rx_thread->start_event);
	qdf_event_destroy(&rx_thread->suspend_event);
	qdf_event_destroy(&rx_thread->resume_event);
	qdf_event_destroy(&rx_thread->shutdown_event);

	if (cdp_cfg_get(dp_rx_tm_get_soc_handle(rx_thread->rtm_handle_cmn),
			cfg_dp_gro_enable))
		dp_rx_tm_thread_napi_deinit(rx_thread);

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_rx_tm_init(struct dp_rx_tm_handle *rx_tm_hdl,
			 uint8_t num_dp_rx_threads)
{
	int i;
	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;

	if (num_dp_rx_threads > DP_MAX_RX_THREADS) {
		dp_err("unable to initialize %u number of threads. MAX %u",
		       num_dp_rx_threads, DP_MAX_RX_THREADS);
		return QDF_STATUS_E_INVAL;
	}

	rx_tm_hdl->num_dp_rx_threads = num_dp_rx_threads;

	dp_info("initializing %u threads", num_dp_rx_threads);

	/* allocate an array to contain the DP RX thread pointers */
	rx_tm_hdl->rx_thread = qdf_mem_malloc(num_dp_rx_threads *
					      sizeof(struct dp_rx_thread *));

	if (qdf_unlikely(!rx_tm_hdl->rx_thread)) {
		qdf_status = QDF_STATUS_E_NOMEM;
		goto ret;
	}

	qdf_init_waitqueue_head(&rx_tm_hdl->wait_q);

	for (i = 0; i < rx_tm_hdl->num_dp_rx_threads; i++) {
		rx_tm_hdl->rx_thread[i] =
			(struct dp_rx_thread *)
			qdf_mem_malloc(sizeof(struct dp_rx_thread));
		if (qdf_unlikely(!rx_tm_hdl->rx_thread[i])) {
			QDF_ASSERT(0);
			qdf_status = QDF_STATUS_E_NOMEM;
			goto ret;
		}
		rx_tm_hdl->rx_thread[i]->rtm_handle_cmn =
			(struct dp_rx_tm_handle_cmn *)rx_tm_hdl;
		qdf_status =
			dp_rx_tm_thread_init(rx_tm_hdl->rx_thread[i], i);
		if (!QDF_IS_STATUS_SUCCESS(qdf_status))
			break;
	}
ret:
	if (!QDF_IS_STATUS_SUCCESS(qdf_status))
		dp_rx_tm_deinit(rx_tm_hdl);

	return qdf_status;
}

/**
 * dp_rx_tm_suspend() - suspend DP RX threads
 * @rx_tm_hdl: dp_rx_tm_handle containing the overall thread
 *             infrastructure
 *
 * Return: QDF_STATUS_SUCCESS
 */
QDF_STATUS dp_rx_tm_suspend(struct dp_rx_tm_handle *rx_tm_hdl)
{
	int i;
	QDF_STATUS qdf_status;
	struct dp_rx_thread *rx_thread;

	for (i = 0; i < rx_tm_hdl->num_dp_rx_threads; i++) {
		if (!rx_tm_hdl->rx_thread[i])
			continue;
		qdf_set_bit(RX_SUSPEND_EVENT,
			    &rx_tm_hdl->rx_thread[i]->event_flag);
	}

	qdf_wake_up_interruptible(&rx_tm_hdl->wait_q);

	for (i = 0; i < rx_tm_hdl->num_dp_rx_threads; i++) {
		rx_thread = rx_tm_hdl->rx_thread[i];
		if (!rx_thread)
			continue;
		dp_debug("thread %d", i);
		qdf_status = qdf_wait_single_event(&rx_thread->suspend_event,
						   DP_RX_THREAD_WAIT_TIMEOUT);
		if (QDF_IS_STATUS_SUCCESS(qdf_status))
			dp_debug("thread:%d suspended", rx_thread->id);
		else if (qdf_status == QDF_STATUS_E_TIMEOUT)
			dp_err("thread:%d timed out waiting for suspend",
			       rx_thread->id);
		else
			dp_err("thread:%d failed while waiting for suspend",
			       rx_thread->id);
	}
	rx_tm_hdl->state = DP_RX_THREAD_SUSPENDED;

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_rx_tm_resume() - resume DP RX threads
 * @rx_tm_hdl: dp_rx_tm_handle containing the overall thread
 *             infrastructure
 *
 * Return: QDF_STATUS_SUCCESS
 */
QDF_STATUS dp_rx_tm_resume(struct dp_rx_tm_handle *rx_tm_hdl)
{
	int i;

	if (rx_tm_hdl->state != DP_RX_THREAD_SUSPENDED) {
		dp_err("resume callback received without suspend");
		return QDF_STATUS_E_FAULT;
	}

	for (i = 0; i < rx_tm_hdl->num_dp_rx_threads; i++) {
		if (!rx_tm_hdl->rx_thread[i])
			continue;
		dp_debug("calling thread %d to resume", i);
		qdf_event_set(&rx_tm_hdl->rx_thread[i]->resume_event);
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_rx_tm_shutdown() - shutdown all DP RX threads
 * @rx_tm_hdl: dp_rx_tm_handle containing the overall thread infrastructure
 *
 * Return: QDF_STATUS_SUCCESS
 */
static QDF_STATUS dp_rx_tm_shutdown(struct dp_rx_tm_handle *rx_tm_hdl)
{
	int i;

	for (i = 0; i < rx_tm_hdl->num_dp_rx_threads; i++) {
		if (!rx_tm_hdl->rx_thread[i])
			continue;
		qdf_set_bit(RX_SHUTDOWN_EVENT,
			    &rx_tm_hdl->rx_thread[i]->event_flag);
		qdf_set_bit(RX_POST_EVENT,
			    &rx_tm_hdl->rx_thread[i]->event_flag);
	}

	qdf_wake_up_interruptible(&rx_tm_hdl->wait_q);

	for (i = 0; i < rx_tm_hdl->num_dp_rx_threads; i++) {
		if (!rx_tm_hdl->rx_thread[i])
			continue;
		dp_debug("waiting for shutdown of thread %d", i);
		qdf_wait_single_event(&rx_tm_hdl->rx_thread[i]->shutdown_event,
				      0);
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_rx_tm_deinit() - de-initialize RX thread infrastructure
 * @rx_tm_hdl: dp_rx_tm_handle containing the overall thread
 *             infrastructure
 *
 * Return: QDF_STATUS_SUCCESS
 */
QDF_STATUS dp_rx_tm_deinit(struct dp_rx_tm_handle *rx_tm_hdl)
{
	int i = 0;

	if (!rx_tm_hdl->rx_thread) {
		dp_err("rx_tm_hdl->rx_thread not initialized!");
		return QDF_STATUS_SUCCESS;
	}

	dp_rx_tm_shutdown(rx_tm_hdl);

	for (i = 0; i < rx_tm_hdl->num_dp_rx_threads; i++) {
		if (!rx_tm_hdl->rx_thread[i])
			continue;
		dp_rx_tm_thread_deinit(rx_tm_hdl->rx_thread[i]);
		qdf_mem_free(rx_tm_hdl->rx_thread[i]);
	}

	/* free the array of RX thread pointers */
	qdf_mem_free(rx_tm_hdl->rx_thread);
	rx_tm_hdl->rx_thread = NULL;

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_rx_tm_select_thread() - select a DP RX thread for a nbuf
 * @rx_tm_hdl: dp_rx_tm_handle containing the overall thread
 *             infrastructure
 * @nbuf_list: list of nbufs to be enqueued in to the thread
 *
 * The function relies on the presence of QDF_NBUF_CB_RX_CTX_ID
 * in the nbuf list. Depending on the RX_CTX (copy engine or reo
 * ring) on which the packet was received, the function selects
 * a corresponding rx_thread.
 *
 * Return: rx thread ID selected for the nbuf
 */
static uint8_t dp_rx_tm_select_thread(struct dp_rx_tm_handle *rx_tm_hdl,
				      qdf_nbuf_t nbuf_list)
{
	uint8_t selected_rx_thread;
	uint8_t reo_ring_num = QDF_NBUF_CB_RX_CTX_ID(nbuf_list);

	if (reo_ring_num >= rx_tm_hdl->num_dp_rx_threads) {
		dp_err_rl("unexpected ring number");
		QDF_BUG(0);
		return 0;
	}

	selected_rx_thread = reo_ring_num;
	return selected_rx_thread;
}
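
/*
 * The selection above is currently an identity mapping: a chain
 * tagged with rx context (reo ring) N is handled by rx_thread N. For
 * example, a chain received on reo ring 2 goes to rx_thread 2,
 * provided at least three threads were initialized.
 */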

QDF_STATUS dp_rx_tm_enqueue_pkt(struct dp_rx_tm_handle *rx_tm_hdl,
				qdf_nbuf_t nbuf_list)
{
	uint8_t selected_thread_id;

	selected_thread_id = dp_rx_tm_select_thread(rx_tm_hdl, nbuf_list);
	dp_rx_tm_thread_enqueue(rx_tm_hdl->rx_thread[selected_thread_id],
				nbuf_list);
	return QDF_STATUS_SUCCESS;
}

struct napi_struct *dp_rx_tm_get_napi_context(struct dp_rx_tm_handle *rx_tm_hdl,
					      uint8_t rx_ctx_id)
{
	if (rx_ctx_id >= rx_tm_hdl->num_dp_rx_threads) {
		dp_err_rl("unexpected rx_ctx_id %u", rx_ctx_id);
		QDF_BUG(0);
		return NULL;
	}

	return &rx_tm_hdl->rx_thread[rx_ctx_id]->napi;
}
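
/*
 * Minimal usage sketch of the thread manager API in this file. This
 * is illustrative only: the real callers live in the dp_txrx wrapper
 * layer, and the handle and thread count below are hypothetical.
 *
 *	struct dp_rx_tm_handle *rx_tm_hdl = ...;
 *
 *	dp_rx_tm_init(rx_tm_hdl, 3);		     spawn 3 rx threads
 *	dp_rx_tm_enqueue_pkt(rx_tm_hdl, nbuf_list);  per rx nbuf chain
 *	dp_rx_tm_dump_stats(rx_tm_hdl);		     periodic/debug stats
 *	dp_rx_tm_suspend(rx_tm_hdl);		     on bus/wow suspend
 *	dp_rx_tm_resume(rx_tm_hdl);		     on resume
 *	dp_rx_tm_deinit(rx_tm_hdl);		     shut down and free
 */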