Mohit Khanna | 7032200 | 2018-05-15 19:21:32 -0700 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved. |
| 3 | * |
| 4 | * Permission to use, copy, modify, and/or distribute this software for |
| 5 | * any purpose with or without fee is hereby granted, provided that the |
| 6 | * above copyright notice and this permission notice appear in all |
| 7 | * copies. |
| 8 | * |
| 9 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL |
| 10 | * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED |
| 11 | * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE |
| 12 | * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL |
| 13 | * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR |
| 14 | * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER |
| 15 | * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR |
| 16 | * PERFORMANCE OF THIS SOFTWARE. |
| 17 | */ |
| 18 | |
Jianmin Zhu | 13921e9 | 2018-09-05 17:30:58 +0800 | [diff] [blame] | 19 | #if !defined(__DP_RX_THREAD_H) |
Mohit Khanna | 7032200 | 2018-05-15 19:21:32 -0700 | [diff] [blame] | 20 | #define __DP_RX_THREAD_H |
| 21 | |
| 22 | #include <qdf_lock.h> |
| 23 | #include <qdf_event.h> |
| 24 | #include <qdf_threads.h> |
| 25 | #include <wlan_objmgr_vdev_obj.h> |
/* Maximum number of REO rings supported (for per-ring stats tracking) */
#define DP_RX_TM_MAX_REO_RINGS 4

/* Number of DP RX threads supported */
#define DP_MAX_RX_THREADS 3
| 31 | |
/*
 * Macro to get to the wait_queue structure. Needed since wait_q is an object.
 * API qdf_wait_queue_interruptible needs the object to be passed to it and
 * not a pointer.
 */
#define DP_RX_THREAD_GET_WAIT_QUEUE_OBJ(rx_tm_handle_cmn) \
		(((struct dp_rx_tm_handle *)rx_tm_handle_cmn)->wait_q)
/*
 * struct dp_rx_tm_handle_cmn - Opaque handle for rx_threads to store
 * rx_tm_handle. This handle is common for all the threads. Individual
 * threads should not access elements of dp_rx_tm_handle directly; access
 * should go via an API (see the accessor functions below).
 */
struct dp_rx_tm_handle_cmn;
| 46 | |
/**
 * struct dp_rx_thread_stats - structure holding stats for DP RX thread
 * @nbuf_queued: packets queued into the thread, per reo ring
 * @nbuf_dequeued: packets de-queued from the thread
 * @nbuf_sent_to_stack: packets sent to the stack. some dequeued packets may be
 *		       dropped due to no peer or vdev, hence this stat.
 * @nbufq_max_len: maximum number of nbuf_lists queued for the thread
 * @dropped_invalid_vdev: packets(nbuf_list) dropped due to no vdev
 * @dropped_invalid_peer: packets(nbuf_list) dropped due to no peer
 * @dropped_others: packets dropped due to other reasons
 */
struct dp_rx_thread_stats {
	unsigned int nbuf_queued[DP_RX_TM_MAX_REO_RINGS];
	unsigned int nbuf_dequeued;
	unsigned int nbuf_sent_to_stack;
	unsigned int nbufq_max_len;
	unsigned int dropped_invalid_vdev;
	unsigned int dropped_invalid_peer;
	unsigned int dropped_others;
};
| 68 | |
/**
 * struct dp_rx_thread - structure holding variables for a single DP RX thread
 * @task: task structure corresponding to the thread
 * @start_event: handle of Event for DP Rx thread to signal startup
 * @suspend_event: handle of Event for DP Rx thread to signal suspend
 * @resume_event: handle of Event for DP Rx thread to signal resume
 * @shutdown_event: handle of Event for DP Rx thread to signal shutdown
 * @event_flag: event flag to post events to DP Rx thread
 * @nbuf_queue: nbuf queue used to store RX packets
 * @aff_mask: current affinity mask of the DP Rx thread
 * @stats: per thread stats
 * @id: id of the dp_rx_thread (0 or 1 or 2..DP_MAX_RX_THREADS - 1)
 * @rtm_handle_cmn: abstract RX TM handle. This allows access to the dp_rx_tm
 *		    structures via APIs.
 */
struct dp_rx_thread {
	qdf_thread_t *task;
	qdf_event_t start_event;
	qdf_event_t suspend_event;
	qdf_event_t resume_event;
	qdf_event_t shutdown_event;
	unsigned long event_flag;
	qdf_nbuf_queue_head_t nbuf_queue;
	unsigned long aff_mask;
	struct dp_rx_thread_stats stats;
	uint8_t id;
	struct dp_rx_tm_handle_cmn *rtm_handle_cmn;
};
| 98 | |
/**
 * enum dp_rx_thread_state - enum to keep track of the state of the rx threads
 * @DP_RX_THREAD_INVALID: initial invalid state
 * @DP_RX_THREAD_INIT: state after being initialized
 * @DP_RX_THREAD_RUNNING: rx thread is functional (NOT suspended, processing
 *			  packets or waiting on a wait_queue)
 * @DP_RX_THREAD_SUSPENDED: rx_thread operation is suspended from cfg80211
 *			    suspend
 */
enum dp_rx_thread_state {
	DP_RX_THREAD_INVALID,
	DP_RX_THREAD_INIT,
	DP_RX_THREAD_RUNNING,
	DP_RX_THREAD_SUSPENDED
};
| 113 | |
/**
 * struct dp_rx_tm_handle - DP RX thread infrastructure handle
 * @txrx_handle_cmn: opaque txrx handle to get to pdev and soc
 * @wait_q: wait_queue for the rx_threads to wait on and expect an event
 * @state: state of the rx_threads. All of them should be in the same state.
 * @rx_thread: array of pointers of type struct dp_rx_thread
 */
struct dp_rx_tm_handle {
	struct dp_txrx_handle_cmn *txrx_handle_cmn;
	qdf_wait_queue_head_t wait_q;
	enum dp_rx_thread_state state;
	struct dp_rx_thread *rx_thread[DP_MAX_RX_THREADS];
};
| 127 | |
/**
 * dp_rx_tm_init() - initialize DP Rx thread infrastructure
 * @rx_tm_hdl: dp_rx_tm_handle containing the overall thread infrastructure
 * @num_dp_rx_threads: number of DP Rx threads to be initialized
 *
 * Allocates and starts the rx threads tracked by @rx_tm_hdl.
 *
 * Return: QDF_STATUS_SUCCESS
 */
QDF_STATUS dp_rx_tm_init(struct dp_rx_tm_handle *rx_tm_hdl,
			 uint8_t num_dp_rx_threads);
| 137 | |
/**
 * dp_rx_tm_deinit() - de-initialize DP Rx thread infrastructure
 * @rx_tm_hdl: dp_rx_tm_handle containing the overall thread infrastructure
 *
 * Counterpart of dp_rx_tm_init(); tears down the rx threads.
 *
 * Return: QDF_STATUS_SUCCESS on success, error qdf status on failure
 */
QDF_STATUS dp_rx_tm_deinit(struct dp_rx_tm_handle *rx_tm_hdl);
| 145 | |
/**
 * dp_rx_tm_enqueue_pkt() - enqueue RX packet into RXTI
 * @rx_tm_hdl: dp_rx_tm_handle containing the overall thread infrastructure
 * @nbuf_list: single nbuf or a list of nbufs to be enqueued into RXTI
 *
 * Return: QDF_STATUS_SUCCESS
 */
QDF_STATUS dp_rx_tm_enqueue_pkt(struct dp_rx_tm_handle *rx_tm_hdl,
				qdf_nbuf_t nbuf_list);
| 155 | |
/**
 * dp_rx_tm_suspend() - suspend all threads in RXTI
 * @rx_tm_handle: pointer to dp_rx_tm_handle object
 *
 * Return: QDF_STATUS_SUCCESS on success, error qdf status on failure
 */
QDF_STATUS dp_rx_tm_suspend(struct dp_rx_tm_handle *rx_tm_handle);
| 163 | |
/**
 * dp_rx_tm_resume() - resume all threads in RXTI
 * @rx_tm_handle: pointer to dp_rx_tm_handle object
 *
 * Counterpart of dp_rx_tm_suspend().
 *
 * Return: QDF_STATUS_SUCCESS on success, error qdf status on failure
 */
QDF_STATUS dp_rx_tm_resume(struct dp_rx_tm_handle *rx_tm_handle);
| 171 | |
/**
 * dp_rx_tm_dump_stats() - dump stats for all threads in RXTI
 * @rx_tm_handle: pointer to dp_rx_tm_handle object
 *
 * Return: QDF_STATUS_SUCCESS on success, error qdf status on failure
 */
QDF_STATUS dp_rx_tm_dump_stats(struct dp_rx_tm_handle *rx_tm_handle);
| 179 | |
| 180 | /** |
| 181 | * dp_rx_thread_get_txrx_handle() - get txrx handle from rx_tm_handle_cmn |
| 182 | * @rx_tm_handle_cmn: opaque pointer to dp_rx_tm_handle_cmn struct |
| 183 | * |
| 184 | * Return: pointer to dp_txrx_handle_cmn handle |
| 185 | */ |
| 186 | static inline struct dp_txrx_handle_cmn* |
| 187 | dp_rx_thread_get_txrx_handle(struct dp_rx_tm_handle_cmn *rx_tm_handle_cmn) |
| 188 | { |
| 189 | return (((struct dp_rx_tm_handle *)rx_tm_handle_cmn)->txrx_handle_cmn); |
| 190 | } |
| 191 | |
| 192 | /** |
| 193 | * dp_rx_thread_get_wait_queue() - get wait_q from dp_rx_tm_handle |
| 194 | * @rx_tm_handle_cmn: opaque pointer to dp_rx_tm_handle_cmn struct |
| 195 | * |
| 196 | * The function is needed since dp_rx_thread does not have access to the real |
| 197 | * dp_rx_tm_handle structure, but only an opaque dp_rx_tm_handle_cmn handle |
| 198 | * |
| 199 | * Return: pointer to dp_txrx_handle_cmn handle |
| 200 | */ |
| 201 | static inline qdf_wait_queue_head_t* |
| 202 | dp_rx_thread_get_wait_queue(struct dp_rx_tm_handle_cmn *rx_tm_handle_cmn) |
| 203 | { |
| 204 | struct dp_rx_tm_handle *rx_tm_handle; |
| 205 | |
| 206 | rx_tm_handle = (struct dp_rx_tm_handle *)rx_tm_handle_cmn; |
| 207 | return &rx_tm_handle->wait_q; |
| 208 | } |
| 209 | |
| 210 | #endif /* __DP_RX_THREAD_H */ |