blob: cfdffe8889d5560b4f96f315b592be18fd7ab017 [file] [log] [blame]
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001/*
2 * Copyright (c) 2013-2015 The Linux Foundation. All rights reserved.
3 *
4 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5 *
6 *
7 * Permission to use, copy, modify, and/or distribute this software for
8 * any purpose with or without fee is hereby granted, provided that the
9 * above copyright notice and this permission notice appear in all
10 * copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19 * PERFORMANCE OF THIS SOFTWARE.
20 */
21
22/*
23 * This file was originally distributed by Qualcomm Atheros, Inc.
24 * under proprietary terms before Copyright ownership was assigned
25 * to the Linux Foundation.
26 */
27
28/**
29 * DOC: wlan_hdd_ipa.c
30 *
31 * WLAN HDD and ipa interface implementation
32 * Originally written by Qualcomm Atheros, Inc
33 */
34
35#ifdef IPA_OFFLOAD
36
37/* Include Files */
38#include <wlan_hdd_includes.h>
39#include <wlan_hdd_ipa.h>
40
41#include <linux/etherdevice.h>
42#include <linux/atomic.h>
43#include <linux/netdevice.h>
44#include <linux/skbuff.h>
45#include <linux/list.h>
46#include <linux/debugfs.h>
47#include <linux/inetdevice.h>
48#include <linux/ip.h>
49#include <wlan_hdd_softap_tx_rx.h>
50#include <ol_txrx_osif_api.h>
51
52#include "cds_sched.h"
53
54#include "wma.h"
55#include "wma_api.h"
56
#define HDD_IPA_DESC_BUFFER_RATIO 4
#define HDD_IPA_IPV4_NAME_EXT "_ipv4"
#define HDD_IPA_IPV6_NAME_EXT "_ipv6"

#define HDD_IPA_RX_INACTIVITY_MSEC_DELAY 1000
#define HDD_IPA_UC_WLAN_HDR_DES_MAC_OFFSET 12
#define HDD_IPA_UC_WLAN_8023_HDR_SIZE 14
/* WDI TX and RX PIPE */
#define HDD_IPA_UC_NUM_WDI_PIPE 2
#define HDD_IPA_UC_MAX_PENDING_EVENT 33

/* Size of the periodic probe allocation used for OOM detection */
#define HDD_IPA_UC_DEBUG_DUMMY_MEM_SIZE 32000
/* rt-debug timer periods; presumably in msec (cdf_mc_timer units) -- confirm */
#define HDD_IPA_UC_RT_DEBUG_PERIOD 300
#define HDD_IPA_UC_RT_DEBUG_BUF_COUNT 30
#define HDD_IPA_UC_RT_DEBUG_FILL_INTERVAL 10000

#define HDD_IPA_WLAN_HDR_DES_MAC_OFFSET 0
#define HDD_IPA_MAX_IFACE 3
#define HDD_IPA_MAX_SYSBAM_PIPE 4
#define HDD_IPA_RX_PIPE HDD_IPA_MAX_IFACE
/*
 * Feature bits tested against the IpaConfig ini value via
 * HDD_IPA_IS_CONFIG_ENABLED(). NOTE(review): BIT(7) is unused in this view.
 */
#define HDD_IPA_ENABLE_MASK BIT(0)
#define HDD_IPA_PRE_FILTER_ENABLE_MASK BIT(1)
#define HDD_IPA_IPV6_ENABLE_MASK BIT(2)
#define HDD_IPA_RM_ENABLE_MASK BIT(3)
#define HDD_IPA_CLK_SCALING_ENABLE_MASK BIT(4)
#define HDD_IPA_UC_ENABLE_MASK BIT(5)
#define HDD_IPA_UC_STA_ENABLE_MASK BIT(6)
#define HDD_IPA_REAL_TIME_DEBUGGING BIT(8)

#define HDD_IPA_MAX_PENDING_EVENT_COUNT 20
87
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080088typedef enum {
89 HDD_IPA_UC_OPCODE_TX_SUSPEND = 0,
90 HDD_IPA_UC_OPCODE_TX_RESUME = 1,
91 HDD_IPA_UC_OPCODE_RX_SUSPEND = 2,
92 HDD_IPA_UC_OPCODE_RX_RESUME = 3,
93 HDD_IPA_UC_OPCODE_STATS = 4,
94 /* keep this last */
95 HDD_IPA_UC_OPCODE_MAX
96} hdd_ipa_uc_op_code;
97
98/**
99 * enum - Reason codes for stat query
100 *
101 * @HDD_IPA_UC_STAT_REASON_NONE: Initial value
102 * @HDD_IPA_UC_STAT_REASON_DEBUG: For debug/info
103 * @HDD_IPA_UC_STAT_REASON_BW_CAL: For bandwidth calibration
104 */
105enum {
106 HDD_IPA_UC_STAT_REASON_NONE,
107 HDD_IPA_UC_STAT_REASON_DEBUG,
108 HDD_IPA_UC_STAT_REASON_BW_CAL
109};
110
111/**
112 * enum hdd_ipa_rm_state - IPA resource manager state
113 * @HDD_IPA_RM_RELEASED: PROD pipe resource released
114 * @HDD_IPA_RM_GRANT_PENDING: PROD pipe resource requested but not granted yet
115 * @HDD_IPA_RM_GRANTED: PROD pipe resource granted
116 */
117enum hdd_ipa_rm_state {
118 HDD_IPA_RM_RELEASED,
119 HDD_IPA_RM_GRANT_PENDING,
120 HDD_IPA_RM_GRANTED,
121};
122
/**
 * struct llc_snap_hdr - LLC/SNAP header that precedes the EtherType
 * @dsap: destination service access point
 * @ssap: source service access point
 * @resv: control byte + SNAP OUI (stored as one reserved run here)
 * @eth_type: EtherType in network byte order
 */
struct llc_snap_hdr {
	uint8_t dsap;
	uint8_t ssap;
	uint8_t resv[4];
	__be16 eth_type;
} __packed;

/**
 * struct hdd_ipa_tx_hdr - header type which IPA should handle to TX packet
 * @eth: ether II header
 * @llc_snap: LLC snap header
 *
 */
struct hdd_ipa_tx_hdr {
	struct ethhdr eth;
	struct llc_snap_hdr llc_snap;
} __packed;

/**
 * struct frag_header - fragment header type registered to IPA hardware
 * @length: fragment length
 * @reserved1: Reserved not used
 * @reserved2: Reserved not used
 *
 */
struct frag_header {
	uint16_t length;
	uint32_t reserved1;
	uint32_t reserved2;
} __packed;

/**
 * struct ipa_header - ipa header type registered to IPA hardware
 * @vdev_id: vdev id
 * @reserved: Reserved not used
 *
 */
struct ipa_header {
	uint32_t
		vdev_id:8,	/* vdev_id field is LSB of IPA DESC */
		reserved:24;
} __packed;

/**
 * struct hdd_ipa_uc_tx_hdr - full tx header registered to IPA hardware
 * @frag_hd: fragment header
 * @ipa_hd: ipa header
 * @eth: ether II header
 *
 */
struct hdd_ipa_uc_tx_hdr {
	struct frag_header frag_hd;
	struct ipa_header ipa_hd;
	struct ethhdr eth;
} __packed;
178
#define HDD_IPA_WLAN_FRAG_HEADER sizeof(struct frag_header)
/*
 * Fix: previously defined as sizeof(struct frag_header) — an obvious
 * copy/paste of the line above. This macro names the size of the IPA
 * header (struct ipa_header, 4 bytes), not the fragment header (10 bytes).
 */
#define HDD_IPA_WLAN_IPA_HEADER sizeof(struct ipa_header)
181
182/**
183 * struct hdd_ipa_cld_hdr - IPA CLD Header
184 * @reserved: reserved fields
185 * @iface_id: interface ID
186 * @sta_id: Station ID
187 *
188 * Packed 32-bit structure
189 * +----------+----------+--------------+--------+
190 * | Reserved | QCMAP ID | interface id | STA ID |
191 * +----------+----------+--------------+--------+
192 */
193struct hdd_ipa_cld_hdr {
194 uint8_t reserved[2];
195 uint8_t iface_id;
196 uint8_t sta_id;
197} __packed;
198
199struct hdd_ipa_rx_hdr {
200 struct hdd_ipa_cld_hdr cld_hdr;
201 struct ethhdr eth;
202} __packed;
203
204struct hdd_ipa_pm_tx_cb {
205 struct hdd_ipa_iface_context *iface_context;
206 struct ipa_rx_data *ipa_tx_desc;
207};
208
209struct hdd_ipa_uc_rx_hdr {
210 struct ethhdr eth;
211} __packed;
212
213struct hdd_ipa_sys_pipe {
214 uint32_t conn_hdl;
215 uint8_t conn_hdl_valid;
216 struct ipa_sys_connect_params ipa_sys_params;
217};
218
219struct hdd_ipa_iface_stats {
220 uint64_t num_tx;
221 uint64_t num_tx_drop;
222 uint64_t num_tx_err;
223 uint64_t num_tx_cac_drop;
224 uint64_t num_rx_prefilter;
225 uint64_t num_rx_ipa_excep;
226 uint64_t num_rx_recv;
227 uint64_t num_rx_recv_mul;
228 uint64_t num_rx_send_desc_err;
229 uint64_t max_rx_mul;
230};
231
struct hdd_ipa_priv;

/**
 * struct hdd_ipa_iface_context - per-interface IPA state
 * @hdd_ipa: back pointer to the global HDD IPA context
 * @adapter: HDD adapter this interface belongs to
 * @tl_context: TL/txrx layer context for this interface
 * @cons_client: IPA consumer client type assigned to this interface
 * @prod_client: IPA producer client type assigned to this interface
 * @iface_id: This iface ID
 * @sta_id: This iface station ID
 * @interface_lock: protects this structure's mutable fields
 * @ifa_address: IPv4 address of the interface (see ipv4_notifier)
 * @stats: per-interface counters
 */
struct hdd_ipa_iface_context {
	struct hdd_ipa_priv *hdd_ipa;
	hdd_adapter_t *adapter;
	void *tl_context;

	enum ipa_client_type cons_client;
	enum ipa_client_type prod_client;

	uint8_t iface_id;	/* This iface ID */
	uint8_t sta_id;		/* This iface station ID */
	cdf_spinlock_t interface_lock;
	uint32_t ifa_address;
	struct hdd_ipa_iface_stats stats;
};
248
/* Global (non-per-interface) HDD IPA counters */
struct hdd_ipa_stats {
	uint32_t event[IPA_WLAN_EVENT_MAX];
	uint64_t num_send_msg;
	uint64_t num_free_msg;

	/* resource-manager activity */
	uint64_t num_rm_grant;
	uint64_t num_rm_release;
	uint64_t num_rm_grant_imm;
	uint64_t num_cons_perf_req;
	uint64_t num_prod_perf_req;

	/* rx path through IPA */
	uint64_t num_rx_drop;
	uint64_t num_rx_ipa_tx_dp;
	uint64_t num_rx_ipa_splice;
	uint64_t num_rx_ipa_loop;
	uint64_t num_rx_ipa_tx_dp_err;
	uint64_t num_rx_ipa_write_done;
	uint64_t num_max_ipa_tx_mul;
	uint64_t num_rx_ipa_hw_maxed_out;
	uint64_t max_pend_q_cnt;

	/* tx path / PM queue */
	uint64_t num_tx_comp_cnt;
	uint64_t num_tx_queued;
	uint64_t num_tx_dequeued;
	uint64_t num_max_pm_queue;

	uint64_t num_freeq_empty;
	uint64_t num_pri_freeq_empty;
	uint64_t num_rx_excep;
	uint64_t num_tx_bcmc;
	uint64_t num_tx_bcmc_err;
};

/* One slot of the associated-station map (see assoc_stas_map) */
struct ipa_uc_stas_map {
	bool is_reserved;
	uint8_t sta_id;
};
/* Message carried by uc_op_work_struct between FW event and work handler */
struct op_msg_type {
	uint8_t msg_t;
	uint8_t rsvd;
	uint16_t op_code;
	uint16_t len;
	uint16_t rsvd_snd;
};

/* uC firmware statistics snapshot returned by the STATS opcode */
struct ipa_uc_fw_stats {
	uint32_t tx_comp_ring_base;
	uint32_t tx_comp_ring_size;
	uint32_t tx_comp_ring_dbell_addr;
	uint32_t tx_comp_ring_dbell_ind_val;
	uint32_t tx_comp_ring_dbell_cached_val;
	uint32_t tx_pkts_enqueued;
	uint32_t tx_pkts_completed;
	uint32_t tx_is_suspend;
	uint32_t tx_reserved;
	uint32_t rx_ind_ring_base;
	uint32_t rx_ind_ring_size;
	uint32_t rx_ind_ring_dbell_addr;
	uint32_t rx_ind_ring_dbell_ind_val;
	uint32_t rx_ind_ring_dbell_ind_cached_val;
	uint32_t rx_ind_ring_rdidx_addr;
	uint32_t rx_ind_ring_rd_idx_cached_val;
	uint32_t rx_refill_idx;
	uint32_t rx_num_pkts_indicated;
	uint32_t rx_buf_refilled;
	uint32_t rx_num_ind_drop_no_space;
	uint32_t rx_num_ind_drop_no_buf;
	uint32_t rx_is_suspend;
	uint32_t rx_reserved;
};

/* WLAN event deferred while uC resources are (un)loading */
struct ipa_uc_pending_event {
	cdf_list_node_t node;
	hdd_adapter_t *adapter;
	enum ipa_wlan_event type;
	uint8_t sta_id;
	uint8_t mac_addr[CDF_MAC_ADDR_SIZE];
};
327
328/**
329 * struct uc_rm_work_struct
330 * @work: uC RM work
331 * @event: IPA RM event
332 */
333struct uc_rm_work_struct {
334 struct work_struct work;
335 enum ipa_rm_event event;
336};
337
338/**
339 * struct uc_op_work_struct
340 * @work: uC OP work
341 * @msg: OP message
342 */
343struct uc_op_work_struct {
344 struct work_struct work;
345 struct op_msg_type *msg;
346};
347static uint8_t vdev_to_iface[CSR_ROAM_SESSION_MAX];
348
349/**
350 * struct uc_rt_debug_info
351 * @time: system time
352 * @ipa_excep_count: IPA exception packet count
353 * @rx_drop_count: IPA Rx drop packet count
354 * @net_sent_count: IPA Rx packet sent to network stack count
355 * @rx_discard_count: IPA Rx discard packet count
356 * @rx_mcbc_count: IPA Rx BCMC packet count
357 * @tx_mcbc_count: IPA Tx BCMC packet countt
358 * @tx_fwd_count: IPA Tx forward packet count
359 * @rx_destructor_call: IPA Rx packet destructor count
360 */
361struct uc_rt_debug_info {
362 v_TIME_t time;
363 uint64_t ipa_excep_count;
364 uint64_t rx_drop_count;
365 uint64_t net_sent_count;
366 uint64_t rx_discard_count;
367 uint64_t rx_mcbc_count;
368 uint64_t tx_mcbc_count;
369 uint64_t tx_fwd_count;
370 uint64_t rx_destructor_call;
371};
372
/*
 * struct hdd_ipa_priv - the one global HDD<->IPA context (see ghdd_ipa).
 * Groups SYS/WDI pipe state, resource-manager state, the PM tx queue,
 * uC statistics, rt-debug bookkeeping, and the shared-memory ring
 * addresses exchanged with the IPA uC.
 */
struct hdd_ipa_priv {
	struct hdd_ipa_sys_pipe sys_pipe[HDD_IPA_MAX_SYSBAM_PIPE];
	struct hdd_ipa_iface_context iface_context[HDD_IPA_MAX_IFACE];
	uint8_t num_iface;
	enum hdd_ipa_rm_state rm_state;
	/*
	 * IPA driver can send RM notifications with IRQ disabled so using cdf
	 * APIs as it is taken care gracefully. Without this, kernel would throw
	 * an warning if spin_lock_bh is used while IRQ is disabled
	 */
	cdf_spinlock_t rm_lock;
	struct uc_rm_work_struct uc_rm_work;
	struct uc_op_work_struct uc_op_work[HDD_IPA_UC_OPCODE_MAX];
	cdf_wake_lock_t wake_lock;
	struct delayed_work wake_lock_work;
	bool wake_lock_released;

	enum ipa_client_type prod_client;

	atomic_t tx_ref_cnt;
	/* packets held while suspended; flushed by pm_work */
	cdf_nbuf_queue_t pm_queue_head;
	struct work_struct pm_work;
	cdf_spinlock_t pm_lock;
	bool suspended;

	/* SW descriptor pool (q_lock protects the counts and lists below) */
	uint32_t pending_hw_desc_cnt;
	uint32_t hw_desc_cnt;
	spinlock_t q_lock;
	uint32_t freeq_cnt;
	struct list_head free_desc_head;

	uint32_t pend_q_cnt;
	struct list_head pend_desc_head;

	hdd_context_t *hdd_ctx;

	struct dentry *debugfs_dir;
	struct hdd_ipa_stats stats;

	struct notifier_block ipv4_notifier;
	uint32_t curr_prod_bw;
	uint32_t curr_cons_bw;

	/* uC (WDI) pipe state; ipa_lock serializes access (see stat query) */
	uint8_t activated_fw_pipe;
	uint8_t sap_num_connected_sta;
	uint8_t sta_connected;
	uint32_t tx_pipe_handle;
	uint32_t rx_pipe_handle;
	bool resource_loading;
	bool resource_unloading;
	bool pending_cons_req;
	struct ipa_uc_stas_map assoc_stas_map[WLAN_MAX_STA_COUNT];
	cdf_list_t pending_event;
	cdf_mutex_t event_lock;
	bool ipa_pipes_down;
	uint32_t ipa_tx_packets_diff;
	uint32_t ipa_rx_packets_diff;
	uint32_t ipa_p_tx_packets;
	uint32_t ipa_p_rx_packets;
	uint32_t stat_req_reason;
	/* rt-debug counters and ring (rt_debug_lock protects the ring) */
	uint64_t ipa_tx_forward;
	uint64_t ipa_rx_discard;
	uint64_t ipa_rx_net_send_count;
	uint64_t ipa_rx_internel_drop_count;
	uint64_t ipa_rx_destructor_count;
	cdf_mc_timer_t rt_debug_timer;
	struct uc_rt_debug_info rt_bug_buffer[HDD_IPA_UC_RT_DEBUG_BUF_COUNT];
	unsigned int rt_buf_fill_index;
	cdf_mc_timer_t rt_debug_fill_timer;
	cdf_mutex_t rt_debug_lock;
	cdf_mutex_t ipa_lock;

	/* CE resources */
	cdf_dma_addr_t ce_sr_base_paddr;
	uint32_t ce_sr_ring_size;
	cdf_dma_addr_t ce_reg_paddr;

	/* WLAN TX:IPA->WLAN */
	cdf_dma_addr_t tx_comp_ring_base_paddr;
	uint32_t tx_comp_ring_size;
	uint32_t tx_num_alloc_buffer;

	/* WLAN RX:WLAN->IPA */
	cdf_dma_addr_t rx_rdy_ring_base_paddr;
	uint32_t rx_rdy_ring_size;
	cdf_dma_addr_t rx_proc_done_idx_paddr;
	void *rx_proc_done_idx_vaddr;

	/* WLAN RX2:WLAN->IPA */
	cdf_dma_addr_t rx2_rdy_ring_base_paddr;
	uint32_t rx2_rdy_ring_size;
	cdf_dma_addr_t rx2_proc_done_idx_paddr;
	void *rx2_proc_done_idx_vaddr;

	/* IPA UC doorbell registers paddr */
	cdf_dma_addr_t tx_comp_doorbell_paddr;
	cdf_dma_addr_t rx_ready_doorbell_paddr;
};
471
/* Header lengths for the SW (CLD) and uC (WDI) tx/rx paths */
#define HDD_IPA_WLAN_CLD_HDR_LEN sizeof(struct hdd_ipa_cld_hdr)
#define HDD_IPA_UC_WLAN_CLD_HDR_LEN 0
#define HDD_IPA_WLAN_TX_HDR_LEN sizeof(struct hdd_ipa_tx_hdr)
#define HDD_IPA_UC_WLAN_TX_HDR_LEN sizeof(struct hdd_ipa_uc_tx_hdr)
#define HDD_IPA_WLAN_RX_HDR_LEN sizeof(struct hdd_ipa_rx_hdr)
#define HDD_IPA_UC_WLAN_RX_HDR_LEN sizeof(struct hdd_ipa_uc_rx_hdr)

/* FW rx descriptor flag bits */
#define HDD_IPA_FW_RX_DESC_DISCARD_M 0x1
#define HDD_IPA_FW_RX_DESC_FORWARD_M 0x2

/* Extract the iface id from the CLD header at the front of an rx buffer */
#define HDD_IPA_GET_IFACE_ID(_data) \
	(((struct hdd_ipa_cld_hdr *) (_data))->iface_id)

#define HDD_IPA_LOG(LVL, fmt, args ...) \
	CDF_TRACE(CDF_MODULE_ID_HDD, LVL, \
		  "%s:%d: "fmt, __func__, __LINE__, ## args)

#define HDD_IPA_DBG_DUMP(_lvl, _prefix, _buf, _len) \
	do { \
		CDF_TRACE(CDF_MODULE_ID_HDD, _lvl, "%s:", _prefix); \
		CDF_TRACE_HEX_DUMP(CDF_MODULE_ID_HDD, _lvl, _buf, _len); \
	} while (0)

/* True only when ALL bits of _mask are set in the IpaConfig ini value */
#define HDD_IPA_IS_CONFIG_ENABLED(_hdd_ctx, _mask) \
	(((_hdd_ctx)->config->IpaConfig & (_mask)) == (_mask))

#define HDD_IPA_INCREASE_INTERNAL_DROP_COUNT(hdd_ipa) \
	do { \
		hdd_ipa->ipa_rx_internel_drop_count++; \
	} while (0)
#define HDD_IPA_INCREASE_NET_SEND_COUNT(hdd_ipa) \
	do { \
		hdd_ipa->ipa_rx_net_send_count++; \
	} while (0)
/* Wrap-safe difference of two monotonically increasing counters */
#define HDD_BW_GET_DIFF(_x, _y) (unsigned long)((ULONG_MAX - (_y)) + (_x) + 1)

/* Temporary macro to make a build without IPA V2 */
#ifdef IPA_V2
#define HDD_IPA_WDI2_SET(pipe_in, ipa_ctxt) \
do { \
	pipe_in.u.ul.rdy_ring_rp_va = ipa_ctxt->rx_proc_done_idx_vaddr; \
	pipe_in.u.ul.rdy_comp_ring_base_pa = ipa_ctxt->rx2_rdy_ring_base_paddr;\
	pipe_in.u.ul.rdy_comp_ring_size = ipa_ctxt->rx2_rdy_ring_size; \
	pipe_in.u.ul.rdy_comp_ring_wp_pa = ipa_ctxt->rx2_proc_done_idx_paddr; \
	pipe_in.u.ul.rdy_comp_ring_wp_va = ipa_ctxt->rx2_proc_done_idx_vaddr; \
} while (0)
#else
/* Do nothing */
#define HDD_IPA_WDI2_SET(pipe_in, ipa_ctxt)
#endif /* IPA_V2 */
522
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800523static struct hdd_ipa_adapter_2_client {
524 enum ipa_client_type cons_client;
525 enum ipa_client_type prod_client;
526} hdd_ipa_adapter_2_client[HDD_IPA_MAX_IFACE] = {
527 {
528 IPA_CLIENT_WLAN2_CONS, IPA_CLIENT_WLAN1_PROD
529 }, {
530 IPA_CLIENT_WLAN3_CONS, IPA_CLIENT_WLAN1_PROD
531 }, {
532 IPA_CLIENT_WLAN4_CONS, IPA_CLIENT_WLAN1_PROD
533 },
534};
535
536/* For Tx pipes, use Ethernet-II Header format */
537struct hdd_ipa_uc_tx_hdr ipa_uc_tx_hdr = {
538 {
Leo Chang3bc8fed2015-11-13 10:59:47 -0800539 0x0000,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800540 0x00000000,
541 0x00000000
542 },
543 {
544 0x00000000
545 },
546 {
547 {0x00, 0x03, 0x7f, 0xaa, 0xbb, 0xcc},
548 {0x00, 0x03, 0x7f, 0xdd, 0xee, 0xff},
549 0x0008
550 }
551};
552
553/* For Tx pipes, use 802.3 Header format */
554static struct hdd_ipa_tx_hdr ipa_tx_hdr = {
555 {
556 {0xDE, 0xAD, 0xBE, 0xEF, 0xFF, 0xFF},
557 {0xDE, 0xAD, 0xBE, 0xEF, 0xFF, 0xFF},
558 0x00 /* length can be zero */
559 },
560 {
561 /* LLC SNAP header 8 bytes */
562 0xaa, 0xaa,
563 {0x03, 0x00, 0x00, 0x00},
564 0x0008 /* type value(2 bytes) ,filled by wlan */
565 /* 0x0800 - IPV4, 0x86dd - IPV6 */
566 }
567};
568
569static const char *op_string[] = {
570 "TX_SUSPEND",
571 "TX_RESUME",
572 "RX_SUSPEND",
573 "RX_RESUME",
574 "STATS",
575};
576
/* Single module-wide HDD IPA context; set during init, read by callbacks */
static struct hdd_ipa_priv *ghdd_ipa;

/* Local Function Prototypes */
static void hdd_ipa_i2w_cb(void *priv, enum ipa_dp_evt_type evt,
			   unsigned long data);
static void hdd_ipa_w2i_cb(void *priv, enum ipa_dp_evt_type evt,
			   unsigned long data);

static void hdd_ipa_cleanup_iface(struct hdd_ipa_iface_context *iface_context);
586
587/**
588 * hdd_ipa_is_enabled() - Is IPA enabled?
589 * @hdd_ctx: Global HDD context
590 *
591 * Return: true if IPA is enabled, false otherwise
592 */
593bool hdd_ipa_is_enabled(hdd_context_t *hdd_ctx)
594{
595 return HDD_IPA_IS_CONFIG_ENABLED(hdd_ctx, HDD_IPA_ENABLE_MASK);
596}
597
598/**
599 * hdd_ipa_uc_is_enabled() - Is IPA uC offload enabled?
600 * @hdd_ctx: Global HDD context
601 *
602 * Return: true if IPA uC offload is enabled, false otherwise
603 */
604bool hdd_ipa_uc_is_enabled(hdd_context_t *hdd_ctx)
605{
606 return HDD_IPA_IS_CONFIG_ENABLED(hdd_ctx, HDD_IPA_UC_ENABLE_MASK);
607}
608
609/**
610 * hdd_ipa_uc_sta_is_enabled() - Is STA mode IPA uC offload enabled?
611 * @hdd_ctx: Global HDD context
612 *
613 * Return: true if STA mode IPA uC offload is enabled, false otherwise
614 */
615static inline bool hdd_ipa_uc_sta_is_enabled(hdd_context_t *hdd_ctx)
616{
617 return HDD_IPA_IS_CONFIG_ENABLED(hdd_ctx, HDD_IPA_UC_STA_ENABLE_MASK);
618}
619
620/**
621 * hdd_ipa_is_pre_filter_enabled() - Is IPA pre-filter enabled?
622 * @hdd_ipa: Global HDD IPA context
623 *
624 * Return: true if pre-filter is enabled, otherwise false
625 */
626static inline bool hdd_ipa_is_pre_filter_enabled(hdd_context_t *hdd_ctx)
627{
628 return HDD_IPA_IS_CONFIG_ENABLED(hdd_ctx,
629 HDD_IPA_PRE_FILTER_ENABLE_MASK);
630}
631
632/**
633 * hdd_ipa_is_ipv6_enabled() - Is IPA IPv6 enabled?
634 * @hdd_ipa: Global HDD IPA context
635 *
636 * Return: true if IPv6 is enabled, otherwise false
637 */
638static inline bool hdd_ipa_is_ipv6_enabled(hdd_context_t *hdd_ctx)
639{
640 return HDD_IPA_IS_CONFIG_ENABLED(hdd_ctx, HDD_IPA_IPV6_ENABLE_MASK);
641}
642
643/**
644 * hdd_ipa_is_rm_enabled() - Is IPA resource manager enabled?
645 * @hdd_ipa: Global HDD IPA context
646 *
647 * Return: true if resource manager is enabled, otherwise false
648 */
649static inline bool hdd_ipa_is_rm_enabled(hdd_context_t *hdd_ctx)
650{
651 return HDD_IPA_IS_CONFIG_ENABLED(hdd_ctx, HDD_IPA_RM_ENABLE_MASK);
652}
653
654/**
655 * hdd_ipa_is_rt_debugging_enabled() - Is IPA real-time debug enabled?
656 * @hdd_ipa: Global HDD IPA context
657 *
658 * Return: true if resource manager is enabled, otherwise false
659 */
660static inline bool hdd_ipa_is_rt_debugging_enabled(hdd_context_t *hdd_ctx)
661{
662 return HDD_IPA_IS_CONFIG_ENABLED(hdd_ctx, HDD_IPA_REAL_TIME_DEBUGGING);
663}
664
665/**
666 * hdd_ipa_is_clk_scaling_enabled() - Is IPA clock scaling enabled?
667 * @hdd_ipa: Global HDD IPA context
668 *
669 * Return: true if clock scaling is enabled, otherwise false
670 */
671static inline bool hdd_ipa_is_clk_scaling_enabled(hdd_context_t *hdd_ctx)
672{
673 return HDD_IPA_IS_CONFIG_ENABLED(hdd_ctx,
674 HDD_IPA_CLK_SCALING_ENABLE_MASK |
675 HDD_IPA_RM_ENABLE_MASK);
676}
677
678/**
679 * hdd_ipa_uc_rt_debug_host_fill - fill rt debug buffer
680 * @ctext: pointer to hdd context.
681 *
682 * If rt debug enabled, periodically called, and fill debug buffer
683 *
684 * Return: none
685 */
686static void hdd_ipa_uc_rt_debug_host_fill(void *ctext)
687{
688 hdd_context_t *hdd_ctx = (hdd_context_t *)ctext;
689 struct hdd_ipa_priv *hdd_ipa;
690 struct uc_rt_debug_info *dump_info = NULL;
691
692 if (wlan_hdd_validate_context(hdd_ctx))
693 return;
694
695 if (!hdd_ctx->hdd_ipa || !hdd_ipa_uc_is_enabled(hdd_ctx)) {
696 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
697 "%s: IPA UC is not enabled", __func__);
698 return;
699 }
700
701 hdd_ipa = (struct hdd_ipa_priv *)hdd_ctx->hdd_ipa;
702
703 cdf_mutex_acquire(&hdd_ipa->rt_debug_lock);
704 dump_info = &hdd_ipa->rt_bug_buffer[
705 hdd_ipa->rt_buf_fill_index % HDD_IPA_UC_RT_DEBUG_BUF_COUNT];
706
707 dump_info->time = cdf_mc_timer_get_system_time();
708 dump_info->ipa_excep_count = hdd_ipa->stats.num_rx_excep;
709 dump_info->rx_drop_count = hdd_ipa->ipa_rx_internel_drop_count;
710 dump_info->net_sent_count = hdd_ipa->ipa_rx_net_send_count;
711 dump_info->rx_discard_count = hdd_ipa->ipa_rx_discard;
712 dump_info->tx_mcbc_count = hdd_ipa->stats.num_tx_bcmc;
713 dump_info->tx_fwd_count = hdd_ipa->ipa_tx_forward;
714 dump_info->rx_destructor_call = hdd_ipa->ipa_rx_destructor_count;
715 hdd_ipa->rt_buf_fill_index++;
716 cdf_mutex_release(&hdd_ipa->rt_debug_lock);
717
718 cdf_mc_timer_start(&hdd_ipa->rt_debug_fill_timer,
719 HDD_IPA_UC_RT_DEBUG_FILL_INTERVAL);
720}
721
722/**
723 * hdd_ipa_uc_rt_debug_host_dump - dump rt debug buffer
724 * @hdd_ctx: pointer to hdd context.
725 *
726 * If rt debug enabled, dump debug buffer contents based on requirement
727 *
728 * Return: none
729 */
730void hdd_ipa_uc_rt_debug_host_dump(hdd_context_t *hdd_ctx)
731{
732 struct hdd_ipa_priv *hdd_ipa;
733 unsigned int dump_count;
734 unsigned int dump_index;
735 struct uc_rt_debug_info *dump_info = NULL;
736
737 if (wlan_hdd_validate_context(hdd_ctx))
738 return;
739
740 hdd_ipa = hdd_ctx->hdd_ipa;
741 if (!hdd_ipa || !hdd_ipa_uc_is_enabled(hdd_ctx)) {
742 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
743 "%s: IPA UC is not enabled", __func__);
744 return;
745 }
746
747 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
748 "========= WLAN-IPA DEBUG BUF DUMP ==========\n");
749 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
750 " TM : EXEP : DROP : NETS : MCBC : TXFD : DSTR : DSCD\n");
751
752 cdf_mutex_acquire(&hdd_ipa->rt_debug_lock);
753 for (dump_count = 0;
754 dump_count < HDD_IPA_UC_RT_DEBUG_BUF_COUNT;
755 dump_count++) {
756 dump_index = (hdd_ipa->rt_buf_fill_index + dump_count) %
757 HDD_IPA_UC_RT_DEBUG_BUF_COUNT;
758 dump_info = &hdd_ipa->rt_bug_buffer[dump_index];
759 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
760 "%12lu:%10llu:%10llu:%10llu:%10llu:%10llu:%10llu:%10llu\n",
761 dump_info->time, dump_info->ipa_excep_count,
762 dump_info->rx_drop_count, dump_info->net_sent_count,
763 dump_info->tx_mcbc_count, dump_info->tx_fwd_count,
764 dump_info->rx_destructor_call,
765 dump_info->rx_discard_count);
766 }
767 cdf_mutex_release(&hdd_ipa->rt_debug_lock);
768 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
769 "======= WLAN-IPA DEBUG BUF DUMP END ========\n");
770}
771
772/**
773 * hdd_ipa_uc_rt_debug_handler - periodic memory health monitor handler
774 * @ctext: pointer to hdd context.
775 *
776 * periodically called by timer expire
777 * will try to alloc dummy memory and detect out of memory condition
778 * if out of memory detected, dump wlan-ipa stats
779 *
780 * Return: none
781 */
782static void hdd_ipa_uc_rt_debug_handler(void *ctext)
783{
784 hdd_context_t *hdd_ctx = (hdd_context_t *)ctext;
785 struct hdd_ipa_priv *hdd_ipa = (struct hdd_ipa_priv *)hdd_ctx->hdd_ipa;
786 void *dummy_ptr = NULL;
787
788 if (wlan_hdd_validate_context(hdd_ctx))
789 return;
790
791 if (!hdd_ipa_is_rt_debugging_enabled(hdd_ctx)) {
792 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
793 "%s: IPA RT debug is not enabled", __func__);
794 return;
795 }
796
797 /* Allocate dummy buffer periodically and free immediately. this will
798 * proactively detect OOM and if allocation fails dump ipa stats
799 */
800 dummy_ptr = kmalloc(HDD_IPA_UC_DEBUG_DUMMY_MEM_SIZE,
801 GFP_KERNEL | GFP_ATOMIC);
802 if (!dummy_ptr) {
803 HDD_IPA_LOG(CDF_TRACE_LEVEL_FATAL,
804 "%s: Dummy alloc fail", __func__);
805 hdd_ipa_uc_rt_debug_host_dump(hdd_ctx);
806 hdd_ipa_uc_stat_request(
807 hdd_get_adapter(hdd_ctx, WLAN_HDD_SOFTAP), 1);
808 } else {
809 kfree(dummy_ptr);
810 }
811
812 cdf_mc_timer_start(&hdd_ipa->rt_debug_timer,
813 HDD_IPA_UC_RT_DEBUG_PERIOD);
814}
815
816/**
817 * hdd_ipa_uc_rt_debug_destructor - called by data packet free
818 * @skb: packet pinter
819 *
820 * when free data packet, will be invoked by wlan client and will increase
821 * free counter
822 *
823 * Return: none
824 */
825void hdd_ipa_uc_rt_debug_destructor(struct sk_buff *skb)
826{
827 if (!ghdd_ipa) {
828 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
829 "%s: invalid hdd context", __func__);
830 return;
831 }
832
833 ghdd_ipa->ipa_rx_destructor_count++;
834}
835
836/**
837 * hdd_ipa_uc_rt_debug_deinit - remove resources to handle rt debugging
838 * @hdd_ctx: hdd main context
839 *
840 * free all rt debugging resources
841 *
842 * Return: none
843 */
844static void hdd_ipa_uc_rt_debug_deinit(hdd_context_t *hdd_ctx)
845{
846 struct hdd_ipa_priv *hdd_ipa = (struct hdd_ipa_priv *)hdd_ctx->hdd_ipa;
847
848 if (CDF_TIMER_STATE_STOPPED !=
849 cdf_mc_timer_get_current_state(&hdd_ipa->rt_debug_fill_timer)) {
850 cdf_mc_timer_stop(&hdd_ipa->rt_debug_fill_timer);
851 }
852 cdf_mc_timer_destroy(&hdd_ipa->rt_debug_fill_timer);
853 cdf_mutex_destroy(&hdd_ipa->rt_debug_lock);
854
855 if (!hdd_ipa_is_rt_debugging_enabled(hdd_ctx)) {
856 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
857 "%s: IPA RT debug is not enabled", __func__);
858 return;
859 }
860
861 if (CDF_TIMER_STATE_STOPPED !=
862 cdf_mc_timer_get_current_state(&hdd_ipa->rt_debug_timer)) {
863 cdf_mc_timer_stop(&hdd_ipa->rt_debug_timer);
864 }
865 cdf_mc_timer_destroy(&hdd_ipa->rt_debug_timer);
866}
867
868/**
869 * hdd_ipa_uc_rt_debug_init - intialize resources to handle rt debugging
870 * @hdd_ctx: hdd main context
871 *
872 * alloc and initialize all rt debugging resources
873 *
874 * Return: none
875 */
876static void hdd_ipa_uc_rt_debug_init(hdd_context_t *hdd_ctx)
877{
878 struct hdd_ipa_priv *hdd_ipa = (struct hdd_ipa_priv *)hdd_ctx->hdd_ipa;
879
880 cdf_mutex_init(&hdd_ipa->rt_debug_lock);
881 cdf_mc_timer_init(&hdd_ipa->rt_debug_fill_timer, CDF_TIMER_TYPE_SW,
882 hdd_ipa_uc_rt_debug_host_fill, (void *)hdd_ctx);
883 hdd_ipa->rt_buf_fill_index = 0;
884 cdf_mem_zero(hdd_ipa->rt_bug_buffer,
885 sizeof(struct uc_rt_debug_info) *
886 HDD_IPA_UC_RT_DEBUG_BUF_COUNT);
887 hdd_ipa->ipa_tx_forward = 0;
888 hdd_ipa->ipa_rx_discard = 0;
889 hdd_ipa->ipa_rx_net_send_count = 0;
890 hdd_ipa->ipa_rx_internel_drop_count = 0;
891 hdd_ipa->ipa_rx_destructor_count = 0;
892
893 cdf_mc_timer_start(&hdd_ipa->rt_debug_fill_timer,
894 HDD_IPA_UC_RT_DEBUG_FILL_INTERVAL);
895
896 /* Reatime debug enable on feature enable */
897 if (!hdd_ipa_is_rt_debugging_enabled(hdd_ctx)) {
898 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
899 "%s: IPA RT debug is not enabled", __func__);
900 return;
901 }
902 cdf_mc_timer_init(&hdd_ipa->rt_debug_timer, CDF_TIMER_TYPE_SW,
903 hdd_ipa_uc_rt_debug_handler, (void *)hdd_ctx);
904 cdf_mc_timer_start(&hdd_ipa->rt_debug_timer,
905 HDD_IPA_UC_RT_DEBUG_PERIOD);
906
907}
908
909/**
910 * hdd_ipa_uc_stat_query() - Query the IPA stats
911 * @hdd_ctx: Global HDD context
912 * @ipa_tx_diff: tx packet count diff from previous
913 * tx packet count
914 * @ipa_rx_diff: rx packet count diff from previous
915 * rx packet count
916 *
917 * Return: true if IPA is enabled, false otherwise
918 */
919void hdd_ipa_uc_stat_query(hdd_context_t *pHddCtx,
920 uint32_t *ipa_tx_diff, uint32_t *ipa_rx_diff)
921{
922 struct hdd_ipa_priv *hdd_ipa;
923
924 hdd_ipa = (struct hdd_ipa_priv *)pHddCtx->hdd_ipa;
925 *ipa_tx_diff = 0;
926 *ipa_rx_diff = 0;
927
928 if (!hdd_ipa_is_enabled(pHddCtx) ||
929 !(hdd_ipa_uc_is_enabled(pHddCtx))) {
930 return;
931 }
932
Yun Parke59b3912015-11-09 13:19:06 -0800933 cdf_mutex_acquire(&hdd_ipa->ipa_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800934 if ((HDD_IPA_UC_NUM_WDI_PIPE == hdd_ipa->activated_fw_pipe) &&
935 (false == hdd_ipa->resource_loading)) {
936 *ipa_tx_diff = hdd_ipa->ipa_tx_packets_diff;
937 *ipa_rx_diff = hdd_ipa->ipa_rx_packets_diff;
938 HDD_IPA_LOG(LOG1, "STAT Query TX DIFF %d, RX DIFF %d",
939 *ipa_tx_diff, *ipa_rx_diff);
940 }
Yun Parke59b3912015-11-09 13:19:06 -0800941 cdf_mutex_release(&hdd_ipa->ipa_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800942 return;
943}
944
945/**
946 * hdd_ipa_uc_stat_request() - Get IPA stats from IPA.
947 * @adapter: network adapter
948 * @reason: STAT REQ Reason
949 *
950 * Return: None
951 */
952void hdd_ipa_uc_stat_request(hdd_adapter_t *adapter, uint8_t reason)
953{
954 hdd_context_t *pHddCtx;
955 struct hdd_ipa_priv *hdd_ipa;
956
957 if (!adapter) {
958 return;
959 }
960
961 pHddCtx = (hdd_context_t *)adapter->pHddCtx;
962 hdd_ipa = (struct hdd_ipa_priv *)pHddCtx->hdd_ipa;
963 if (!hdd_ipa_is_enabled(pHddCtx) ||
964 !(hdd_ipa_uc_is_enabled(pHddCtx))) {
965 return;
966 }
967
968 HDD_IPA_LOG(LOG1, "STAT REQ Reason %d", reason);
Yun Parke59b3912015-11-09 13:19:06 -0800969 cdf_mutex_acquire(&hdd_ipa->ipa_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800970 if ((HDD_IPA_UC_NUM_WDI_PIPE == hdd_ipa->activated_fw_pipe) &&
971 (false == hdd_ipa->resource_loading)) {
972 hdd_ipa->stat_req_reason = reason;
973 wma_cli_set_command(
974 (int)adapter->sessionId,
975 (int)WMA_VDEV_TXRX_GET_IPA_UC_FW_STATS_CMDID,
976 0, VDEV_CMD);
977 }
Yun Parke59b3912015-11-09 13:19:06 -0800978 cdf_mutex_release(&hdd_ipa->ipa_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800979}
980
981/**
982 * hdd_ipa_uc_find_add_assoc_sta() - Find associated station
983 * @hdd_ipa: Global HDD IPA context
984 * @sta_add: Should station be added
985 * @sta_id: ID of the station being queried
986 *
987 * Return: true if the station was found
988 */
989static bool hdd_ipa_uc_find_add_assoc_sta(struct hdd_ipa_priv *hdd_ipa,
990 bool sta_add, uint8_t sta_id)
991{
992 bool sta_found = false;
993 uint8_t idx;
994 for (idx = 0; idx < WLAN_MAX_STA_COUNT; idx++) {
995 if ((hdd_ipa->assoc_stas_map[idx].is_reserved) &&
996 (hdd_ipa->assoc_stas_map[idx].sta_id == sta_id)) {
997 sta_found = true;
998 break;
999 }
1000 }
1001 if (sta_add && sta_found) {
1002 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
1003 "%s: STA ID %d already exist, cannot add",
1004 __func__, sta_id);
1005 return sta_found;
1006 }
1007 if (sta_add) {
1008 for (idx = 0; idx < WLAN_MAX_STA_COUNT; idx++) {
1009 if (!hdd_ipa->assoc_stas_map[idx].is_reserved) {
1010 hdd_ipa->assoc_stas_map[idx].is_reserved = true;
1011 hdd_ipa->assoc_stas_map[idx].sta_id = sta_id;
1012 return sta_found;
1013 }
1014 }
1015 }
1016 if (!sta_add && !sta_found) {
1017 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
1018 "%s: STA ID %d does not exist, cannot delete",
1019 __func__, sta_id);
1020 return sta_found;
1021 }
1022 if (!sta_add) {
1023 for (idx = 0; idx < WLAN_MAX_STA_COUNT; idx++) {
1024 if ((hdd_ipa->assoc_stas_map[idx].is_reserved) &&
1025 (hdd_ipa->assoc_stas_map[idx].sta_id == sta_id)) {
1026 hdd_ipa->assoc_stas_map[idx].is_reserved =
1027 false;
1028 hdd_ipa->assoc_stas_map[idx].sta_id = 0xFF;
1029 return sta_found;
1030 }
1031 }
1032 }
1033 return sta_found;
1034}
1035
1036/**
1037 * hdd_ipa_uc_enable_pipes() - Enable IPA uC pipes
1038 * @hdd_ipa: Global HDD IPA context
1039 *
1040 * Return: 0 on success, negative errno if error
1041 */
1042static int hdd_ipa_uc_enable_pipes(struct hdd_ipa_priv *hdd_ipa)
1043{
1044 int result;
1045 p_cds_contextType cds_ctx = hdd_ipa->hdd_ctx->pcds_context;
1046
1047 /* ACTIVATE TX PIPE */
Yun Park4cab6ee2015-10-27 11:43:40 -07001048 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
1049 "%s: Enable TX PIPE(tx_pipe_handle=%d)",
1050 __func__, hdd_ipa->tx_pipe_handle);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001051 result = ipa_enable_wdi_pipe(hdd_ipa->tx_pipe_handle);
1052 if (result) {
1053 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
1054 "%s: Enable TX PIPE fail, code %d",
1055 __func__, result);
1056 return result;
1057 }
1058 result = ipa_resume_wdi_pipe(hdd_ipa->tx_pipe_handle);
1059 if (result) {
1060 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
1061 "%s: Resume TX PIPE fail, code %d",
1062 __func__, result);
1063 return result;
1064 }
1065 ol_txrx_ipa_uc_set_active(cds_ctx->pdev_txrx_ctx, true, true);
1066
1067 /* ACTIVATE RX PIPE */
Yun Park4cab6ee2015-10-27 11:43:40 -07001068 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
1069 "%s: Enable RX PIPE(rx_pipe_handle=%d)",
1070 __func__, hdd_ipa->rx_pipe_handle);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001071 result = ipa_enable_wdi_pipe(hdd_ipa->rx_pipe_handle);
1072 if (result) {
1073 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
1074 "%s: Enable RX PIPE fail, code %d",
1075 __func__, result);
1076 return result;
1077 }
1078 result = ipa_resume_wdi_pipe(hdd_ipa->rx_pipe_handle);
1079 if (result) {
1080 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
1081 "%s: Resume RX PIPE fail, code %d",
1082 __func__, result);
1083 return result;
1084 }
1085 ol_txrx_ipa_uc_set_active(cds_ctx->pdev_txrx_ctx, true, false);
Leo Change3e49442015-10-26 20:07:13 -07001086 hdd_ipa->ipa_pipes_down = false;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001087 return 0;
1088}
1089
1090/**
1091 * hdd_ipa_uc_disable_pipes() - Disable IPA uC pipes
1092 * @hdd_ipa: Global HDD IPA context
1093 *
1094 * Return: 0 on success, negative errno if error
1095 */
1096static int hdd_ipa_uc_disable_pipes(struct hdd_ipa_priv *hdd_ipa)
1097{
1098 int result;
1099
Leo Change3e49442015-10-26 20:07:13 -07001100 hdd_ipa->ipa_pipes_down = true;
1101
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001102 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "%s: Disable RX PIPE", __func__);
1103 result = ipa_suspend_wdi_pipe(hdd_ipa->rx_pipe_handle);
1104 if (result) {
1105 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
1106 "%s: Suspend RX PIPE fail, code %d",
1107 __func__, result);
1108 return result;
1109 }
1110 result = ipa_disable_wdi_pipe(hdd_ipa->rx_pipe_handle);
1111 if (result) {
1112 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
1113 "%s: Disable RX PIPE fail, code %d",
1114 __func__, result);
1115 return result;
1116 }
1117
1118 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "%s: Disable TX PIPE", __func__);
1119 result = ipa_suspend_wdi_pipe(hdd_ipa->tx_pipe_handle);
1120 if (result) {
1121 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
1122 "%s: Suspend TX PIPE fail, code %d",
1123 __func__, result);
1124 return result;
1125 }
1126 result = ipa_disable_wdi_pipe(hdd_ipa->tx_pipe_handle);
1127 if (result) {
1128 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
1129 "%s: Disable TX PIPE fail, code %d",
1130 __func__, result);
1131 return result;
1132 }
1133
1134 return 0;
1135}
1136
1137/**
1138 * hdd_ipa_uc_handle_first_con() - Handle first uC IPA connection
1139 * @hdd_ipa: Global HDD IPA context
1140 *
1141 * Return: 0 on success, negative errno if error
1142 */
1143static int hdd_ipa_uc_handle_first_con(struct hdd_ipa_priv *hdd_ipa)
1144{
1145 hdd_ipa->activated_fw_pipe = 0;
1146 hdd_ipa->resource_loading = true;
Yun Park4cab6ee2015-10-27 11:43:40 -07001147
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001148 /* If RM feature enabled
1149 * Request PROD Resource first
1150 * PROD resource may return sync or async manners */
Yun Park4cab6ee2015-10-27 11:43:40 -07001151 if (hdd_ipa_is_rm_enabled(hdd_ipa->hdd_ctx)) {
1152 if (!ipa_rm_request_resource(IPA_RM_RESOURCE_WLAN_PROD)) {
1153 /* RM PROD request sync return
1154 * enable pipe immediately
1155 */
1156 if (hdd_ipa_uc_enable_pipes(hdd_ipa)) {
1157 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
1158 "%s: IPA WDI Pipe activation failed",
1159 __func__);
1160 hdd_ipa->resource_loading = false;
1161 return -EBUSY;
1162 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001163 }
1164 } else {
1165 /* RM Disabled
Yun Park4cab6ee2015-10-27 11:43:40 -07001166 * Just enabled all the PIPEs
1167 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001168 if (hdd_ipa_uc_enable_pipes(hdd_ipa)) {
1169 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
Yun Park4cab6ee2015-10-27 11:43:40 -07001170 "%s: IPA WDI Pipe activation failed",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001171 __func__);
1172 hdd_ipa->resource_loading = false;
1173 return -EBUSY;
1174 }
1175 hdd_ipa->resource_loading = false;
1176 }
Yun Park4cab6ee2015-10-27 11:43:40 -07001177
1178 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
1179 "%s: IPA WDI Pipes activated successfully", __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001180 return 0;
1181}
1182
1183/**
1184 * hdd_ipa_uc_handle_last_discon() - Handle last uC IPA disconnection
1185 * @hdd_ipa: Global HDD IPA context
1186 *
1187 * Return: None
1188 */
1189static void hdd_ipa_uc_handle_last_discon(struct hdd_ipa_priv *hdd_ipa)
1190{
1191 p_cds_contextType cds_ctx = hdd_ipa->hdd_ctx->pcds_context;
1192
1193 hdd_ipa->resource_unloading = true;
1194 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "%s: Disable FW RX PIPE", __func__);
1195 ol_txrx_ipa_uc_set_active(cds_ctx->pdev_txrx_ctx, false, false);
1196 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "%s: Disable FW TX PIPE", __func__);
1197 ol_txrx_ipa_uc_set_active(cds_ctx->pdev_txrx_ctx, false, true);
1198}
1199
1200/**
1201 * hdd_ipa_uc_rm_notify_handler() - IPA uC resource notification handler
1202 * @context: User context registered with TL (the IPA Global context is
1203 * registered
1204 * @rxpkt: Packet containing the notification
1205 * @staid: ID of the station associated with the packet
1206 *
1207 * Return: None
1208 */
1209static void
1210hdd_ipa_uc_rm_notify_handler(void *context, enum ipa_rm_event event)
1211{
1212 struct hdd_ipa_priv *hdd_ipa = context;
1213 CDF_STATUS status = CDF_STATUS_SUCCESS;
1214
1215 /*
1216 * When SSR is going on or driver is unloading, just return.
1217 */
1218 status = wlan_hdd_validate_context(hdd_ipa->hdd_ctx);
1219 if (0 != status) {
1220 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "HDD context is not valid");
1221 return;
1222 }
1223
1224 if (!hdd_ipa_is_rm_enabled(hdd_ipa->hdd_ctx))
1225 return;
1226
1227 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "%s, event code %d",
1228 __func__, event);
1229
1230 switch (event) {
1231 case IPA_RM_RESOURCE_GRANTED:
1232 /* Differed RM Granted */
1233 hdd_ipa_uc_enable_pipes(hdd_ipa);
Yun Parke59b3912015-11-09 13:19:06 -08001234 cdf_mutex_acquire(&hdd_ipa->ipa_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001235 if ((false == hdd_ipa->resource_unloading) &&
1236 (!hdd_ipa->activated_fw_pipe)) {
1237 hdd_ipa_uc_enable_pipes(hdd_ipa);
1238 }
Yun Parke59b3912015-11-09 13:19:06 -08001239 cdf_mutex_release(&hdd_ipa->ipa_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001240 if (hdd_ipa->pending_cons_req) {
1241 ipa_rm_notify_completion(IPA_RM_RESOURCE_GRANTED,
1242 IPA_RM_RESOURCE_WLAN_CONS);
1243 }
1244 hdd_ipa->pending_cons_req = false;
1245 break;
1246
1247 case IPA_RM_RESOURCE_RELEASED:
1248 /* Differed RM Released */
1249 hdd_ipa->resource_unloading = false;
1250 if (hdd_ipa->pending_cons_req) {
1251 ipa_rm_notify_completion(IPA_RM_RESOURCE_RELEASED,
1252 IPA_RM_RESOURCE_WLAN_CONS);
1253 }
1254 hdd_ipa->pending_cons_req = false;
1255 break;
1256
1257 default:
1258 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
1259 "%s, invalid event code %d", __func__, event);
1260 break;
1261 }
1262}
1263
1264/**
1265 * hdd_ipa_uc_rm_notify_defer() - Defer IPA uC notification
1266 * @hdd_ipa: Global HDD IPA context
1267 * @event: IPA resource manager event to be deferred
1268 *
1269 * This function is called when a resource manager event is received
1270 * from firmware in interrupt context. This function will defer the
1271 * handling to the OL RX thread
1272 *
1273 * Return: None
1274 */
1275static void hdd_ipa_uc_rm_notify_defer(struct work_struct *work)
1276{
1277 enum ipa_rm_event event;
1278 struct uc_rm_work_struct *uc_rm_work = container_of(work,
1279 struct uc_rm_work_struct, work);
1280 struct hdd_ipa_priv *hdd_ipa = container_of(uc_rm_work,
1281 struct hdd_ipa_priv, uc_rm_work);
1282
1283 cds_ssr_protect(__func__);
1284 event = uc_rm_work->event;
1285 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO_HIGH,
1286 "%s, posted event %d", __func__, event);
1287
1288 hdd_ipa_uc_rm_notify_handler(hdd_ipa, event);
1289 cds_ssr_unprotect(__func__);
1290
1291 return;
1292}
1293
1294/**
1295 * hdd_ipa_uc_proc_pending_event() - Process IPA uC pending events
1296 * @hdd_ipa: Global HDD IPA context
1297 *
1298 * Return: None
1299 */
1300static void hdd_ipa_uc_proc_pending_event(struct hdd_ipa_priv *hdd_ipa)
1301{
1302 unsigned int pending_event_count;
1303 struct ipa_uc_pending_event *pending_event = NULL;
1304
1305 cdf_list_size(&hdd_ipa->pending_event, &pending_event_count);
1306 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
1307 "%s, Pending Event Count %d", __func__, pending_event_count);
1308 if (!pending_event_count) {
1309 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
1310 "%s, No Pending Event", __func__);
1311 return;
1312 }
1313
1314 cdf_list_remove_front(&hdd_ipa->pending_event,
1315 (cdf_list_node_t **)&pending_event);
1316 while (pending_event != NULL) {
1317 hdd_ipa_wlan_evt(pending_event->adapter,
1318 pending_event->type,
1319 pending_event->sta_id,
1320 pending_event->mac_addr);
1321 cdf_mem_free(pending_event);
1322 pending_event = NULL;
1323 cdf_list_remove_front(&hdd_ipa->pending_event,
1324 (cdf_list_node_t **)&pending_event);
1325 }
1326}
1327
/**
 * hdd_ipa_uc_op_cb() - IPA uC operation callback
 * @op_msg: operation message received from firmware (freed here on every
 *	path once processing is done or validation fails after allocation
 *	ownership was taken)
 * @usr_ctxt: user context registered with TL (we register the HDD Global
 *	context)
 *
 * Dispatches on the firmware opcode: pipe RESUME events increment the
 * activated-pipe count (and, once all WDI pipes are up, finish resource
 * loading and replay pending events); pipe SUSPEND events decrement it
 * (and, once none remain, disable the pipes and release the RM PROD
 * resource); STATS opcodes dump host/FW/IPA counters or update the
 * bandwidth-calculation deltas depending on stat_req_reason.
 *
 * Return: None
 */
static void hdd_ipa_uc_op_cb(struct op_msg_type *op_msg, void *usr_ctxt)
{
	struct op_msg_type *msg = op_msg;
	struct ipa_uc_fw_stats *uc_fw_stat;
	struct IpaHwStatsWDIInfoData_t ipa_stat;
	struct hdd_ipa_priv *hdd_ipa;
	hdd_context_t *hdd_ctx;
	CDF_STATUS status = CDF_STATUS_SUCCESS;

	/* Nothing to free on these early exits: message is rejected before
	 * ownership of the buffer is assumed */
	if (!op_msg || !usr_ctxt) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "%s, INVALID ARG", __func__);
		return;
	}

	if (HDD_IPA_UC_OPCODE_MAX <= msg->op_code) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
			    "%s, INVALID OPCODE %d", __func__, msg->op_code);
		return;
	}

	hdd_ctx = (hdd_context_t *) usr_ctxt;

	/*
	 * When SSR is going on or driver is unloading, just return.
	 */
	status = wlan_hdd_validate_context(hdd_ctx);
	if (0 != status) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "HDD context is not valid");
		cdf_mem_free(op_msg);
		return;
	}

	hdd_ipa = (struct hdd_ipa_priv *)hdd_ctx->hdd_ipa;

	HDD_IPA_LOG(CDF_TRACE_LEVEL_DEBUG,
		    "%s, OPCODE %s", __func__, op_string[msg->op_code]);

	/* A pipe finished resuming: count it, and when every WDI pipe is up,
	 * declare loading complete and replay events queued meanwhile */
	if ((HDD_IPA_UC_OPCODE_TX_RESUME == msg->op_code) ||
	    (HDD_IPA_UC_OPCODE_RX_RESUME == msg->op_code)) {
		cdf_mutex_acquire(&hdd_ipa->ipa_lock);
		hdd_ipa->activated_fw_pipe++;
		if (HDD_IPA_UC_NUM_WDI_PIPE == hdd_ipa->activated_fw_pipe) {
			hdd_ipa->resource_loading = false;
			hdd_ipa_uc_proc_pending_event(hdd_ipa);
		}
		cdf_mutex_release(&hdd_ipa->ipa_lock);
	}

	/* A pipe finished suspending: when none remain active, shut the WDI
	 * pipes down and release the RM PROD resource */
	if ((HDD_IPA_UC_OPCODE_TX_SUSPEND == msg->op_code) ||
	    (HDD_IPA_UC_OPCODE_RX_SUSPEND == msg->op_code)) {
		cdf_mutex_acquire(&hdd_ipa->ipa_lock);
		hdd_ipa->activated_fw_pipe--;
		if (!hdd_ipa->activated_fw_pipe) {
			hdd_ipa_uc_disable_pipes(hdd_ipa);
			/* NOTE(review): both branches below are identical;
			 * either the RM release result was meant to matter
			 * (e.g. defer the unload completion on async
			 * release) or the if/else can be collapsed —
			 * confirm intent before simplifying */
			if ((hdd_ipa_is_rm_enabled(hdd_ipa->hdd_ctx)) &&
			(!ipa_rm_release_resource(IPA_RM_RESOURCE_WLAN_PROD))) {
				/* Sync return success from IPA
				 * Enable/resume all the PIPEs */
				hdd_ipa->resource_unloading = false;
				hdd_ipa_uc_proc_pending_event(hdd_ipa);
			} else {
				hdd_ipa->resource_unloading = false;
				hdd_ipa_uc_proc_pending_event(hdd_ipa);
			}
		}
		cdf_mutex_release(&hdd_ipa->ipa_lock);
	}

	if ((HDD_IPA_UC_OPCODE_STATS == msg->op_code) &&
	    (HDD_IPA_UC_STAT_REASON_DEBUG == hdd_ipa->stat_req_reason)) {

		/* STATs from host */
		CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
			  "==== IPA_UC WLAN_HOST CE ====\n"
			  "CE RING BASE: 0x%llx\n"
			  "CE RING SIZE: %d\n"
			  "CE REG ADDR : 0x%llx",
			  hdd_ipa->ce_sr_base_paddr,
			  hdd_ipa->ce_sr_ring_size,
			  hdd_ipa->ce_reg_paddr);
		CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
			  "==== IPA_UC WLAN_HOST TX ====\n"
			  "COMP RING BASE: 0x%llx\n"
			  "COMP RING SIZE: %d\n"
			  "NUM ALLOC BUF: %d\n"
			  "COMP RING DBELL : 0x%llx",
			  hdd_ipa->tx_comp_ring_base_paddr,
			  hdd_ipa->tx_comp_ring_size,
			  hdd_ipa->tx_num_alloc_buffer,
			  hdd_ipa->tx_comp_doorbell_paddr);
		CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
			  "==== IPA_UC WLAN_HOST RX ====\n"
			  "IND RING BASE: 0x%llx\n"
			  "IND RING SIZE: %d\n"
			  "IND RING DBELL : 0x%llx\n"
			  "PROC DONE IND ADDR : 0x%llx\n"
			  "NUM EXCP PKT : %llu\n"
			  "NUM TX BCMC : %llu\n"
			  "NUM TX BCMC ERR : %llu",
			  hdd_ipa->rx_rdy_ring_base_paddr,
			  hdd_ipa->rx_rdy_ring_size,
			  hdd_ipa->rx_ready_doorbell_paddr,
			  hdd_ipa->rx_proc_done_idx_paddr,
			  hdd_ipa->stats.num_rx_excep,
			  hdd_ipa->stats.num_tx_bcmc,
			  hdd_ipa->stats.num_tx_bcmc_err);
		CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
			  "==== IPA_UC WLAN_HOST CONTROL ====\n"
			  "SAP NUM STAs: %d\n"
			  "STA CONNECTED: %d\n"
			  "TX PIPE HDL: %d\n"
			  "RX PIPE HDL : %d\n"
			  "RSC LOADING : %d\n"
			  "RSC UNLOADING : %d\n"
			  "PNDNG CNS RQT : %d",
			  hdd_ipa->sap_num_connected_sta,
			  hdd_ipa->sta_connected,
			  hdd_ipa->tx_pipe_handle,
			  hdd_ipa->rx_pipe_handle,
			  (unsigned int)hdd_ipa->resource_loading,
			  (unsigned int)hdd_ipa->resource_unloading,
			  (unsigned int)hdd_ipa->pending_cons_req);

		/* STATs from FW */
		uc_fw_stat = (struct ipa_uc_fw_stats *)
			((uint8_t *)op_msg + sizeof(struct op_msg_type));
		/* NOTE(review): the "COMP RING DBELL CACHED VAL" line and its
		 * tx_comp_ring_dbell_cached_val argument appear twice below —
		 * looks like a copy/paste slip; confirm which field the second
		 * line was meant to print before changing the dump format */
		CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
			  "==== IPA_UC WLAN_FW TX ====\n"
			  "COMP RING BASE: 0x%x\n"
			  "COMP RING SIZE: %d\n"
			  "COMP RING DBELL : 0x%x\n"
			  "COMP RING DBELL IND VAL : %d\n"
			  "COMP RING DBELL CACHED VAL : %d\n"
			  "COMP RING DBELL CACHED VAL : %d\n"
			  "PKTS ENQ : %d\n"
			  "PKTS COMP : %d\n"
			  "IS SUSPEND : %d\n"
			  "RSVD : 0x%x",
			  uc_fw_stat->tx_comp_ring_base,
			  uc_fw_stat->tx_comp_ring_size,
			  uc_fw_stat->tx_comp_ring_dbell_addr,
			  uc_fw_stat->tx_comp_ring_dbell_ind_val,
			  uc_fw_stat->tx_comp_ring_dbell_cached_val,
			  uc_fw_stat->tx_comp_ring_dbell_cached_val,
			  uc_fw_stat->tx_pkts_enqueued,
			  uc_fw_stat->tx_pkts_completed,
			  uc_fw_stat->tx_is_suspend, uc_fw_stat->tx_reserved);
		CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
			  "==== IPA_UC WLAN_FW RX ====\n"
			  "IND RING BASE: 0x%x\n"
			  "IND RING SIZE: %d\n"
			  "IND RING DBELL : 0x%x\n"
			  "IND RING DBELL IND VAL : %d\n"
			  "IND RING DBELL CACHED VAL : %d\n"
			  "RDY IND ADDR : 0x%x\n"
			  "RDY IND CACHE VAL : %d\n"
			  "RFIL IND : %d\n"
			  "NUM PKT INDICAT : %d\n"
			  "BUF REFIL : %d\n"
			  "NUM DROP NO SPC : %d\n"
			  "NUM DROP NO BUF : %d\n"
			  "IS SUSPND : %d\n"
			  "RSVD : 0x%x\n",
			  uc_fw_stat->rx_ind_ring_base,
			  uc_fw_stat->rx_ind_ring_size,
			  uc_fw_stat->rx_ind_ring_dbell_addr,
			  uc_fw_stat->rx_ind_ring_dbell_ind_val,
			  uc_fw_stat->rx_ind_ring_dbell_ind_cached_val,
			  uc_fw_stat->rx_ind_ring_rdidx_addr,
			  uc_fw_stat->rx_ind_ring_rd_idx_cached_val,
			  uc_fw_stat->rx_refill_idx,
			  uc_fw_stat->rx_num_pkts_indicated,
			  uc_fw_stat->rx_buf_refilled,
			  uc_fw_stat->rx_num_ind_drop_no_space,
			  uc_fw_stat->rx_num_ind_drop_no_buf,
			  uc_fw_stat->rx_is_suspend, uc_fw_stat->rx_reserved);
		/* STATs from IPA */
		ipa_get_wdi_stats(&ipa_stat);
		CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
			  "==== IPA_UC IPA TX ====\n"
			  "NUM PROCD : %d\n"
			  "CE DBELL : 0x%x\n"
			  "NUM DBELL FIRED : %d\n"
			  "COMP RNG FULL : %d\n"
			  "COMP RNG EMPT : %d\n"
			  "COMP RNG USE HGH : %d\n"
			  "COMP RNG USE LOW : %d\n"
			  "BAM FIFO FULL : %d\n"
			  "BAM FIFO EMPT : %d\n"
			  "BAM FIFO USE HGH : %d\n"
			  "BAM FIFO USE LOW : %d\n"
			  "NUM DBELL : %d\n"
			  "NUM UNEXP DBELL : %d\n"
			  "NUM BAM INT HDL : 0x%x\n"
			  "NUM BAM INT NON-RUN : 0x%x\n"
			  "NUM QMB INT HDL : 0x%x",
			  ipa_stat.tx_ch_stats.num_pkts_processed,
			  ipa_stat.tx_ch_stats.copy_engine_doorbell_value,
			  ipa_stat.tx_ch_stats.num_db_fired,
			  ipa_stat.tx_ch_stats.tx_comp_ring_stats.ringFull,
			  ipa_stat.tx_ch_stats.tx_comp_ring_stats.ringEmpty,
			  ipa_stat.tx_ch_stats.tx_comp_ring_stats.ringUsageHigh,
			  ipa_stat.tx_ch_stats.tx_comp_ring_stats.ringUsageLow,
			  ipa_stat.tx_ch_stats.bam_stats.bamFifoFull,
			  ipa_stat.tx_ch_stats.bam_stats.bamFifoEmpty,
			  ipa_stat.tx_ch_stats.bam_stats.bamFifoUsageHigh,
			  ipa_stat.tx_ch_stats.bam_stats.bamFifoUsageLow,
			  ipa_stat.tx_ch_stats.num_db,
			  ipa_stat.tx_ch_stats.num_unexpected_db,
			  ipa_stat.tx_ch_stats.num_bam_int_handled,
			  ipa_stat.tx_ch_stats.
			  num_bam_int_in_non_runnning_state,
			  ipa_stat.tx_ch_stats.num_qmb_int_handled);

		CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
			  "==== IPA_UC IPA RX ====\n"
			  "MAX OST PKT : %d\n"
			  "NUM PKT PRCSD : %d\n"
			  "RNG RP : 0x%x\n"
			  "COMP RNG FULL : %d\n"
			  "COMP RNG EMPT : %d\n"
			  "COMP RNG USE HGH : %d\n"
			  "COMP RNG USE LOW : %d\n"
			  "BAM FIFO FULL : %d\n"
			  "BAM FIFO EMPT : %d\n"
			  "BAM FIFO USE HGH : %d\n"
			  "BAM FIFO USE LOW : %d\n"
			  "NUM DB : %d\n"
			  "NUM UNEXP DB : %d\n"
			  "NUM BAM INT HNDL : 0x%x\n",
			  ipa_stat.rx_ch_stats.max_outstanding_pkts,
			  ipa_stat.rx_ch_stats.num_pkts_processed,
			  ipa_stat.rx_ch_stats.rx_ring_rp_value,
			  ipa_stat.rx_ch_stats.rx_ind_ring_stats.ringFull,
			  ipa_stat.rx_ch_stats.rx_ind_ring_stats.ringEmpty,
			  ipa_stat.rx_ch_stats.rx_ind_ring_stats.ringUsageHigh,
			  ipa_stat.rx_ch_stats.rx_ind_ring_stats.ringUsageLow,
			  ipa_stat.rx_ch_stats.bam_stats.bamFifoFull,
			  ipa_stat.rx_ch_stats.bam_stats.bamFifoEmpty,
			  ipa_stat.rx_ch_stats.bam_stats.bamFifoUsageHigh,
			  ipa_stat.rx_ch_stats.bam_stats.bamFifoUsageLow,
			  ipa_stat.rx_ch_stats.num_db,
			  ipa_stat.rx_ch_stats.num_unexpected_db,
			  ipa_stat.rx_ch_stats.num_bam_int_handled);
	} else if ((HDD_IPA_UC_OPCODE_STATS == msg->op_code) &&
		   (HDD_IPA_UC_STAT_REASON_BW_CAL == hdd_ipa->stat_req_reason)) {
		/* STATs from FW */
		uc_fw_stat = (struct ipa_uc_fw_stats *)
			((uint8_t *)op_msg + sizeof(struct op_msg_type));
		/* Update tx/rx deltas since the previous sample for
		 * bandwidth calculation; HDD_BW_GET_DIFF presumably handles
		 * counter wrap-around — confirm at its definition */
		cdf_mutex_acquire(&hdd_ipa->ipa_lock);
		hdd_ipa->ipa_tx_packets_diff = HDD_BW_GET_DIFF(
			uc_fw_stat->tx_pkts_completed,
			hdd_ipa->ipa_p_tx_packets);
		hdd_ipa->ipa_rx_packets_diff = HDD_BW_GET_DIFF(
			(uc_fw_stat->rx_num_ind_drop_no_space +
			uc_fw_stat->rx_num_ind_drop_no_buf +
			uc_fw_stat->rx_num_pkts_indicated),
			hdd_ipa->ipa_p_rx_packets);

		hdd_ipa->ipa_p_tx_packets = uc_fw_stat->tx_pkts_completed;
		hdd_ipa->ipa_p_rx_packets =
			(uc_fw_stat->rx_num_ind_drop_no_space +
			uc_fw_stat->rx_num_ind_drop_no_buf +
			uc_fw_stat->rx_num_pkts_indicated);
		cdf_mutex_release(&hdd_ipa->ipa_lock);
	} else {
		HDD_IPA_LOG(LOGE, "INVALID REASON %d",
			    hdd_ipa->stat_req_reason);
	}
	cdf_mem_free(op_msg);
}
1607
1608
1609/**
1610 * hdd_ipa_uc_offload_enable_disable() - wdi enable/disable notify to fw
1611 * @adapter: device adapter instance
1612 * @offload_type: MCC or SCC
1613 * @enable: TX offload enable or disable
1614 *
1615 * Return: none
1616 */
1617static void hdd_ipa_uc_offload_enable_disable(hdd_adapter_t *adapter,
1618 uint32_t offload_type, uint32_t enable)
1619{
1620 struct sir_ipa_offload_enable_disable ipa_offload_enable_disable;
1621
1622 /* Lower layer may send multiple START_BSS_EVENT in DFS mode or during
1623 * channel change indication. Since these indications are sent by lower
1624 * layer as SAP updates and IPA doesn't have to do anything for these
1625 * updates so ignoring!
1626 */
1627 if (WLAN_HDD_SOFTAP == adapter->device_mode && adapter->ipa_context)
1628 return;
1629
1630 /* Lower layer may send multiple START_BSS_EVENT in DFS mode or during
1631 * channel change indication. Since these indications are sent by lower
1632 * layer as SAP updates and IPA doesn't have to do anything for these
1633 * updates so ignoring!
1634 */
1635 if (adapter->ipa_context)
1636 return;
1637
1638 cdf_mem_zero(&ipa_offload_enable_disable,
1639 sizeof(ipa_offload_enable_disable));
1640 ipa_offload_enable_disable.offload_type = offload_type;
1641 ipa_offload_enable_disable.vdev_id = adapter->sessionId;
1642 ipa_offload_enable_disable.enable = enable;
1643
1644 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
1645 "%s: offload_type=%d, vdev_id=%d, enable=%d", __func__,
1646 ipa_offload_enable_disable.offload_type,
1647 ipa_offload_enable_disable.vdev_id,
1648 ipa_offload_enable_disable.enable);
1649
1650 if (CDF_STATUS_SUCCESS !=
1651 sme_ipa_offload_enable_disable(WLAN_HDD_GET_HAL_CTX(adapter),
1652 adapter->sessionId, &ipa_offload_enable_disable)) {
1653 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
1654 "%s: Failure to enable IPA offload \
1655 (offload_type=%d, vdev_id=%d, enable=%d)", __func__,
1656 ipa_offload_enable_disable.offload_type,
1657 ipa_offload_enable_disable.vdev_id,
1658 ipa_offload_enable_disable.enable);
1659 }
1660}
1661
1662/**
1663 * hdd_ipa_uc_fw_op_event_handler - IPA uC FW OPvent handler
1664 * @work: uC OP work
1665 *
1666 * Return: None
1667 */
1668static void hdd_ipa_uc_fw_op_event_handler(struct work_struct *work)
1669{
1670 struct op_msg_type *msg;
1671 struct uc_op_work_struct *uc_op_work = container_of(work,
1672 struct uc_op_work_struct, work);
1673 struct hdd_ipa_priv *hdd_ipa = ghdd_ipa;
1674
1675 cds_ssr_protect(__func__);
1676
1677 msg = uc_op_work->msg;
1678 uc_op_work->msg = NULL;
1679 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO_HIGH,
1680 "%s, posted msg %d", __func__, msg->op_code);
1681
1682 hdd_ipa_uc_op_cb(msg, hdd_ipa->hdd_ctx);
1683
1684 cds_ssr_unprotect(__func__);
1685
1686 return;
1687}
1688
1689/**
1690 * hdd_ipa_uc_op_event_handler() - Adapter lookup
1691 * hdd_ipa_uc_fw_op_event_handler - IPA uC FW OPvent handler
1692 * @op_msg: operation message received from firmware
1693 * @hdd_ctx: Global HDD context
1694 *
1695 * Return: None
1696 */
1697static void hdd_ipa_uc_op_event_handler(uint8_t *op_msg, void *hdd_ctx)
1698{
1699 struct hdd_ipa_priv *hdd_ipa;
1700 struct op_msg_type *msg;
1701 struct uc_op_work_struct *uc_op_work;
1702 CDF_STATUS status = CDF_STATUS_SUCCESS;
1703
1704 status = wlan_hdd_validate_context(hdd_ctx);
1705 if (0 != status) {
1706 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "HDD context is not valid");
1707 goto end;
1708 }
1709
1710 msg = (struct op_msg_type *)op_msg;
1711 hdd_ipa = ((hdd_context_t *)hdd_ctx)->hdd_ipa;
1712
1713 if (unlikely(!hdd_ipa))
1714 goto end;
1715
1716 if (HDD_IPA_UC_OPCODE_MAX <= msg->op_code) {
1717 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "%s: Invalid OP Code (%d)",
1718 __func__, msg->op_code);
1719 goto end;
1720 }
1721
1722 uc_op_work = &hdd_ipa->uc_op_work[msg->op_code];
1723 if (uc_op_work->msg)
1724 /* When the same uC OPCODE is already pended, just return */
1725 goto end;
1726
1727 uc_op_work->msg = msg;
1728 schedule_work(&uc_op_work->work);
1729 return;
1730
1731end:
1732 cdf_mem_free(op_msg);
1733}
1734
/**
 * hdd_ipa_uc_ol_init() - Initialize IPA uC offload
 * @hdd_ctx: Global HDD context
 *
 * Initializes the pending-event list and locks, configures and connects
 * the WDI TX pipe then the RX pipe, publishes both doorbell addresses to
 * the txrx layer, registers the uC opcode callback, and initializes the
 * per-opcode work items.
 *
 * Return: CDF_STATUS
 */
static CDF_STATUS hdd_ipa_uc_ol_init(hdd_context_t *hdd_ctx)
{
	struct ipa_wdi_in_params pipe_in;
	struct ipa_wdi_out_params pipe_out;
	struct hdd_ipa_priv *ipa_ctxt = (struct hdd_ipa_priv *)hdd_ctx->hdd_ipa;
	p_cds_contextType cds_ctx = hdd_ctx->pcds_context;
	uint8_t i;

	cdf_mem_zero(&pipe_in, sizeof(struct ipa_wdi_in_params));
	cdf_mem_zero(&pipe_out, sizeof(struct ipa_wdi_out_params));

	cdf_list_init(&ipa_ctxt->pending_event, 1000);
	cdf_mutex_init(&ipa_ctxt->event_lock);
	cdf_mutex_init(&ipa_ctxt->ipa_lock);

	/* TX PIPE (WLAN1_CONS: IPA -> WLAN downlink) */
	pipe_in.sys.ipa_ep_cfg.nat.nat_en = IPA_BYPASS_NAT;
	pipe_in.sys.ipa_ep_cfg.hdr.hdr_len = HDD_IPA_UC_WLAN_TX_HDR_LEN;
	pipe_in.sys.ipa_ep_cfg.hdr.hdr_ofst_pkt_size_valid = 1;
	pipe_in.sys.ipa_ep_cfg.hdr.hdr_ofst_pkt_size = 0;
	pipe_in.sys.ipa_ep_cfg.hdr.hdr_additional_const_len =
		HDD_IPA_UC_WLAN_8023_HDR_SIZE;
	pipe_in.sys.ipa_ep_cfg.mode.mode = IPA_BASIC;
	pipe_in.sys.client = IPA_CLIENT_WLAN1_CONS;
	pipe_in.sys.desc_fifo_sz = hdd_ctx->config->IpaDescSize;
	pipe_in.sys.priv = hdd_ctx->hdd_ipa;
	pipe_in.sys.ipa_ep_cfg.hdr_ext.hdr_little_endian = true;
	pipe_in.sys.notify = hdd_ipa_i2w_cb;
	if (!hdd_ipa_is_rm_enabled(hdd_ctx)) {
		/* Without RM, keep IPA clocked so the pipe never stalls */
		HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
			    "%s: IPA RM DISABLED, IPA AWAKE", __func__);
		pipe_in.sys.keep_ipa_awake = true;
	}

	pipe_in.u.dl.comp_ring_base_pa = ipa_ctxt->tx_comp_ring_base_paddr;
	pipe_in.u.dl.comp_ring_size =
		ipa_ctxt->tx_comp_ring_size * sizeof(cdf_dma_addr_t);
	pipe_in.u.dl.ce_ring_base_pa = ipa_ctxt->ce_sr_base_paddr;
	pipe_in.u.dl.ce_door_bell_pa = ipa_ctxt->ce_reg_paddr;
	pipe_in.u.dl.ce_ring_size = ipa_ctxt->ce_sr_ring_size;
	pipe_in.u.dl.num_tx_buffers = ipa_ctxt->tx_num_alloc_buffer;

	/* Connect WDI IPA PIPE */
	/* NOTE(review): the return value of ipa_connect_wdi_pipe() is not
	 * checked here (nor for the RX pipe below) and the function always
	 * returns success — confirm whether a failed connect should abort */
	ipa_connect_wdi_pipe(&pipe_in, &pipe_out);
	/* Micro Controller Doorbell register */
	ipa_ctxt->tx_comp_doorbell_paddr = pipe_out.uc_door_bell_pa;
	/* WLAN TX PIPE Handle */
	ipa_ctxt->tx_pipe_handle = pipe_out.clnt_hdl;
	HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO_HIGH,
		    "TX : CRBPA 0x%x, CRS %d, CERBPA 0x%x, CEDPA 0x%x,"
		    " CERZ %d, NB %d, CDBPAD 0x%x",
		    (unsigned int)pipe_in.u.dl.comp_ring_base_pa,
		    pipe_in.u.dl.comp_ring_size,
		    (unsigned int)pipe_in.u.dl.ce_ring_base_pa,
		    (unsigned int)pipe_in.u.dl.ce_door_bell_pa,
		    pipe_in.u.dl.ce_ring_size,
		    pipe_in.u.dl.num_tx_buffers,
		    (unsigned int)ipa_ctxt->tx_comp_doorbell_paddr);

	/* RX PIPE (WLAN1_PROD: WLAN -> IPA uplink).
	 * pipe_in is deliberately reused: fields not overwritten below keep
	 * the values set for the TX pipe above. */
	pipe_in.sys.ipa_ep_cfg.nat.nat_en = IPA_BYPASS_NAT;
	pipe_in.sys.ipa_ep_cfg.hdr.hdr_len = HDD_IPA_UC_WLAN_RX_HDR_LEN;
	pipe_in.sys.ipa_ep_cfg.hdr.hdr_ofst_metadata_valid = 0;
	pipe_in.sys.ipa_ep_cfg.hdr.hdr_metadata_reg_valid = 1;
	pipe_in.sys.ipa_ep_cfg.mode.mode = IPA_BASIC;
	pipe_in.sys.client = IPA_CLIENT_WLAN1_PROD;
	pipe_in.sys.desc_fifo_sz = hdd_ctx->config->IpaDescSize +
				   sizeof(struct sps_iovec);
	pipe_in.sys.notify = hdd_ipa_w2i_cb;
	if (!hdd_ipa_is_rm_enabled(hdd_ctx)) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
			    "%s: IPA RM DISABLED, IPA AWAKE", __func__);
		pipe_in.sys.keep_ipa_awake = true;
	}

	pipe_in.u.ul.rdy_ring_base_pa = ipa_ctxt->rx_rdy_ring_base_paddr;
	pipe_in.u.ul.rdy_ring_size = ipa_ctxt->rx_rdy_ring_size;
	pipe_in.u.ul.rdy_ring_rp_pa = ipa_ctxt->rx_proc_done_idx_paddr;
	HDD_IPA_WDI2_SET(pipe_in, ipa_ctxt);
	ipa_connect_wdi_pipe(&pipe_in, &pipe_out);
	ipa_ctxt->rx_ready_doorbell_paddr = pipe_out.uc_door_bell_pa;
	ipa_ctxt->rx_pipe_handle = pipe_out.clnt_hdl;
	HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO_HIGH,
		    "RX : RRBPA 0x%x, RRS %d, PDIPA 0x%x, RDY_DB_PAD 0x%x",
		    (unsigned int)pipe_in.u.ul.rdy_ring_base_pa,
		    pipe_in.u.ul.rdy_ring_size,
		    (unsigned int)pipe_in.u.ul.rdy_ring_rp_pa,
		    (unsigned int)ipa_ctxt->rx_ready_doorbell_paddr);

	/* Hand both uC doorbell addresses to the txrx layer */
	ol_txrx_ipa_uc_set_doorbell_paddr(cds_ctx->pdev_txrx_ctx,
					  ipa_ctxt->tx_comp_doorbell_paddr,
					  ipa_ctxt->rx_ready_doorbell_paddr);

	ol_txrx_ipa_uc_register_op_cb(cds_ctx->pdev_txrx_ctx,
				      hdd_ipa_uc_op_event_handler, (void *)hdd_ctx);

	/* One deferred work item per firmware opcode */
	for (i = 0; i < HDD_IPA_UC_OPCODE_MAX; i++) {
		cnss_init_work(&ipa_ctxt->uc_op_work[i].work,
			       hdd_ipa_uc_fw_op_event_handler);
		ipa_ctxt->uc_op_work[i].msg = NULL;
	}

	return CDF_STATUS_SUCCESS;
}
1845
Leo Change3e49442015-10-26 20:07:13 -07001846/**
1847 * hdd_ipa_uc_force_pipe_shutdown() - Force shutdown IPA pipe
1848 * @hdd_ctx: hdd main context
1849 *
1850 * Force shutdown IPA pipe
1851 * Independent of FW pipe status, IPA pipe shutdonw progress
1852 * in case, any STA does not leave properly, IPA HW pipe should cleaned up
1853 * independent from FW pipe status
1854 *
1855 * Return: NONE
1856 */
1857void hdd_ipa_uc_force_pipe_shutdown(hdd_context_t *hdd_ctx)
1858{
1859 struct hdd_ipa_priv *hdd_ipa;
1860
1861 if (!hdd_ipa_is_enabled(hdd_ctx) || !hdd_ctx->hdd_ipa)
1862 return;
1863
1864 hdd_ipa = (struct hdd_ipa_priv *)hdd_ctx->hdd_ipa;
1865 if (false == hdd_ipa->ipa_pipes_down) {
1866 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
1867 "IPA pipes are not down yet, force shutdown");
1868 hdd_ipa_uc_disable_pipes(hdd_ipa);
1869 } else {
1870 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
1871 "IPA pipes are down, do nothing");
1872 }
1873
1874 return;
1875}
1876
/**
 * hdd_ipa_uc_ssr_deinit() - handle ipa deinit for SSR
 *
 * Deinit basic IPA UC host side to be in sync reloaded FW during
 * SSR
 *
 * Return: 0 - Success
 */
int hdd_ipa_uc_ssr_deinit(void)
{
	struct hdd_ipa_priv *hdd_ipa = ghdd_ipa;
	int idx;
	struct hdd_ipa_iface_context *iface_context;

	/* Nothing to do when IPA was never set up or uC offload is off */
	if ((!hdd_ipa) || (!hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)))
		return 0;

	/* Clean up HDD IPA interfaces */
	for (idx = 0; (hdd_ipa->num_iface > 0) &&
		(idx < HDD_IPA_MAX_IFACE); idx++) {
		iface_context = &hdd_ipa->iface_context[idx];
		if (iface_context && iface_context->adapter)
			hdd_ipa_cleanup_iface(iface_context);
	}

	/* After SSR, wlan driver reloads FW again. But we need to protect
	 * IPA submodule during SSR transient state. So deinit basic IPA
	 * UC host side to be in sync with reloaded FW during SSR
	 */
	if (!hdd_ipa->ipa_pipes_down)
		hdd_ipa_uc_disable_pipes(hdd_ipa);

	/* Reset the SAP association map under the IPA lock; 0xFF marks an
	 * unused station slot (presumably the "invalid sta_id" sentinel —
	 * TODO confirm against the map's users)
	 */
	cdf_mutex_acquire(&hdd_ipa->ipa_lock);
	for (idx = 0; idx < WLAN_MAX_STA_COUNT; idx++) {
		hdd_ipa->assoc_stas_map[idx].is_reserved = false;
		hdd_ipa->assoc_stas_map[idx].sta_id = 0xFF;
	}
	cdf_mutex_release(&hdd_ipa->ipa_lock);

	/* Full IPA driver cleanup not required since wlan driver is now
	 * unloaded and reloaded after SSR.
	 */
	return 0;
}
1921
/**
 * hdd_ipa_uc_ssr_reinit() - handle ipa reinit after SSR
 *
 * Init basic IPA UC host side to be in sync with reloaded FW after
 * SSR to resume IPA UC operations
 *
 * Return: 0 - Success
 */
int hdd_ipa_uc_ssr_reinit(void)
{
	/*
	 * Once SSR completes, the wlan driver itself is unloaded and
	 * reloaded, and that reload path performs the full IPA cleanup
	 * and re-initialization. This function therefore has nothing to
	 * do today; it exists as a hook for resuming IPA UC operation
	 * without a driver reload, should that ever be required.
	 */
	return 0;
}
Leo Chang3bc8fed2015-11-13 10:59:47 -08001940
1941/**
1942 * hdd_ipa_tx_packet_ipa() - send packet to IPA
1943 * @hdd_ctx: Global HDD context
1944 * @skb: skb sent to IPA
1945 * @session_id: send packet instance session id
1946 *
1947 * Send TX packet which generated by system to IPA.
1948 * This routine only will be used for function verification
1949 *
1950 * Return: NULL packet sent to IPA properly
1951 * NULL invalid packet drop
1952 * skb packet not sent to IPA. legacy data path should handle
1953 */
1954struct sk_buff *hdd_ipa_tx_packet_ipa(hdd_context_t *hdd_ctx,
1955 struct sk_buff *skb, uint8_t session_id)
Leo Change3e49442015-10-26 20:07:13 -07001956{
Leo Chang3bc8fed2015-11-13 10:59:47 -08001957 struct ipa_header *ipa_header;
1958 struct frag_header *frag_header;
1959
1960 if (!hdd_ipa_uc_is_enabled(hdd_ctx))
1961 return skb;
1962
1963 ipa_header = (struct ipa_header *) skb_push(skb,
1964 sizeof(struct ipa_header));
1965 if (!ipa_header) {
1966 /* No headroom, legacy */
1967 return skb;
1968 }
1969 memset(ipa_header, 0, sizeof(*ipa_header));
1970 ipa_header->vdev_id = 0;
1971
1972 frag_header = (struct frag_header *) skb_push(skb,
1973 sizeof(struct frag_header));
1974 if (!frag_header) {
1975 /* No headroom, drop */
1976 kfree_skb(skb);
1977 return NULL;
1978 }
1979 memset(frag_header, 0, sizeof(*frag_header));
1980 frag_header->length = skb->len - sizeof(struct frag_header)
1981 - sizeof(struct ipa_header);
1982
1983 ipa_tx_dp(IPA_CLIENT_WLAN1_CONS, skb, NULL);
1984 return NULL;
Leo Change3e49442015-10-26 20:07:13 -07001985}
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001986
1987/**
1988 * hdd_ipa_wake_lock_timer_func() - Wake lock work handler
1989 * @work: scheduled work
1990 *
1991 * When IPA resources are released in hdd_ipa_rm_try_release() we do
1992 * not want to immediately release the wake lock since the system
1993 * would then potentially try to suspend when there is a healthy data
1994 * rate. Deferred work is scheduled and this function handles the
1995 * work. When this function is called, if the IPA resource is still
1996 * released then we release the wake lock.
1997 *
1998 * Return: None
1999 */
2000static void hdd_ipa_wake_lock_timer_func(struct work_struct *work)
2001{
2002 struct hdd_ipa_priv *hdd_ipa = container_of(to_delayed_work(work),
2003 struct hdd_ipa_priv,
2004 wake_lock_work);
2005
2006 cdf_spin_lock_bh(&hdd_ipa->rm_lock);
2007
2008 if (hdd_ipa->rm_state != HDD_IPA_RM_RELEASED)
2009 goto end;
2010
2011 hdd_ipa->wake_lock_released = true;
2012 cdf_wake_lock_release(&hdd_ipa->wake_lock,
2013 WIFI_POWER_EVENT_WAKELOCK_IPA);
2014
2015end:
2016 cdf_spin_unlock_bh(&hdd_ipa->rm_lock);
2017}
2018
/**
 * hdd_ipa_rm_request() - Request resource from IPA
 * @hdd_ipa: Global HDD IPA context
 *
 * Transitions the RM state machine RELEASED -> GRANT_PENDING and asks
 * the IPA inactivity timer to request the WLAN PROD resource. If the
 * grant completes synchronously the state becomes GRANTED immediately;
 * otherwise hdd_ipa_rm_notify() finishes the transition later. Also
 * re-acquires the wake lock (and cancels its deferred release) so the
 * system does not suspend while the resource is in use.
 *
 * Return: 0 on success, negative errno on error
 */
static int hdd_ipa_rm_request(struct hdd_ipa_priv *hdd_ipa)
{
	int ret = 0;

	if (!hdd_ipa_is_rm_enabled(hdd_ipa->hdd_ctx))
		return 0;

	cdf_spin_lock_bh(&hdd_ipa->rm_lock);

	switch (hdd_ipa->rm_state) {
	case HDD_IPA_RM_GRANTED:
		/* Already granted - nothing to request */
		cdf_spin_unlock_bh(&hdd_ipa->rm_lock);
		return 0;
	case HDD_IPA_RM_GRANT_PENDING:
		/* A previous request is still being processed */
		cdf_spin_unlock_bh(&hdd_ipa->rm_lock);
		return -EINPROGRESS;
	case HDD_IPA_RM_RELEASED:
		hdd_ipa->rm_state = HDD_IPA_RM_GRANT_PENDING;
		break;
	}

	/* Drop the lock before calling into the IPA driver */
	cdf_spin_unlock_bh(&hdd_ipa->rm_lock);

	ret = ipa_rm_inactivity_timer_request_resource(
			IPA_RM_RESOURCE_WLAN_PROD);

	cdf_spin_lock_bh(&hdd_ipa->rm_lock);
	if (ret == 0) {
		/* Grant completed synchronously (immediate grant) */
		hdd_ipa->rm_state = HDD_IPA_RM_GRANTED;
		hdd_ipa->stats.num_rm_grant_imm++;
	}

	/* Hold the wake lock while the resource is active; cancel any
	 * deferred release scheduled by hdd_ipa_rm_try_release()
	 */
	cancel_delayed_work(&hdd_ipa->wake_lock_work);
	if (hdd_ipa->wake_lock_released) {
		cdf_wake_lock_acquire(&hdd_ipa->wake_lock,
				      WIFI_POWER_EVENT_WAKELOCK_IPA);
		hdd_ipa->wake_lock_released = false;
	}
	cdf_spin_unlock_bh(&hdd_ipa->rm_lock);

	return ret;
}
2067
/**
 * hdd_ipa_rm_try_release() - Attempt to release IPA resource
 * @hdd_ipa: Global HDD IPA context
 *
 * Releases the WLAN PROD resource only when nothing is in flight:
 * no outstanding TX completions, no pending RX descriptors/queued
 * packets (non-STA-offload mode), and an empty PM queue. On release,
 * schedules a delayed wake-lock drop rather than an immediate one.
 *
 * Return: 0 if resources released, negative errno otherwise
 */
static int hdd_ipa_rm_try_release(struct hdd_ipa_priv *hdd_ipa)
{
	int ret = 0;

	if (!hdd_ipa_is_rm_enabled(hdd_ipa->hdd_ctx))
		return 0;

	/* TX completions still outstanding - try again later */
	if (atomic_read(&hdd_ipa->tx_ref_cnt))
		return -EAGAIN;

	/* In non-STA-offload mode, pending HW descriptors or queued
	 * packets also keep the resource busy
	 */
	spin_lock_bh(&hdd_ipa->q_lock);
	if (!hdd_ipa_uc_sta_is_enabled(hdd_ipa->hdd_ctx) &&
	    (hdd_ipa->pending_hw_desc_cnt || hdd_ipa->pend_q_cnt)) {
		spin_unlock_bh(&hdd_ipa->q_lock);
		return -EAGAIN;
	}
	spin_unlock_bh(&hdd_ipa->q_lock);

	/* Packets parked for the PM work must drain before release */
	cdf_spin_lock_bh(&hdd_ipa->pm_lock);

	if (!cdf_nbuf_is_queue_empty(&hdd_ipa->pm_queue_head)) {
		cdf_spin_unlock_bh(&hdd_ipa->pm_lock);
		return -EAGAIN;
	}
	cdf_spin_unlock_bh(&hdd_ipa->pm_lock);

	cdf_spin_lock_bh(&hdd_ipa->rm_lock);
	switch (hdd_ipa->rm_state) {
	case HDD_IPA_RM_GRANTED:
		break;
	case HDD_IPA_RM_GRANT_PENDING:
		cdf_spin_unlock_bh(&hdd_ipa->rm_lock);
		return -EINPROGRESS;
	case HDD_IPA_RM_RELEASED:
		/* Already released - nothing to do */
		cdf_spin_unlock_bh(&hdd_ipa->rm_lock);
		return 0;
	}

	/* IPA driver returns immediately so set the state here to avoid any
	 * race condition.
	 */
	hdd_ipa->rm_state = HDD_IPA_RM_RELEASED;
	hdd_ipa->stats.num_rm_release++;
	cdf_spin_unlock_bh(&hdd_ipa->rm_lock);

	ret =
		ipa_rm_inactivity_timer_release_resource(IPA_RM_RESOURCE_WLAN_PROD);

	cdf_spin_lock_bh(&hdd_ipa->rm_lock);
	if (unlikely(ret != 0)) {
		/* Release failed - roll the optimistic state change back */
		hdd_ipa->rm_state = HDD_IPA_RM_GRANTED;
		WARN_ON(1);
	}

	/*
	 * If wake_lock is released immediately, kernel would try to suspend
	 * immediately as well, Just avoid ping-pong between suspend-resume
	 * while there is healthy amount of data transfer going on by
	 * releasing the wake_lock after some delay.
	 */
	schedule_delayed_work(&hdd_ipa->wake_lock_work,
			      msecs_to_jiffies
			      (HDD_IPA_RX_INACTIVITY_MSEC_DELAY));

	cdf_spin_unlock_bh(&hdd_ipa->rm_lock);

	return ret;
}
2142
2143/**
2144 * hdd_ipa_rm_notify() - IPA resource manager notifier callback
2145 * @user_data: user data registered with IPA
2146 * @event: the IPA resource manager event that occurred
2147 * @data: the data associated with the event
2148 *
2149 * Return: None
2150 */
2151static void hdd_ipa_rm_notify(void *user_data, enum ipa_rm_event event,
2152 unsigned long data)
2153{
2154 struct hdd_ipa_priv *hdd_ipa = user_data;
2155
2156 if (unlikely(!hdd_ipa))
2157 return;
2158
2159 if (!hdd_ipa_is_rm_enabled(hdd_ipa->hdd_ctx))
2160 return;
2161
2162 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "Evt: %d", event);
2163
2164 switch (event) {
2165 case IPA_RM_RESOURCE_GRANTED:
2166 if (hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
2167 /* RM Notification comes with ISR context
2168 * it should be serialized into work queue to avoid
2169 * ISR sleep problem
2170 */
2171 hdd_ipa->uc_rm_work.event = event;
2172 schedule_work(&hdd_ipa->uc_rm_work.work);
2173 break;
2174 }
2175 cdf_spin_lock_bh(&hdd_ipa->rm_lock);
2176 hdd_ipa->rm_state = HDD_IPA_RM_GRANTED;
2177 cdf_spin_unlock_bh(&hdd_ipa->rm_lock);
2178 hdd_ipa->stats.num_rm_grant++;
2179 break;
2180
2181 case IPA_RM_RESOURCE_RELEASED:
2182 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "RM Release");
2183 hdd_ipa->resource_unloading = false;
2184 break;
2185
2186 default:
2187 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "Unknown RM Evt: %d", event);
2188 break;
2189 }
2190}
2191
/**
 * hdd_ipa_rm_cons_release() - WLAN consumer resource release handler
 *
 * Callback function registered with IPA that is called when IPA wants
 * to release the WLAN consumer resource. The release is always granted
 * unconditionally.
 *
 * Return: 0 if the request is granted, negative errno otherwise
 */
static int hdd_ipa_rm_cons_release(void)
{
	/* Nothing blocks a consumer release; always grant it */
	return 0;
}
2204
2205/**
2206 * hdd_ipa_rm_cons_request() - WLAN consumer resource request handler
2207 *
2208 * Callback function registered with IPA that is called when IPA wants
2209 * to access the WLAN consumer resource
2210 *
2211 * Return: 0 if the request is granted, negative errno otherwise
2212 */
2213static int hdd_ipa_rm_cons_request(void)
2214{
Yun Park4d8b60a2015-10-22 13:59:32 -07002215 int ret = 0;
2216
2217 if (ghdd_ipa->resource_loading) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002218 HDD_IPA_LOG(CDF_TRACE_LEVEL_FATAL,
Yun Park4d8b60a2015-10-22 13:59:32 -07002219 "%s: IPA resource loading in progress",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002220 __func__);
2221 ghdd_ipa->pending_cons_req = true;
Yun Park4d8b60a2015-10-22 13:59:32 -07002222 ret = -EINPROGRESS;
2223 } else if (ghdd_ipa->resource_unloading) {
2224 HDD_IPA_LOG(CDF_TRACE_LEVEL_FATAL,
2225 "%s: IPA resource unloading in progress",
2226 __func__);
2227 ghdd_ipa->pending_cons_req = true;
2228 ret = -EPERM;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002229 }
Yun Park4d8b60a2015-10-22 13:59:32 -07002230
2231 return ret;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002232}
2233
2234/**
2235 * hdd_ipa_set_perf_level() - Set IPA performance level
2236 * @hdd_ctx: Global HDD context
2237 * @tx_packets: Number of packets transmitted in the last sample period
2238 * @rx_packets: Number of packets received in the last sample period
2239 *
2240 * Return: 0 on success, negative errno on error
2241 */
2242int hdd_ipa_set_perf_level(hdd_context_t *hdd_ctx, uint64_t tx_packets,
2243 uint64_t rx_packets)
2244{
2245 uint32_t next_cons_bw, next_prod_bw;
2246 struct hdd_ipa_priv *hdd_ipa = hdd_ctx->hdd_ipa;
2247 struct ipa_rm_perf_profile profile;
2248 int ret;
2249
2250 if ((!hdd_ipa_is_enabled(hdd_ctx)) ||
2251 (!hdd_ipa_is_clk_scaling_enabled(hdd_ctx)))
2252 return 0;
2253
2254 memset(&profile, 0, sizeof(profile));
2255
2256 if (tx_packets > (hdd_ctx->config->busBandwidthHighThreshold / 2))
2257 next_cons_bw = hdd_ctx->config->IpaHighBandwidthMbps;
2258 else if (tx_packets >
2259 (hdd_ctx->config->busBandwidthMediumThreshold / 2))
2260 next_cons_bw = hdd_ctx->config->IpaMediumBandwidthMbps;
2261 else
2262 next_cons_bw = hdd_ctx->config->IpaLowBandwidthMbps;
2263
2264 if (rx_packets > (hdd_ctx->config->busBandwidthHighThreshold / 2))
2265 next_prod_bw = hdd_ctx->config->IpaHighBandwidthMbps;
2266 else if (rx_packets >
2267 (hdd_ctx->config->busBandwidthMediumThreshold / 2))
2268 next_prod_bw = hdd_ctx->config->IpaMediumBandwidthMbps;
2269 else
2270 next_prod_bw = hdd_ctx->config->IpaLowBandwidthMbps;
2271
2272 HDD_IPA_LOG(LOG1,
2273 "CONS perf curr: %d, next: %d",
2274 hdd_ipa->curr_cons_bw, next_cons_bw);
2275 HDD_IPA_LOG(LOG1,
2276 "PROD perf curr: %d, next: %d",
2277 hdd_ipa->curr_prod_bw, next_prod_bw);
2278
2279 if (hdd_ipa->curr_cons_bw != next_cons_bw) {
2280 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
2281 "Requesting CONS perf curr: %d, next: %d",
2282 hdd_ipa->curr_cons_bw, next_cons_bw);
2283 profile.max_supported_bandwidth_mbps = next_cons_bw;
2284 ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_WLAN_CONS,
2285 &profile);
2286 if (ret) {
2287 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
2288 "RM CONS set perf profile failed: %d", ret);
2289
2290 return ret;
2291 }
2292 hdd_ipa->curr_cons_bw = next_cons_bw;
2293 hdd_ipa->stats.num_cons_perf_req++;
2294 }
2295
2296 if (hdd_ipa->curr_prod_bw != next_prod_bw) {
2297 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
2298 "Requesting PROD perf curr: %d, next: %d",
2299 hdd_ipa->curr_prod_bw, next_prod_bw);
2300 profile.max_supported_bandwidth_mbps = next_prod_bw;
2301 ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_WLAN_PROD,
2302 &profile);
2303 if (ret) {
2304 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
2305 "RM PROD set perf profile failed: %d", ret);
2306 return ret;
2307 }
2308 hdd_ipa->curr_prod_bw = next_prod_bw;
2309 hdd_ipa->stats.num_prod_perf_req++;
2310 }
2311
2312 return 0;
2313}
2314
/**
 * hdd_ipa_setup_rm() - Setup IPA resource management
 * @hdd_ipa: Global HDD IPA context
 *
 * Creates the WLAN PROD and CONS resources with the IPA RM, links PROD
 * to the APPS consumer, starts the inactivity timer, seeds the lowest
 * bandwidth vote, and initializes the wake lock, RM spinlock and RM
 * state. On any failure, previously created resources are torn down
 * via the goto cleanup chain.
 *
 * Return: 0 on success, negative errno on error
 */
static int hdd_ipa_setup_rm(struct hdd_ipa_priv *hdd_ipa)
{
	struct ipa_rm_create_params create_params = { 0 };
	int ret;

	if (!hdd_ipa_is_rm_enabled(hdd_ipa->hdd_ctx))
		return 0;

	/* Work item that serializes ISR-context RM grant notifications */
	cnss_init_work(&hdd_ipa->uc_rm_work.work, hdd_ipa_uc_rm_notify_defer);

	/* PROD resource: WLAN produces data; grants come via notify_cb */
	memset(&create_params, 0, sizeof(create_params));
	create_params.name = IPA_RM_RESOURCE_WLAN_PROD;
	create_params.reg_params.user_data = hdd_ipa;
	create_params.reg_params.notify_cb = hdd_ipa_rm_notify;
	create_params.floor_voltage = IPA_VOLTAGE_SVS;

	ret = ipa_rm_create_resource(&create_params);
	if (ret) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
			    "Create RM resource failed: %d", ret);
		goto setup_rm_fail;
	}

	/* CONS resource: IPA asks us for access via request/release cbs */
	memset(&create_params, 0, sizeof(create_params));
	create_params.name = IPA_RM_RESOURCE_WLAN_CONS;
	create_params.request_resource = hdd_ipa_rm_cons_request;
	create_params.release_resource = hdd_ipa_rm_cons_release;
	create_params.floor_voltage = IPA_VOLTAGE_SVS;

	ret = ipa_rm_create_resource(&create_params);
	if (ret) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
			    "Create RM CONS resource failed: %d", ret);
		goto delete_prod;
	}

	ipa_rm_add_dependency(IPA_RM_RESOURCE_WLAN_PROD,
			      IPA_RM_RESOURCE_APPS_CONS);

	ret = ipa_rm_inactivity_timer_init(IPA_RM_RESOURCE_WLAN_PROD,
					   HDD_IPA_RX_INACTIVITY_MSEC_DELAY);
	if (ret) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "Timer init failed: %d",
			    ret);
		goto timer_init_failed;
	}

	/* Set the lowest bandwidth to start with */
	ret = hdd_ipa_set_perf_level(hdd_ipa->hdd_ctx, 0, 0);

	if (ret) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
			    "Set perf level failed: %d", ret);
		goto set_perf_failed;
	}

	cdf_wake_lock_init(&hdd_ipa->wake_lock, "wlan_ipa");
#ifdef CONFIG_CNSS
	cnss_init_delayed_work(&hdd_ipa->wake_lock_work,
			       hdd_ipa_wake_lock_timer_func);
#else
	INIT_DELAYED_WORK(&hdd_ipa->wake_lock_work,
			  hdd_ipa_wake_lock_timer_func);
#endif
	cdf_spinlock_init(&hdd_ipa->rm_lock);
	hdd_ipa->rm_state = HDD_IPA_RM_RELEASED;
	hdd_ipa->wake_lock_released = true;
	atomic_set(&hdd_ipa->tx_ref_cnt, 0);

	return ret;

set_perf_failed:
	ipa_rm_inactivity_timer_destroy(IPA_RM_RESOURCE_WLAN_PROD);

timer_init_failed:
	ipa_rm_delete_resource(IPA_RM_RESOURCE_WLAN_CONS);

delete_prod:
	ipa_rm_delete_resource(IPA_RM_RESOURCE_WLAN_PROD);

setup_rm_fail:
	return ret;
}
2403
2404/**
2405 * hdd_ipa_destroy_rm_resource() - Destroy IPA resources
2406 * @hdd_ipa: Global HDD IPA context
2407 *
2408 * Destroys all resources associated with the IPA resource manager
2409 *
2410 * Return: None
2411 */
2412static void hdd_ipa_destroy_rm_resource(struct hdd_ipa_priv *hdd_ipa)
2413{
2414 int ret;
2415
2416 if (!hdd_ipa_is_rm_enabled(hdd_ipa->hdd_ctx))
2417 return;
2418
2419 cancel_delayed_work_sync(&hdd_ipa->wake_lock_work);
2420 cdf_wake_lock_destroy(&hdd_ipa->wake_lock);
2421
2422#ifdef WLAN_OPEN_SOURCE
2423 cancel_work_sync(&hdd_ipa->uc_rm_work.work);
2424#endif
2425 cdf_spinlock_destroy(&hdd_ipa->rm_lock);
2426
2427 ipa_rm_inactivity_timer_destroy(IPA_RM_RESOURCE_WLAN_PROD);
2428
2429 ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_WLAN_PROD);
2430 if (ret)
2431 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
2432 "RM PROD resource delete failed %d", ret);
2433
2434 ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_WLAN_CONS);
2435 if (ret)
2436 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
2437 "RM CONS resource delete failed %d", ret);
2438}
2439
2440/**
2441 * hdd_ipa_send_skb_to_network() - Send skb to kernel
2442 * @skb: network buffer
2443 * @adapter: network adapter
2444 *
2445 * Called when a network buffer is received which should not be routed
2446 * to the IPA module.
2447 *
2448 * Return: None
2449 */
2450static void hdd_ipa_send_skb_to_network(cdf_nbuf_t skb,
2451 hdd_adapter_t *adapter)
2452{
2453 struct hdd_ipa_priv *hdd_ipa = ghdd_ipa;
2454 unsigned int cpu_index;
2455
2456 if (!adapter || adapter->magic != WLAN_HDD_ADAPTER_MAGIC) {
2457 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO_LOW, "Invalid adapter: 0x%p",
2458 adapter);
2459 HDD_IPA_INCREASE_INTERNAL_DROP_COUNT(hdd_ipa);
2460 cdf_nbuf_free(skb);
2461 return;
2462 }
2463
2464 if (hdd_ipa->hdd_ctx->isUnloadInProgress) {
2465 HDD_IPA_INCREASE_INTERNAL_DROP_COUNT(hdd_ipa);
2466 cdf_nbuf_free(skb);
2467 return;
2468 }
2469
2470 skb->destructor = hdd_ipa_uc_rt_debug_destructor;
2471 skb->dev = adapter->dev;
2472 skb->protocol = eth_type_trans(skb, skb->dev);
2473 skb->ip_summed = CHECKSUM_NONE;
2474
2475 cpu_index = wlan_hdd_get_cpu();
2476
2477 ++adapter->hdd_stats.hddTxRxStats.rxPackets[cpu_index];
2478 if (netif_rx_ni(skb) == NET_RX_SUCCESS)
2479 ++adapter->hdd_stats.hddTxRxStats.rxDelivered[cpu_index];
2480 else
2481 ++adapter->hdd_stats.hddTxRxStats.rxRefused[cpu_index];
2482
2483 HDD_IPA_INCREASE_NET_SEND_COUNT(hdd_ipa);
2484 adapter->dev->last_rx = jiffies;
2485}
2486
/**
 * hdd_ipa_w2i_cb() - WLAN to IPA callback handler
 * @priv: pointer to private data registered with IPA (we register a
 *	pointer to the global IPA context)
 * @evt: the IPA event which triggered the callback
 * @data: data associated with the event
 *
 * Handles IPA_RECEIVE exception packets coming back from the IPA
 * driver: resolves the destination interface, strips the CLD header,
 * optionally forwards intra-BSS traffic back to TX per the FW
 * descriptor flags, and delivers the packet to the kernel stack.
 *
 * Return: None
 */
static void hdd_ipa_w2i_cb(void *priv, enum ipa_dp_evt_type evt,
			   unsigned long data)
{
	struct hdd_ipa_priv *hdd_ipa = NULL;
	hdd_adapter_t *adapter = NULL;
	cdf_nbuf_t skb;
	uint8_t iface_id;
	uint8_t session_id;
	struct hdd_ipa_iface_context *iface_context;
	cdf_nbuf_t copy;
	uint8_t fw_desc;
	int ret;

	hdd_ipa = (struct hdd_ipa_priv *)priv;

	switch (evt) {
	case IPA_RECEIVE:
		skb = (cdf_nbuf_t) data;
		/* In uC mode the session id rides in skb->cb[0] and maps
		 * to an interface; otherwise the id is parsed from the
		 * packet header itself
		 */
		if (hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
			session_id = (uint8_t)skb->cb[0];
			iface_id = vdev_to_iface[session_id];
			HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO_HIGH,
				"IPA_RECEIVE: session_id=%u, iface_id=%u",
				session_id, iface_id);
		} else {
			iface_id = HDD_IPA_GET_IFACE_ID(skb->data);
		}

		if (iface_id >= HDD_IPA_MAX_IFACE) {
			HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
				    "IPA_RECEIVE: Invalid iface_id: %u",
				    iface_id);
			HDD_IPA_DBG_DUMP(CDF_TRACE_LEVEL_INFO_HIGH,
				"w2i -- skb", skb->data, 8);
			HDD_IPA_INCREASE_INTERNAL_DROP_COUNT(hdd_ipa);
			cdf_nbuf_free(skb);
			return;
		}

		iface_context = &hdd_ipa->iface_context[iface_id];
		adapter = iface_context->adapter;

		HDD_IPA_DBG_DUMP(CDF_TRACE_LEVEL_DEBUG,
				 "w2i -- skb", skb->data, 8);
		/* Strip the CLD header prepended by FW/IPA before handing
		 * the frame upward (uC and non-uC use different lengths)
		 */
		if (hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
			hdd_ipa->stats.num_rx_excep++;
			skb_pull(skb, HDD_IPA_UC_WLAN_CLD_HDR_LEN);
		} else {
			skb_pull(skb, HDD_IPA_WLAN_CLD_HDR_LEN);
		}

		iface_context->stats.num_rx_ipa_excep++;

		/* Disable to forward Intra-BSS Rx packets when
		 * ap_isolate=1 in hostapd.conf
		 */
		if (adapter->sessionCtx.ap.apDisableIntraBssFwd) {
			/*
			 * When INTRA_BSS_FWD_OFFLOAD is enabled, FW will send
			 * all Rx packets to IPA uC, which need to be forwarded
			 * to other interface.
			 * And, IPA driver will send back to WLAN host driver
			 * through exception pipe with fw_desc field set by FW.
			 * Here we are checking fw_desc field for FORWARD bit
			 * set, and forward to Tx. Then copy to kernel stack
			 * only when DISCARD bit is not set.
			 */
			fw_desc = (uint8_t)skb->cb[1];

			if (fw_desc & HDD_IPA_FW_RX_DESC_FORWARD_M) {
				HDD_IPA_LOG(
					CDF_TRACE_LEVEL_DEBUG,
					"Forward packet to Tx (fw_desc=%d)",
					fw_desc);
				/* Forward a copy so the original can still
				 * be delivered to the local stack below
				 */
				copy = cdf_nbuf_copy(skb);
				if (copy) {
					hdd_ipa->ipa_tx_forward++;
					ret = hdd_softap_hard_start_xmit(
						(struct sk_buff *)copy,
						adapter->dev);
					if (ret) {
						HDD_IPA_LOG(
							CDF_TRACE_LEVEL_DEBUG,
							"Forward packet tx fail");
						hdd_ipa->stats.
							num_tx_bcmc_err++;
					} else {
						hdd_ipa->stats.num_tx_bcmc++;
					}
				}
			}

			/* DISCARD bit set: FW says do not hand the original
			 * up to the local stack - drop it here
			 */
			if (fw_desc & HDD_IPA_FW_RX_DESC_DISCARD_M) {
				HDD_IPA_INCREASE_INTERNAL_DROP_COUNT(hdd_ipa);
				hdd_ipa->ipa_rx_discard++;
				cdf_nbuf_free(skb);
				break;
			}

		} else {
			HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO_HIGH,
				"Intra-BSS FWD is disabled-skip forward to Tx");
		}

		hdd_ipa_send_skb_to_network(skb, adapter);
		break;

	default:
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
			    "w2i cb wrong event: 0x%x", evt);
		return;
	}
}
2609
2610/**
2611 * hdd_ipa_nbuf_cb() - IPA TX complete callback
2612 * @skb: packet buffer which was transmitted
2613 *
2614 * Return: None
2615 */
2616static void hdd_ipa_nbuf_cb(cdf_nbuf_t skb)
2617{
2618 struct hdd_ipa_priv *hdd_ipa = ghdd_ipa;
2619
2620 HDD_IPA_LOG(CDF_TRACE_LEVEL_DEBUG, "%lx", NBUF_OWNER_PRIV_DATA(skb));
2621 ipa_free_skb((struct ipa_rx_data *)NBUF_OWNER_PRIV_DATA(skb));
2622
2623 hdd_ipa->stats.num_tx_comp_cnt++;
2624
2625 atomic_dec(&hdd_ipa->tx_ref_cnt);
2626
2627 hdd_ipa_rm_try_release(hdd_ipa);
2628}
2629
/**
 * hdd_ipa_send_pkt_to_tl() - Send an IPA packet to TL
 * @iface_context: interface-specific IPA context
 * @ipa_tx_desc: packet data descriptor
 *
 * Validates the target adapter (present and not blocked by DFS CAC),
 * prepares the nbuf for the data path (completion callback, DMA
 * address, descriptor back-pointer) and hands it to TL. On any drop
 * path the IPA descriptor is returned via ipa_free_skb() and an RM
 * release is attempted.
 *
 * Return: None
 */
static void hdd_ipa_send_pkt_to_tl(
		struct hdd_ipa_iface_context *iface_context,
		struct ipa_rx_data *ipa_tx_desc)
{
	struct hdd_ipa_priv *hdd_ipa = iface_context->hdd_ipa;
	uint8_t interface_id;
	hdd_adapter_t *adapter = NULL;
	cdf_nbuf_t skb;

	/* interface_lock protects adapter teardown racing with TX */
	cdf_spin_lock_bh(&iface_context->interface_lock);
	adapter = iface_context->adapter;
	if (!adapter) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_WARN, "Interface Down");
		ipa_free_skb(ipa_tx_desc);
		iface_context->stats.num_tx_drop++;
		cdf_spin_unlock_bh(&iface_context->interface_lock);
		hdd_ipa_rm_try_release(hdd_ipa);
		return;
	}

	/*
	 * During CAC period, data packets shouldn't be sent over the air so
	 * drop all the packets here
	 */
	if (WLAN_HDD_GET_AP_CTX_PTR(adapter)->dfs_cac_block_tx) {
		ipa_free_skb(ipa_tx_desc);
		cdf_spin_unlock_bh(&iface_context->interface_lock);
		iface_context->stats.num_tx_cac_drop++;
		hdd_ipa_rm_try_release(hdd_ipa);
		return;
	}

	interface_id = adapter->sessionId;
	++adapter->stats.tx_packets;

	cdf_spin_unlock_bh(&iface_context->interface_lock);

	skb = ipa_tx_desc->skb;

	/* Arm the nbuf so hdd_ipa_nbuf_cb() can recover the descriptor
	 * on TX completion
	 */
	cdf_mem_set(skb->cb, sizeof(skb->cb), 0);
	NBUF_OWNER_ID(skb) = IPA_NBUF_OWNER_ID;
	NBUF_CALLBACK_FN(skb) = hdd_ipa_nbuf_cb;
	/* In STA-offload mode the frag+IPA headers prepended by IPA must
	 * be skipped: advance the DMA address and shrink the length
	 */
	if (hdd_ipa_uc_sta_is_enabled(hdd_ipa->hdd_ctx)) {
		NBUF_MAPPED_PADDR_LO(skb) = ipa_tx_desc->dma_addr
			+ HDD_IPA_WLAN_FRAG_HEADER
			+ HDD_IPA_WLAN_IPA_HEADER;
		ipa_tx_desc->skb->len -=
			HDD_IPA_WLAN_FRAG_HEADER + HDD_IPA_WLAN_IPA_HEADER;
	} else
		NBUF_MAPPED_PADDR_LO(skb) = ipa_tx_desc->dma_addr;

	NBUF_OWNER_PRIV_DATA(skb) = (unsigned long)ipa_tx_desc;

	adapter->stats.tx_bytes += ipa_tx_desc->skb->len;

	/* Non-NULL return means TL rejected the frame */
	skb = ol_tx_send_ipa_data_frame(iface_context->tl_context,
					ipa_tx_desc->skb);
	if (skb) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_DEBUG, "TLSHIM tx fail");
		ipa_free_skb(ipa_tx_desc);
		iface_context->stats.num_tx_err++;
		hdd_ipa_rm_try_release(hdd_ipa);
		return;
	}

	/* Reference dropped in hdd_ipa_nbuf_cb() on completion */
	atomic_inc(&hdd_ipa->tx_ref_cnt);

	iface_context->stats.num_tx++;

}
2707
/**
 * hdd_ipa_pm_send_pkt_to_tl() - Send queued packets to TL
 * @work: pointer to the scheduled work
 *
 * Called during PM resume to send packets to TL which were queued
 * while host was in the process of suspending.
 *
 * Return: None
 */
static void hdd_ipa_pm_send_pkt_to_tl(struct work_struct *work)
{
	struct hdd_ipa_priv *hdd_ipa = container_of(work,
						    struct hdd_ipa_priv,
						    pm_work);
	struct hdd_ipa_pm_tx_cb *pm_tx_cb = NULL;
	cdf_nbuf_t skb;
	uint32_t dequeued = 0;

	cdf_spin_lock_bh(&hdd_ipa->pm_lock);

	/* Drain the PM queue; the lock is dropped around each TL send
	 * (which may sleep or re-enter IPA paths) and re-taken for the
	 * next dequeue
	 */
	while (((skb = cdf_nbuf_queue_remove(&hdd_ipa->pm_queue_head)) != NULL)) {
		cdf_spin_unlock_bh(&hdd_ipa->pm_lock);

		/* The iface/descriptor pair was stashed in skb->cb by
		 * hdd_ipa_i2w_cb() when the packet was queued
		 */
		pm_tx_cb = (struct hdd_ipa_pm_tx_cb *)skb->cb;

		dequeued++;

		hdd_ipa_send_pkt_to_tl(pm_tx_cb->iface_context,
				       pm_tx_cb->ipa_tx_desc);

		cdf_spin_lock_bh(&hdd_ipa->pm_lock);
	}

	cdf_spin_unlock_bh(&hdd_ipa->pm_lock);

	/* Track total and high-water-mark dequeue statistics */
	hdd_ipa->stats.num_tx_dequeued += dequeued;
	if (dequeued > hdd_ipa->stats.num_max_pm_queue)
		hdd_ipa->stats.num_max_pm_queue = dequeued;
}
2747
/**
 * hdd_ipa_i2w_cb() - IPA to WLAN callback
 * @priv: pointer to private data registered with IPA (we register a
 *	pointer to the interface-specific IPA context)
 * @evt: the IPA event which triggered the callback
 * @data: data associated with the event
 *
 * Receives TX packets from the IPA driver. Drops them when the HDD
 * context is invalid (SSR/unload), queues them while the host is
 * suspended (drained later by hdd_ipa_pm_send_pkt_to_tl()), and
 * otherwise forwards them to TL.
 *
 * Return: None
 */
static void hdd_ipa_i2w_cb(void *priv, enum ipa_dp_evt_type evt,
			   unsigned long data)
{
	struct hdd_ipa_priv *hdd_ipa = NULL;
	struct ipa_rx_data *ipa_tx_desc;
	struct hdd_ipa_iface_context *iface_context;
	cdf_nbuf_t skb;
	struct hdd_ipa_pm_tx_cb *pm_tx_cb = NULL;
	CDF_STATUS status = CDF_STATUS_SUCCESS;

	iface_context = (struct hdd_ipa_iface_context *)priv;
	/* Only IPA_RECEIVE carries a descriptor; anything else is an
	 * skb to be discarded
	 */
	if (evt != IPA_RECEIVE) {
		skb = (cdf_nbuf_t) data;
		dev_kfree_skb_any(skb);
		iface_context->stats.num_tx_drop++;
		return;
	}

	ipa_tx_desc = (struct ipa_rx_data *)data;

	hdd_ipa = iface_context->hdd_ipa;

	/*
	 * When SSR is going on or driver is unloading, just drop the packets.
	 * During SSR, there is no use in queueing the packets as STA has to
	 * connect back any way
	 */
	status = wlan_hdd_validate_context(hdd_ipa->hdd_ctx);
	if (0 != status) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "HDD context is not valid");
		ipa_free_skb(ipa_tx_desc);
		iface_context->stats.num_tx_drop++;
		return;
	}

	skb = ipa_tx_desc->skb;

	HDD_IPA_DBG_DUMP(CDF_TRACE_LEVEL_DEBUG, "i2w", skb->data, 8);

	/*
	 * If PROD resource is not requested here then there may be cases where
	 * IPA hardware may be clocked down because of not having proper
	 * dependency graph between WLAN CONS and modem PROD pipes. Adding the
	 * workaround to request PROD resource while data is going over CONS
	 * pipe to prevent the IPA hardware clockdown.
	 */
	hdd_ipa_rm_request(hdd_ipa);

	cdf_spin_lock_bh(&hdd_ipa->pm_lock);
	/*
	 * If host is still suspended then queue the packets and these will be
	 * drained later when resume completes. When packet is arrived here and
	 * host is suspended, this means that there is already resume is in
	 * progress.
	 */
	if (hdd_ipa->suspended) {
		/* Stash the context/descriptor in skb->cb for the PM work */
		cdf_mem_set(skb->cb, sizeof(skb->cb), 0);
		pm_tx_cb = (struct hdd_ipa_pm_tx_cb *)skb->cb;
		pm_tx_cb->iface_context = iface_context;
		pm_tx_cb->ipa_tx_desc = ipa_tx_desc;
		cdf_nbuf_queue_add(&hdd_ipa->pm_queue_head, skb);
		hdd_ipa->stats.num_tx_queued++;

		cdf_spin_unlock_bh(&hdd_ipa->pm_lock);
		return;
	}

	cdf_spin_unlock_bh(&hdd_ipa->pm_lock);

	/*
	 * If we are here means, host is not suspended, wait for the work queue
	 * to finish.
	 */
#ifdef WLAN_OPEN_SOURCE
	flush_work(&hdd_ipa->pm_work);
#endif

	return hdd_ipa_send_pkt_to_tl(iface_context, ipa_tx_desc);
}
2836
2837/**
2838 * hdd_ipa_suspend() - Suspend IPA
2839 * @hdd_ctx: Global HDD context
2840 *
2841 * Return: 0 on success, negativer errno on error
2842 */
2843int hdd_ipa_suspend(hdd_context_t *hdd_ctx)
2844{
2845 struct hdd_ipa_priv *hdd_ipa = hdd_ctx->hdd_ipa;
2846
2847 if (!hdd_ipa_is_enabled(hdd_ctx))
2848 return 0;
2849
2850 /*
2851 * Check if IPA is ready for suspend, If we are here means, there is
2852 * high chance that suspend would go through but just to avoid any race
2853 * condition after suspend started, these checks are conducted before
2854 * allowing to suspend.
2855 */
2856 if (atomic_read(&hdd_ipa->tx_ref_cnt))
2857 return -EAGAIN;
2858
2859 cdf_spin_lock_bh(&hdd_ipa->rm_lock);
2860
2861 if (hdd_ipa->rm_state != HDD_IPA_RM_RELEASED) {
2862 cdf_spin_unlock_bh(&hdd_ipa->rm_lock);
2863 return -EAGAIN;
2864 }
2865 cdf_spin_unlock_bh(&hdd_ipa->rm_lock);
2866
2867 cdf_spin_lock_bh(&hdd_ipa->pm_lock);
2868 hdd_ipa->suspended = true;
2869 cdf_spin_unlock_bh(&hdd_ipa->pm_lock);
2870
2871 return 0;
2872}
2873
2874/**
2875 * hdd_ipa_resume() - Resume IPA following suspend
2876 * hdd_ctx: Global HDD context
2877 *
2878 * Return: 0 on success, negative errno on error
2879 */
2880int hdd_ipa_resume(hdd_context_t *hdd_ctx)
2881{
2882 struct hdd_ipa_priv *hdd_ipa = hdd_ctx->hdd_ipa;
2883
2884 if (!hdd_ipa_is_enabled(hdd_ctx))
2885 return 0;
2886
2887 schedule_work(&hdd_ipa->pm_work);
2888
2889 cdf_spin_lock_bh(&hdd_ipa->pm_lock);
2890 hdd_ipa->suspended = false;
2891 cdf_spin_unlock_bh(&hdd_ipa->pm_lock);
2892
2893 return 0;
2894}
2895
2896/**
2897 * hdd_ipa_setup_sys_pipe() - Setup all IPA Sys pipes
2898 * @hdd_ipa: Global HDD IPA context
2899 *
2900 * Return: 0 on success, negative errno on error
2901 */
2902static int hdd_ipa_setup_sys_pipe(struct hdd_ipa_priv *hdd_ipa)
2903{
2904 int i, ret = 0;
2905 struct ipa_sys_connect_params *ipa;
2906 uint32_t desc_fifo_sz;
2907
2908 /* The maximum number of descriptors that can be provided to a BAM at
2909 * once is one less than the total number of descriptors that the buffer
2910 * can contain.
2911 * If max_num_of_descriptors = (BAM_PIPE_DESCRIPTOR_FIFO_SIZE / sizeof
2912 * (SPS_DESCRIPTOR)), then (max_num_of_descriptors - 1) descriptors can
2913 * be provided at once.
2914 * Because of above requirement, one extra descriptor will be added to
2915 * make sure hardware always has one descriptor.
2916 */
2917 desc_fifo_sz = hdd_ipa->hdd_ctx->config->IpaDescSize
2918 + sizeof(struct sps_iovec);
2919
2920 /*setup TX pipes */
2921 for (i = 0; i < HDD_IPA_MAX_IFACE; i++) {
2922 ipa = &hdd_ipa->sys_pipe[i].ipa_sys_params;
2923
2924 ipa->client = hdd_ipa_adapter_2_client[i].cons_client;
2925 ipa->desc_fifo_sz = desc_fifo_sz;
2926 ipa->priv = &hdd_ipa->iface_context[i];
2927 ipa->notify = hdd_ipa_i2w_cb;
2928
2929 if (hdd_ipa_uc_sta_is_enabled(hdd_ipa->hdd_ctx)) {
2930 ipa->ipa_ep_cfg.hdr.hdr_len =
2931 HDD_IPA_UC_WLAN_TX_HDR_LEN;
2932 ipa->ipa_ep_cfg.nat.nat_en = IPA_BYPASS_NAT;
2933 ipa->ipa_ep_cfg.hdr.hdr_ofst_pkt_size_valid = 1;
2934 ipa->ipa_ep_cfg.hdr.hdr_ofst_pkt_size = 0;
2935 ipa->ipa_ep_cfg.hdr.hdr_additional_const_len =
2936 HDD_IPA_UC_WLAN_8023_HDR_SIZE;
2937 ipa->ipa_ep_cfg.hdr_ext.hdr_little_endian = true;
2938 } else {
2939 ipa->ipa_ep_cfg.hdr.hdr_len = HDD_IPA_WLAN_TX_HDR_LEN;
2940 }
2941 ipa->ipa_ep_cfg.mode.mode = IPA_BASIC;
2942
2943 if (!hdd_ipa_is_rm_enabled(hdd_ipa->hdd_ctx))
2944 ipa->keep_ipa_awake = 1;
2945
2946 ret = ipa_setup_sys_pipe(ipa, &(hdd_ipa->sys_pipe[i].conn_hdl));
2947 if (ret) {
2948 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "Failed for pipe %d"
2949 " ret: %d", i, ret);
2950 goto setup_sys_pipe_fail;
2951 }
2952 hdd_ipa->sys_pipe[i].conn_hdl_valid = 1;
2953 }
2954
2955 if (!hdd_ipa_uc_sta_is_enabled(hdd_ipa->hdd_ctx)) {
2956 /*
2957 * Hard code it here, this can be extended if in case
2958 * PROD pipe is also per interface.
2959 * Right now there is no advantage of doing this.
2960 */
2961 hdd_ipa->prod_client = IPA_CLIENT_WLAN1_PROD;
2962
2963 ipa = &hdd_ipa->sys_pipe[HDD_IPA_RX_PIPE].ipa_sys_params;
2964
2965 ipa->client = hdd_ipa->prod_client;
2966
2967 ipa->desc_fifo_sz = desc_fifo_sz;
2968 ipa->priv = hdd_ipa;
2969 ipa->notify = hdd_ipa_w2i_cb;
2970
2971 ipa->ipa_ep_cfg.nat.nat_en = IPA_BYPASS_NAT;
2972 ipa->ipa_ep_cfg.hdr.hdr_len = HDD_IPA_WLAN_RX_HDR_LEN;
2973 ipa->ipa_ep_cfg.hdr.hdr_ofst_metadata_valid = 1;
2974 ipa->ipa_ep_cfg.mode.mode = IPA_BASIC;
2975
2976 if (!hdd_ipa_is_rm_enabled(hdd_ipa->hdd_ctx))
2977 ipa->keep_ipa_awake = 1;
2978
2979 ret = ipa_setup_sys_pipe(ipa, &(hdd_ipa->sys_pipe[i].conn_hdl));
2980 if (ret) {
2981 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
2982 "Failed for RX pipe: %d", ret);
2983 goto setup_sys_pipe_fail;
2984 }
2985 hdd_ipa->sys_pipe[HDD_IPA_RX_PIPE].conn_hdl_valid = 1;
2986 }
2987
2988 return ret;
2989
2990setup_sys_pipe_fail:
2991
2992 while (--i >= 0) {
2993 ipa_teardown_sys_pipe(hdd_ipa->sys_pipe[i].conn_hdl);
2994 cdf_mem_zero(&hdd_ipa->sys_pipe[i],
2995 sizeof(struct hdd_ipa_sys_pipe));
2996 }
2997
2998 return ret;
2999}
3000
3001/**
3002 * hdd_ipa_teardown_sys_pipe() - Tear down all IPA Sys pipes
3003 * @hdd_ipa: Global HDD IPA context
3004 *
3005 * Return: None
3006 */
3007static void hdd_ipa_teardown_sys_pipe(struct hdd_ipa_priv *hdd_ipa)
3008{
3009 int ret = 0, i;
3010 for (i = 0; i < HDD_IPA_MAX_SYSBAM_PIPE; i++) {
3011 if (hdd_ipa->sys_pipe[i].conn_hdl_valid) {
3012 ret =
3013 ipa_teardown_sys_pipe(hdd_ipa->sys_pipe[i].
3014 conn_hdl);
3015 if (ret)
3016 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "Failed: %d",
3017 ret);
3018
3019 hdd_ipa->sys_pipe[i].conn_hdl_valid = 0;
3020 }
3021 }
3022}
3023
3024/**
3025 * hdd_ipa_register_interface() - register IPA interface
3026 * @hdd_ipa: Global IPA context
3027 * @iface_context: Per-interface IPA context
3028 *
3029 * Return: 0 on success, negative errno on error
3030 */
3031static int hdd_ipa_register_interface(struct hdd_ipa_priv *hdd_ipa,
3032 struct hdd_ipa_iface_context
3033 *iface_context)
3034{
3035 struct ipa_tx_intf tx_intf;
3036 struct ipa_rx_intf rx_intf;
3037 struct ipa_ioc_tx_intf_prop *tx_prop = NULL;
3038 struct ipa_ioc_rx_intf_prop *rx_prop = NULL;
3039 char *ifname = iface_context->adapter->dev->name;
3040
3041 char ipv4_hdr_name[IPA_RESOURCE_NAME_MAX];
3042 char ipv6_hdr_name[IPA_RESOURCE_NAME_MAX];
3043
3044 int num_prop = 1;
3045 int ret = 0;
3046
3047 if (hdd_ipa_is_ipv6_enabled(hdd_ipa->hdd_ctx))
3048 num_prop++;
3049
3050 /* Allocate TX properties for TOS categories, 1 each for IPv4 & IPv6 */
3051 tx_prop =
3052 cdf_mem_malloc(sizeof(struct ipa_ioc_tx_intf_prop) * num_prop);
3053 if (!tx_prop) {
3054 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "tx_prop allocation failed");
3055 goto register_interface_fail;
3056 }
3057
3058 /* Allocate RX properties, 1 each for IPv4 & IPv6 */
3059 rx_prop =
3060 cdf_mem_malloc(sizeof(struct ipa_ioc_rx_intf_prop) * num_prop);
3061 if (!rx_prop) {
3062 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "rx_prop allocation failed");
3063 goto register_interface_fail;
3064 }
3065
3066 cdf_mem_zero(&tx_intf, sizeof(tx_intf));
3067 cdf_mem_zero(&rx_intf, sizeof(rx_intf));
3068
3069 snprintf(ipv4_hdr_name, IPA_RESOURCE_NAME_MAX, "%s%s",
3070 ifname, HDD_IPA_IPV4_NAME_EXT);
3071 snprintf(ipv6_hdr_name, IPA_RESOURCE_NAME_MAX, "%s%s",
3072 ifname, HDD_IPA_IPV6_NAME_EXT);
3073
3074 rx_prop[IPA_IP_v4].ip = IPA_IP_v4;
3075 rx_prop[IPA_IP_v4].src_pipe = iface_context->prod_client;
3076 rx_prop[IPA_IP_v4].hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
3077 rx_prop[IPA_IP_v4].attrib.attrib_mask = IPA_FLT_META_DATA;
3078
3079 /*
3080 * Interface ID is 3rd byte in the CLD header. Add the meta data and
3081 * mask to identify the interface in IPA hardware
3082 */
3083 rx_prop[IPA_IP_v4].attrib.meta_data =
3084 htonl(iface_context->adapter->sessionId << 16);
3085 rx_prop[IPA_IP_v4].attrib.meta_data_mask = htonl(0x00FF0000);
3086
3087 rx_intf.num_props++;
3088 if (hdd_ipa_is_ipv6_enabled(hdd_ipa->hdd_ctx)) {
3089 rx_prop[IPA_IP_v6].ip = IPA_IP_v6;
3090 rx_prop[IPA_IP_v6].src_pipe = iface_context->prod_client;
3091 rx_prop[IPA_IP_v6].hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
3092 rx_prop[IPA_IP_v4].attrib.attrib_mask = IPA_FLT_META_DATA;
3093 rx_prop[IPA_IP_v4].attrib.meta_data =
3094 htonl(iface_context->adapter->sessionId << 16);
3095 rx_prop[IPA_IP_v4].attrib.meta_data_mask = htonl(0x00FF0000);
3096
3097 rx_intf.num_props++;
3098 }
3099
3100 tx_prop[IPA_IP_v4].ip = IPA_IP_v4;
3101 tx_prop[IPA_IP_v4].hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
3102 tx_prop[IPA_IP_v4].dst_pipe = IPA_CLIENT_WLAN1_CONS;
3103 tx_prop[IPA_IP_v4].alt_dst_pipe = iface_context->cons_client;
3104 strlcpy(tx_prop[IPA_IP_v4].hdr_name, ipv4_hdr_name,
3105 IPA_RESOURCE_NAME_MAX);
3106 tx_intf.num_props++;
3107
3108 if (hdd_ipa_is_ipv6_enabled(hdd_ipa->hdd_ctx)) {
3109 tx_prop[IPA_IP_v6].ip = IPA_IP_v6;
3110 tx_prop[IPA_IP_v6].hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
3111 tx_prop[IPA_IP_v6].dst_pipe = IPA_CLIENT_WLAN1_CONS;
3112 tx_prop[IPA_IP_v6].alt_dst_pipe = iface_context->cons_client;
3113 strlcpy(tx_prop[IPA_IP_v6].hdr_name, ipv6_hdr_name,
3114 IPA_RESOURCE_NAME_MAX);
3115 tx_intf.num_props++;
3116 }
3117
3118 tx_intf.prop = tx_prop;
3119 rx_intf.prop = rx_prop;
3120
3121 /* Call the ipa api to register interface */
3122 ret = ipa_register_intf(ifname, &tx_intf, &rx_intf);
3123
3124register_interface_fail:
3125 cdf_mem_free(tx_prop);
3126 cdf_mem_free(rx_prop);
3127 return ret;
3128}
3129
3130/**
3131 * hdd_remove_ipa_header() - Remove a specific header from IPA
3132 * @name: Name of the header to be removed
3133 *
3134 * Return: None
3135 */
3136static void hdd_ipa_remove_header(char *name)
3137{
3138 struct ipa_ioc_get_hdr hdrlookup;
3139 int ret = 0, len;
3140 struct ipa_ioc_del_hdr *ipa_hdr;
3141
3142 cdf_mem_zero(&hdrlookup, sizeof(hdrlookup));
3143 strlcpy(hdrlookup.name, name, sizeof(hdrlookup.name));
3144 ret = ipa_get_hdr(&hdrlookup);
3145 if (ret) {
3146 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "Hdr deleted already %s, %d",
3147 name, ret);
3148 return;
3149 }
3150
3151 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "hdl: 0x%x", hdrlookup.hdl);
3152 len = sizeof(struct ipa_ioc_del_hdr) + sizeof(struct ipa_hdr_del) * 1;
3153 ipa_hdr = (struct ipa_ioc_del_hdr *)cdf_mem_malloc(len);
3154 if (ipa_hdr == NULL) {
3155 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "ipa_hdr allocation failed");
3156 return;
3157 }
3158 ipa_hdr->num_hdls = 1;
3159 ipa_hdr->commit = 0;
3160 ipa_hdr->hdl[0].hdl = hdrlookup.hdl;
3161 ipa_hdr->hdl[0].status = -1;
3162 ret = ipa_del_hdr(ipa_hdr);
3163 if (ret != 0)
3164 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "Delete header failed: %d",
3165 ret);
3166
3167 cdf_mem_free(ipa_hdr);
3168}
3169
3170/**
3171 * hdd_ipa_add_header_info() - Add IPA header for a given interface
3172 * @hdd_ipa: Global HDD IPA context
3173 * @iface_context: Interface-specific HDD IPA context
3174 * @mac_addr: Interface MAC address
3175 *
3176 * Return: 0 on success, negativer errno value on error
3177 */
3178static int hdd_ipa_add_header_info(struct hdd_ipa_priv *hdd_ipa,
3179 struct hdd_ipa_iface_context *iface_context,
3180 uint8_t *mac_addr)
3181{
3182 hdd_adapter_t *adapter = iface_context->adapter;
3183 char *ifname;
3184 struct ipa_ioc_add_hdr *ipa_hdr = NULL;
3185 int ret = -EINVAL;
3186 struct hdd_ipa_tx_hdr *tx_hdr = NULL;
3187 struct hdd_ipa_uc_tx_hdr *uc_tx_hdr = NULL;
3188
3189 ifname = adapter->dev->name;
3190
3191 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "Add Partial hdr: %s, %pM",
3192 ifname, mac_addr);
3193
3194 /* dynamically allocate the memory to add the hdrs */
3195 ipa_hdr = cdf_mem_malloc(sizeof(struct ipa_ioc_add_hdr)
3196 + sizeof(struct ipa_hdr_add));
3197 if (!ipa_hdr) {
3198 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
3199 "%s: ipa_hdr allocation failed", ifname);
3200 ret = -ENOMEM;
3201 goto end;
3202 }
3203
3204 ipa_hdr->commit = 0;
3205 ipa_hdr->num_hdrs = 1;
3206
3207 if (hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
3208 uc_tx_hdr = (struct hdd_ipa_uc_tx_hdr *)ipa_hdr->hdr[0].hdr;
3209 memcpy(uc_tx_hdr, &ipa_uc_tx_hdr, HDD_IPA_UC_WLAN_TX_HDR_LEN);
3210 memcpy(uc_tx_hdr->eth.h_source, mac_addr, ETH_ALEN);
3211 uc_tx_hdr->ipa_hd.vdev_id = iface_context->adapter->sessionId;
3212 HDD_IPA_LOG(CDF_TRACE_LEVEL_DEBUG,
3213 "ifname=%s, vdev_id=%d",
3214 ifname, uc_tx_hdr->ipa_hd.vdev_id);
3215 snprintf(ipa_hdr->hdr[0].name, IPA_RESOURCE_NAME_MAX, "%s%s",
3216 ifname, HDD_IPA_IPV4_NAME_EXT);
3217 ipa_hdr->hdr[0].hdr_len = HDD_IPA_UC_WLAN_TX_HDR_LEN;
3218 ipa_hdr->hdr[0].type = IPA_HDR_L2_ETHERNET_II;
3219 ipa_hdr->hdr[0].is_partial = 1;
3220 ipa_hdr->hdr[0].hdr_hdl = 0;
3221 ipa_hdr->hdr[0].is_eth2_ofst_valid = 1;
3222 ipa_hdr->hdr[0].eth2_ofst = HDD_IPA_UC_WLAN_HDR_DES_MAC_OFFSET;
3223
3224 ret = ipa_add_hdr(ipa_hdr);
3225 } else {
3226 tx_hdr = (struct hdd_ipa_tx_hdr *)ipa_hdr->hdr[0].hdr;
3227
3228 /* Set the Source MAC */
3229 memcpy(tx_hdr, &ipa_tx_hdr, HDD_IPA_WLAN_TX_HDR_LEN);
3230 memcpy(tx_hdr->eth.h_source, mac_addr, ETH_ALEN);
3231
3232 snprintf(ipa_hdr->hdr[0].name, IPA_RESOURCE_NAME_MAX, "%s%s",
3233 ifname, HDD_IPA_IPV4_NAME_EXT);
3234 ipa_hdr->hdr[0].hdr_len = HDD_IPA_WLAN_TX_HDR_LEN;
3235 ipa_hdr->hdr[0].is_partial = 1;
3236 ipa_hdr->hdr[0].hdr_hdl = 0;
3237 ipa_hdr->hdr[0].is_eth2_ofst_valid = 1;
3238 ipa_hdr->hdr[0].eth2_ofst = HDD_IPA_WLAN_HDR_DES_MAC_OFFSET;
3239
3240 /* Set the type to IPV4 in the header */
3241 tx_hdr->llc_snap.eth_type = cpu_to_be16(ETH_P_IP);
3242
3243 ret = ipa_add_hdr(ipa_hdr);
3244 }
3245 if (ret) {
3246 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "%s IPv4 add hdr failed: %d",
3247 ifname, ret);
3248 goto end;
3249 }
3250
3251 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "%s: IPv4 hdr_hdl: 0x%x",
3252 ipa_hdr->hdr[0].name, ipa_hdr->hdr[0].hdr_hdl);
3253
3254 if (hdd_ipa_is_ipv6_enabled(hdd_ipa->hdd_ctx)) {
3255 snprintf(ipa_hdr->hdr[0].name, IPA_RESOURCE_NAME_MAX, "%s%s",
3256 ifname, HDD_IPA_IPV6_NAME_EXT);
3257
3258 if (hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
3259 uc_tx_hdr =
3260 (struct hdd_ipa_uc_tx_hdr *)ipa_hdr->hdr[0].hdr;
3261 uc_tx_hdr->eth.h_proto = cpu_to_be16(ETH_P_IPV6);
3262 } else {
3263 /* Set the type to IPV6 in the header */
3264 tx_hdr = (struct hdd_ipa_tx_hdr *)ipa_hdr->hdr[0].hdr;
3265 tx_hdr->llc_snap.eth_type = cpu_to_be16(ETH_P_IPV6);
3266 }
3267
3268 ret = ipa_add_hdr(ipa_hdr);
3269 if (ret) {
3270 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
3271 "%s: IPv6 add hdr failed: %d", ifname, ret);
3272 goto clean_ipv4_hdr;
3273 }
3274
3275 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "%s: IPv6 hdr_hdl: 0x%x",
3276 ipa_hdr->hdr[0].name, ipa_hdr->hdr[0].hdr_hdl);
3277 }
3278
3279 cdf_mem_free(ipa_hdr);
3280
3281 return ret;
3282
3283clean_ipv4_hdr:
3284 snprintf(ipa_hdr->hdr[0].name, IPA_RESOURCE_NAME_MAX, "%s%s",
3285 ifname, HDD_IPA_IPV4_NAME_EXT);
3286 hdd_ipa_remove_header(ipa_hdr->hdr[0].name);
3287end:
3288 if (ipa_hdr)
3289 cdf_mem_free(ipa_hdr);
3290
3291 return ret;
3292}
3293
3294/**
3295 * hdd_ipa_clean_hdr() - Cleanup IPA on a given adapter
3296 * @adapter: Adapter upon which IPA was previously configured
3297 *
3298 * Return: None
3299 */
3300static void hdd_ipa_clean_hdr(hdd_adapter_t *adapter)
3301{
3302 struct hdd_ipa_priv *hdd_ipa = ghdd_ipa;
3303 int ret;
3304 char name_ipa[IPA_RESOURCE_NAME_MAX];
3305
3306 /* Remove the headers */
3307 snprintf(name_ipa, IPA_RESOURCE_NAME_MAX, "%s%s",
3308 adapter->dev->name, HDD_IPA_IPV4_NAME_EXT);
3309 hdd_ipa_remove_header(name_ipa);
3310
3311 if (hdd_ipa_is_ipv6_enabled(hdd_ipa->hdd_ctx)) {
3312 snprintf(name_ipa, IPA_RESOURCE_NAME_MAX, "%s%s",
3313 adapter->dev->name, HDD_IPA_IPV6_NAME_EXT);
3314 hdd_ipa_remove_header(name_ipa);
3315 }
3316 /* unregister the interface with IPA */
3317 ret = ipa_deregister_intf(adapter->dev->name);
3318 if (ret)
3319 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
3320 "%s: ipa_deregister_intf fail: %d",
3321 adapter->dev->name, ret);
3322}
3323
3324/**
3325 * hdd_ipa_cleanup_iface() - Cleanup IPA on a given interface
3326 * @iface_context: interface-specific IPA context
3327 *
3328 * Return: None
3329 */
3330static void hdd_ipa_cleanup_iface(struct hdd_ipa_iface_context *iface_context)
3331{
3332 if (iface_context == NULL)
3333 return;
3334
3335 hdd_ipa_clean_hdr(iface_context->adapter);
3336
3337 cdf_spin_lock_bh(&iface_context->interface_lock);
3338 iface_context->adapter->ipa_context = NULL;
3339 iface_context->adapter = NULL;
3340 iface_context->tl_context = NULL;
3341 cdf_spin_unlock_bh(&iface_context->interface_lock);
3342 iface_context->ifa_address = 0;
3343 if (!iface_context->hdd_ipa->num_iface) {
3344 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
3345 "NUM INTF 0, Invalid");
3346 CDF_ASSERT(0);
3347 }
3348 iface_context->hdd_ipa->num_iface--;
3349}
3350
3351/**
3352 * hdd_ipa_setup_iface() - Setup IPA on a given interface
3353 * @hdd_ipa: HDD IPA global context
3354 * @adapter: Interface upon which IPA is being setup
3355 * @sta_id: Station ID of the API instance
3356 *
3357 * Return: 0 on success, negative errno value on error
3358 */
3359static int hdd_ipa_setup_iface(struct hdd_ipa_priv *hdd_ipa,
3360 hdd_adapter_t *adapter, uint8_t sta_id)
3361{
3362 struct hdd_ipa_iface_context *iface_context = NULL;
3363 void *tl_context = NULL;
3364 int i, ret = 0;
3365
3366 /* Lower layer may send multiple START_BSS_EVENT in DFS mode or during
3367 * channel change indication. Since these indications are sent by lower
3368 * layer as SAP updates and IPA doesn't have to do anything for these
3369 * updates so ignoring!
3370 */
3371 if (WLAN_HDD_SOFTAP == adapter->device_mode && adapter->ipa_context)
3372 return 0;
3373
3374 for (i = 0; i < HDD_IPA_MAX_IFACE; i++) {
3375 if (hdd_ipa->iface_context[i].adapter == NULL) {
3376 iface_context = &(hdd_ipa->iface_context[i]);
3377 break;
3378 }
3379 }
3380
3381 if (iface_context == NULL) {
3382 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
3383 "All the IPA interfaces are in use");
3384 ret = -ENOMEM;
3385 goto end;
3386 }
3387
3388 adapter->ipa_context = iface_context;
3389 iface_context->adapter = adapter;
3390 iface_context->sta_id = sta_id;
3391 tl_context = ol_txrx_get_vdev_by_sta_id(sta_id);
3392
3393 if (tl_context == NULL) {
3394 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
3395 "Not able to get TL context sta_id: %d", sta_id);
3396 ret = -EINVAL;
3397 goto end;
3398 }
3399
3400 iface_context->tl_context = tl_context;
3401
3402 ret = hdd_ipa_add_header_info(hdd_ipa, iface_context,
3403 adapter->dev->dev_addr);
3404
3405 if (ret)
3406 goto end;
3407
3408 /* Configure the TX and RX pipes filter rules */
3409 ret = hdd_ipa_register_interface(hdd_ipa, iface_context);
3410 if (ret)
3411 goto cleanup_header;
3412
3413 hdd_ipa->num_iface++;
3414 return ret;
3415
3416cleanup_header:
3417
3418 hdd_ipa_clean_hdr(adapter);
3419end:
3420 if (iface_context)
3421 hdd_ipa_cleanup_iface(iface_context);
3422 return ret;
3423}
3424
3425/**
3426 * hdd_ipa_msg_free_fn() - Free an IPA message
3427 * @buff: pointer to the IPA message
3428 * @len: length of the IPA message
3429 * @type: type of IPA message
3430 *
3431 * Return: None
3432 */
3433static void hdd_ipa_msg_free_fn(void *buff, uint32_t len, uint32_t type)
3434{
3435 hddLog(LOG1, "msg type:%d, len:%d", type, len);
3436 ghdd_ipa->stats.num_free_msg++;
3437 cdf_mem_free(buff);
3438}
3439
3440/**
3441 * hdd_ipa_send_mcc_scc_msg() - send IPA WLAN_SWITCH_TO_MCC/SCC message
3442 * @mcc_mode: 0=MCC/1=SCC
3443 *
3444 * Return: 0 on success, negative errno value on error
3445 */
3446int hdd_ipa_send_mcc_scc_msg(hdd_context_t *pHddCtx, bool mcc_mode)
3447{
3448 hdd_adapter_list_node_t *adapter_node = NULL, *next = NULL;
3449 CDF_STATUS status;
3450 hdd_adapter_t *pAdapter;
3451 struct ipa_msg_meta meta;
3452 struct ipa_wlan_msg *msg;
3453 int ret;
3454
3455 if (!hdd_ipa_uc_sta_is_enabled(pHddCtx))
3456 return -EINVAL;
3457
3458 if (!pHddCtx->mcc_mode) {
3459 /* Flush TxRx queue for each adapter before switch to SCC */
3460 status = hdd_get_front_adapter(pHddCtx, &adapter_node);
3461 while (NULL != adapter_node && CDF_STATUS_SUCCESS == status) {
3462 pAdapter = adapter_node->pAdapter;
3463 if (pAdapter->device_mode == WLAN_HDD_INFRA_STATION ||
3464 pAdapter->device_mode == WLAN_HDD_SOFTAP) {
3465 hddLog(CDF_TRACE_LEVEL_INFO,
3466 "MCC->SCC: Flush TxRx queue(d_mode=%d)",
3467 pAdapter->device_mode);
3468 hdd_deinit_tx_rx(pAdapter);
3469 }
3470 status = hdd_get_next_adapter(
3471 pHddCtx, adapter_node, &next);
3472 adapter_node = next;
3473 }
3474 }
3475
3476 /* Send SCC/MCC Switching event to IPA */
3477 meta.msg_len = sizeof(*msg);
3478 msg = cdf_mem_malloc(meta.msg_len);
3479 if (msg == NULL) {
3480 hddLog(LOGE, "msg allocation failed");
3481 return -ENOMEM;
3482 }
3483
3484 meta.msg_type = mcc_mode ?
3485 WLAN_SWITCH_TO_MCC : WLAN_SWITCH_TO_SCC;
3486 hddLog(LOG1, "ipa_send_msg(Evt:%d)", meta.msg_type);
3487
3488 ret = ipa_send_msg(&meta, msg, hdd_ipa_msg_free_fn);
3489
3490 if (ret) {
3491 hddLog(LOGE, "ipa_send_msg(Evt:%d) - fail=%d",
3492 meta.msg_type, ret);
3493 cdf_mem_free(msg);
3494 }
3495
3496 return ret;
3497}
3498
3499/**
3500 * hdd_ipa_wlan_event_to_str() - convert IPA WLAN event to string
3501 * @event: IPA WLAN event to be converted to a string
3502 *
3503 * Return: ASCII string representing the IPA WLAN event
3504 */
3505static inline char *hdd_ipa_wlan_event_to_str(enum ipa_wlan_event event)
3506{
3507 switch (event) {
3508 case WLAN_CLIENT_CONNECT:
3509 return "WLAN_CLIENT_CONNECT";
3510 case WLAN_CLIENT_DISCONNECT:
3511 return "WLAN_CLIENT_DISCONNECT";
3512 case WLAN_CLIENT_POWER_SAVE_MODE:
3513 return "WLAN_CLIENT_POWER_SAVE_MODE";
3514 case WLAN_CLIENT_NORMAL_MODE:
3515 return "WLAN_CLIENT_NORMAL_MODE";
3516 case SW_ROUTING_ENABLE:
3517 return "SW_ROUTING_ENABLE";
3518 case SW_ROUTING_DISABLE:
3519 return "SW_ROUTING_DISABLE";
3520 case WLAN_AP_CONNECT:
3521 return "WLAN_AP_CONNECT";
3522 case WLAN_AP_DISCONNECT:
3523 return "WLAN_AP_DISCONNECT";
3524 case WLAN_STA_CONNECT:
3525 return "WLAN_STA_CONNECT";
3526 case WLAN_STA_DISCONNECT:
3527 return "WLAN_STA_DISCONNECT";
3528 case WLAN_CLIENT_CONNECT_EX:
3529 return "WLAN_CLIENT_CONNECT_EX";
3530
3531 case IPA_WLAN_EVENT_MAX:
3532 default:
3533 return "UNKNOWN";
3534 }
3535}
3536
3537/**
3538 * hdd_ipa_wlan_evt() - IPA event handler
3539 * @adapter: adapter upon which the event was received
3540 * @sta_id: station id for the event
3541 * @type: the event type
3542 * @mac_address: MAC address associated with the event
3543 *
3544 * Return: 0 on success, negative errno value on error
3545 */
3546int hdd_ipa_wlan_evt(hdd_adapter_t *adapter, uint8_t sta_id,
3547 enum ipa_wlan_event type, uint8_t *mac_addr)
3548{
3549 struct hdd_ipa_priv *hdd_ipa = ghdd_ipa;
3550 struct ipa_msg_meta meta;
3551 struct ipa_wlan_msg *msg;
3552 struct ipa_wlan_msg_ex *msg_ex = NULL;
3553 int ret;
3554
3555 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "%s: %s evt, MAC: %pM sta_id: %d",
3556 adapter->dev->name, hdd_ipa_wlan_event_to_str(type),
3557 mac_addr, sta_id);
3558
3559 if (type >= IPA_WLAN_EVENT_MAX)
3560 return -EINVAL;
3561
3562 if (WARN_ON(is_zero_ether_addr(mac_addr)))
3563 return -EINVAL;
3564
3565 if (!hdd_ipa || !hdd_ipa_is_enabled(hdd_ipa->hdd_ctx)) {
3566 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "IPA OFFLOAD NOT ENABLED");
3567 return -EINVAL;
3568 }
3569
3570 if (hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx) &&
3571 !hdd_ipa_uc_sta_is_enabled(hdd_ipa->hdd_ctx) &&
3572 (WLAN_HDD_SOFTAP != adapter->device_mode)) {
3573 return 0;
3574 }
3575
3576 /*
3577 * During IPA UC resource loading/unloading new events can be issued.
3578 * Store the events separately and handle them later.
3579 */
3580 if (hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx) &&
3581 ((hdd_ipa->resource_loading) ||
3582 (hdd_ipa->resource_unloading))) {
Yun Parkf19e07d2015-11-20 11:34:27 -08003583 unsigned int pending_event_count;
3584 struct ipa_uc_pending_event *pending_event = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003585
Yun Parkf19e07d2015-11-20 11:34:27 -08003586 hdd_err("IPA resource %s inprogress",
3587 hdd_ipa->resource_loading ? "load":"unload");
3588
3589 cdf_mutex_acquire(&hdd_ipa->event_lock);
3590
3591 cdf_list_size(&hdd_ipa->pending_event, &pending_event_count);
3592 if (pending_event_count >= HDD_IPA_MAX_PENDING_EVENT_COUNT) {
3593 hdd_notice("Reached max pending event count");
3594 cdf_list_remove_front(&hdd_ipa->pending_event,
3595 (cdf_list_node_t **)&pending_event);
3596 } else {
3597 pending_event =
3598 (struct ipa_uc_pending_event *)cdf_mem_malloc(
3599 sizeof(struct ipa_uc_pending_event));
3600 }
3601
3602 if (!pending_event) {
3603 hdd_err("Pending event memory alloc fail");
3604 cdf_mutex_release(&hdd_ipa->event_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003605 return -ENOMEM;
3606 }
Yun Parkf19e07d2015-11-20 11:34:27 -08003607
3608 pending_event->adapter = adapter;
3609 pending_event->sta_id = sta_id;
3610 pending_event->type = type;
3611 cdf_mem_copy(pending_event->mac_addr,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003612 mac_addr,
3613 CDF_MAC_ADDR_SIZE);
3614 cdf_list_insert_back(&hdd_ipa->pending_event,
Yun Parkf19e07d2015-11-20 11:34:27 -08003615 &pending_event->node);
3616
3617 cdf_mutex_release(&hdd_ipa->event_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003618 return 0;
3619 }
3620
3621 hdd_ipa->stats.event[type]++;
3622
Leo Chang3bc8fed2015-11-13 10:59:47 -08003623 meta.msg_type = type;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003624 switch (type) {
3625 case WLAN_STA_CONNECT:
3626 /* STA already connected and without disconnect, connect again
3627 * This is Roaming scenario
3628 */
3629 if (hdd_ipa->sta_connected)
3630 hdd_ipa_cleanup_iface(adapter->ipa_context);
3631
3632 if ((hdd_ipa_uc_sta_is_enabled(hdd_ipa->hdd_ctx)) &&
3633 (!hdd_ipa->sta_connected))
3634 hdd_ipa_uc_offload_enable_disable(adapter,
3635 SIR_STA_RX_DATA_OFFLOAD, 1);
3636
3637 cdf_mutex_acquire(&hdd_ipa->event_lock);
3638
3639 if (!hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
3640 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
3641 "%s: Evt: %d, IPA UC OFFLOAD NOT ENABLED",
3642 msg_ex->name, meta.msg_type);
3643 } else if ((!hdd_ipa->sap_num_connected_sta) &&
3644 (!hdd_ipa->sta_connected)) {
3645 /* Enable IPA UC TX PIPE when STA connected */
3646 ret = hdd_ipa_uc_handle_first_con(hdd_ipa);
Yun Park4cab6ee2015-10-27 11:43:40 -07003647 if (ret) {
3648 cdf_mutex_release(&hdd_ipa->event_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003649 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
3650 "handle 1st con ret %d", ret);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003651 hdd_ipa_uc_offload_enable_disable(adapter,
3652 SIR_STA_RX_DATA_OFFLOAD, 0);
3653 goto end;
3654 }
3655 }
3656 ret = hdd_ipa_setup_iface(hdd_ipa, adapter, sta_id);
3657 if (ret) {
3658 cdf_mutex_release(&hdd_ipa->event_lock);
3659 hdd_ipa_uc_offload_enable_disable(adapter,
3660 SIR_STA_RX_DATA_OFFLOAD, 0);
3661 goto end;
3662
3663#ifdef IPA_UC_OFFLOAD
3664 vdev_to_iface[adapter->sessionId] =
3665 ((struct hdd_ipa_iface_context *)
3666 (adapter->ipa_context))->iface_id;
3667#endif /* IPA_UC_OFFLOAD */
3668 }
3669
3670 cdf_mutex_release(&hdd_ipa->event_lock);
3671
3672 hdd_ipa->sta_connected = 1;
3673 break;
3674
3675 case WLAN_AP_CONNECT:
3676 /* For DFS channel we get two start_bss event (before and after
3677 * CAC). Also when ACS range includes both DFS and non DFS
3678 * channels, we could possibly change channel many times due to
3679 * RADAR detection and chosen channel may not be a DFS channels.
3680 * So dont return error here. Just discard the event.
3681 */
3682 if (adapter->ipa_context)
3683 return 0;
3684
3685 if (hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
3686 hdd_ipa_uc_offload_enable_disable(adapter,
3687 SIR_AP_RX_DATA_OFFLOAD, 1);
3688 }
3689 cdf_mutex_acquire(&hdd_ipa->event_lock);
3690 ret = hdd_ipa_setup_iface(hdd_ipa, adapter, sta_id);
3691 if (ret) {
3692 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
3693 "%s: Evt: %d, Interface setup failed",
3694 msg_ex->name, meta.msg_type);
3695 cdf_mutex_release(&hdd_ipa->event_lock);
3696 goto end;
3697
3698#ifdef IPA_UC_OFFLOAD
3699 vdev_to_iface[adapter->sessionId] =
3700 ((struct hdd_ipa_iface_context *)
3701 (adapter->ipa_context))->iface_id;
3702#endif /* IPA_UC_OFFLOAD */
3703 }
3704 cdf_mutex_release(&hdd_ipa->event_lock);
3705 break;
3706
3707 case WLAN_STA_DISCONNECT:
3708 cdf_mutex_acquire(&hdd_ipa->event_lock);
3709 hdd_ipa_cleanup_iface(adapter->ipa_context);
3710
3711 if (!hdd_ipa->sta_connected) {
3712 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
3713 "%s: Evt: %d, STA already disconnected",
3714 msg_ex->name, meta.msg_type);
3715 cdf_mutex_release(&hdd_ipa->event_lock);
3716 return -EINVAL;
3717 }
3718 hdd_ipa->sta_connected = 0;
3719 if (!hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
3720 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
3721 "%s: IPA UC OFFLOAD NOT ENABLED",
3722 msg_ex->name);
3723 } else {
3724 /* Disable IPA UC TX PIPE when STA disconnected */
3725 if ((!hdd_ipa->sap_num_connected_sta) ||
3726 ((!hdd_ipa->num_iface) &&
3727 (HDD_IPA_UC_NUM_WDI_PIPE ==
3728 hdd_ipa->activated_fw_pipe))) {
3729 hdd_ipa_uc_handle_last_discon(hdd_ipa);
3730 }
3731 }
3732
3733 if (hdd_ipa_uc_sta_is_enabled(hdd_ipa->hdd_ctx)) {
3734 hdd_ipa_uc_offload_enable_disable(adapter,
3735 SIR_STA_RX_DATA_OFFLOAD, 0);
3736 vdev_to_iface[adapter->sessionId] = HDD_IPA_MAX_IFACE;
3737 }
3738
3739 cdf_mutex_release(&hdd_ipa->event_lock);
3740 break;
3741
3742 case WLAN_AP_DISCONNECT:
3743 if (!adapter->ipa_context) {
3744 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
3745 "%s: Evt: %d, SAP already disconnected",
3746 msg_ex->name, meta.msg_type);
3747 return -EINVAL;
3748 }
3749
3750 cdf_mutex_acquire(&hdd_ipa->event_lock);
3751 hdd_ipa_cleanup_iface(adapter->ipa_context);
3752 if ((!hdd_ipa->num_iface) &&
3753 (HDD_IPA_UC_NUM_WDI_PIPE ==
3754 hdd_ipa->activated_fw_pipe)) {
3755 if (hdd_ipa->hdd_ctx->isUnloadInProgress) {
3756 /*
3757 * We disable WDI pipes directly here since
3758 * IPA_OPCODE_TX/RX_SUSPEND message will not be
3759 * processed when unloading WLAN driver is in
3760 * progress
3761 */
3762 hdd_ipa_uc_disable_pipes(hdd_ipa);
3763 } else {
3764 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
3765 "NO INTF left but still pipe clean up");
3766 hdd_ipa_uc_handle_last_discon(hdd_ipa);
3767 }
3768 }
3769
3770 if (hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
3771 hdd_ipa_uc_offload_enable_disable(adapter,
3772 SIR_AP_RX_DATA_OFFLOAD, 0);
3773 vdev_to_iface[adapter->sessionId] = HDD_IPA_MAX_IFACE;
3774 }
3775 cdf_mutex_release(&hdd_ipa->event_lock);
3776 break;
3777
3778 case WLAN_CLIENT_CONNECT_EX:
3779 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "%d %d",
3780 adapter->dev->ifindex, sta_id);
3781
3782 if (!hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
3783 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
3784 "%s: Evt: %d, IPA UC OFFLOAD NOT ENABLED",
3785 adapter->dev->name, meta.msg_type);
3786 return 0;
3787 }
3788
3789 cdf_mutex_acquire(&hdd_ipa->event_lock);
3790 if (hdd_ipa_uc_find_add_assoc_sta(hdd_ipa,
3791 true, sta_id)) {
3792 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
3793 "%s: STA ID %d found, not valid",
3794 adapter->dev->name, sta_id);
3795 cdf_mutex_release(&hdd_ipa->event_lock);
3796 return 0;
3797 }
Yun Park312f71a2015-12-08 10:22:42 -08003798
3799 /* Enable IPA UC Data PIPEs when first STA connected */
3800 if ((0 == hdd_ipa->sap_num_connected_sta) &&
3801 (!hdd_ipa_uc_sta_is_enabled(hdd_ipa->hdd_ctx) ||
3802 !hdd_ipa->sta_connected)) {
3803 ret = hdd_ipa_uc_handle_first_con(hdd_ipa);
3804 if (ret) {
3805 cdf_mutex_release(&hdd_ipa->event_lock);
3806 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
3807 "%s: handle 1st con ret %d",
3808 adapter->dev->name, ret);
3809 return ret;
3810 }
3811 }
3812
3813 hdd_ipa->sap_num_connected_sta++;
3814 hdd_ipa->pending_cons_req = false;
3815
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003816 cdf_mutex_release(&hdd_ipa->event_lock);
3817
3818 meta.msg_type = type;
3819 meta.msg_len = (sizeof(struct ipa_wlan_msg_ex) +
3820 sizeof(struct ipa_wlan_hdr_attrib_val));
3821 msg_ex = cdf_mem_malloc(meta.msg_len);
3822
3823 if (msg_ex == NULL) {
3824 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
3825 "msg_ex allocation failed");
3826 return -ENOMEM;
3827 }
3828 strlcpy(msg_ex->name, adapter->dev->name,
3829 IPA_RESOURCE_NAME_MAX);
3830 msg_ex->num_of_attribs = 1;
3831 msg_ex->attribs[0].attrib_type = WLAN_HDR_ATTRIB_MAC_ADDR;
3832 if (hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
3833 msg_ex->attribs[0].offset =
3834 HDD_IPA_UC_WLAN_HDR_DES_MAC_OFFSET;
3835 } else {
3836 msg_ex->attribs[0].offset =
3837 HDD_IPA_WLAN_HDR_DES_MAC_OFFSET;
3838 }
3839 memcpy(msg_ex->attribs[0].u.mac_addr, mac_addr,
3840 IPA_MAC_ADDR_SIZE);
3841
3842 ret = ipa_send_msg(&meta, msg_ex, hdd_ipa_msg_free_fn);
3843
3844 if (ret) {
3845 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "%s: Evt: %d : %d",
3846 msg_ex->name, meta.msg_type, ret);
3847 cdf_mem_free(msg_ex);
3848 return ret;
3849 }
3850 hdd_ipa->stats.num_send_msg++;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003851 return ret;
3852
3853 case WLAN_CLIENT_DISCONNECT:
3854 if (!hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
3855 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
3856 "%s: IPA UC OFFLOAD NOT ENABLED",
3857 msg_ex->name);
3858 return 0;
3859 }
3860
3861 cdf_mutex_acquire(&hdd_ipa->event_lock);
3862 if (!hdd_ipa_uc_find_add_assoc_sta(hdd_ipa, false, sta_id)) {
3863 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
3864 "%s: STA ID %d NOT found, not valid",
3865 msg_ex->name, sta_id);
3866 cdf_mutex_release(&hdd_ipa->event_lock);
3867 return 0;
3868 }
3869 hdd_ipa->sap_num_connected_sta--;
3870 /* Disable IPA UC TX PIPE when last STA disconnected */
3871 if (!hdd_ipa->sap_num_connected_sta
3872 && (!hdd_ipa_uc_sta_is_enabled(hdd_ipa->hdd_ctx) ||
3873 !hdd_ipa->sta_connected)
3874 && (false == hdd_ipa->resource_unloading)
3875 && (HDD_IPA_UC_NUM_WDI_PIPE ==
3876 hdd_ipa->activated_fw_pipe))
3877 hdd_ipa_uc_handle_last_discon(hdd_ipa);
3878 cdf_mutex_release(&hdd_ipa->event_lock);
3879 break;
3880
3881 default:
3882 return 0;
3883 }
3884
3885 meta.msg_len = sizeof(struct ipa_wlan_msg);
3886 msg = cdf_mem_malloc(meta.msg_len);
3887 if (msg == NULL) {
3888 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "msg allocation failed");
3889 return -ENOMEM;
3890 }
3891
3892 meta.msg_type = type;
3893 strlcpy(msg->name, adapter->dev->name, IPA_RESOURCE_NAME_MAX);
3894 memcpy(msg->mac_addr, mac_addr, ETH_ALEN);
3895
3896 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "%s: Evt: %d",
3897 msg->name, meta.msg_type);
3898
3899 ret = ipa_send_msg(&meta, msg, hdd_ipa_msg_free_fn);
3900
3901 if (ret) {
3902 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "%s: Evt: %d fail:%d",
3903 msg->name, meta.msg_type, ret);
3904 cdf_mem_free(msg);
3905 return ret;
3906 }
3907
3908 hdd_ipa->stats.num_send_msg++;
3909
3910end:
3911 return ret;
3912}
3913
3914/**
3915 * hdd_ipa_rm_state_to_str() - Convert IPA RM state to string
3916 * @state: IPA RM state value
3917 *
3918 * Return: ASCII string representing the IPA RM state
3919 */
3920static inline char *hdd_ipa_rm_state_to_str(enum hdd_ipa_rm_state state)
3921{
3922 switch (state) {
3923 case HDD_IPA_RM_RELEASED:
3924 return "RELEASED";
3925 case HDD_IPA_RM_GRANT_PENDING:
3926 return "GRANT_PENDING";
3927 case HDD_IPA_RM_GRANTED:
3928 return "GRANTED";
3929 }
3930
3931 return "UNKNOWN";
3932}
3933
3934/**
3935 * hdd_ipa_init() - IPA initialization function
3936 * @hdd_ctx: HDD global context
3937 *
3938 * Allocate hdd_ipa resources, ipa pipe resource and register
3939 * wlan interface with IPA module.
3940 *
3941 * Return: CDF_STATUS enumeration
3942 */
3943CDF_STATUS hdd_ipa_init(hdd_context_t *hdd_ctx)
3944{
3945 struct hdd_ipa_priv *hdd_ipa = NULL;
3946 int ret, i;
3947 struct hdd_ipa_iface_context *iface_context = NULL;
3948
3949 if (!hdd_ipa_is_enabled(hdd_ctx))
3950 return CDF_STATUS_SUCCESS;
3951
3952 hdd_ipa = cdf_mem_malloc(sizeof(*hdd_ipa));
3953 if (!hdd_ipa) {
3954 HDD_IPA_LOG(CDF_TRACE_LEVEL_FATAL, "hdd_ipa allocation failed");
Leo Chang3bc8fed2015-11-13 10:59:47 -08003955 goto fail_return;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003956 }
3957
3958 hdd_ctx->hdd_ipa = hdd_ipa;
3959 ghdd_ipa = hdd_ipa;
3960 hdd_ipa->hdd_ctx = hdd_ctx;
3961 hdd_ipa->num_iface = 0;
Leo Chang3bc8fed2015-11-13 10:59:47 -08003962 ol_txrx_ipa_uc_get_resource(cds_get_context(CDF_MODULE_ID_TXRX),
3963 &hdd_ipa->ce_sr_base_paddr,
3964 &hdd_ipa->ce_sr_ring_size,
3965 &hdd_ipa->ce_reg_paddr,
3966 &hdd_ipa->tx_comp_ring_base_paddr,
3967 &hdd_ipa->tx_comp_ring_size,
3968 &hdd_ipa->tx_num_alloc_buffer,
3969 &hdd_ipa->rx_rdy_ring_base_paddr,
3970 &hdd_ipa->rx_rdy_ring_size,
3971 &hdd_ipa->rx_proc_done_idx_paddr,
3972 &hdd_ipa->rx_proc_done_idx_vaddr,
3973 &hdd_ipa->rx2_rdy_ring_base_paddr,
3974 &hdd_ipa->rx2_rdy_ring_size,
3975 &hdd_ipa->rx2_proc_done_idx_paddr,
3976 &hdd_ipa->rx2_proc_done_idx_vaddr);
3977 if ((0 == hdd_ipa->ce_sr_base_paddr) ||
3978 (0 == hdd_ipa->tx_comp_ring_base_paddr) ||
3979 (0 == hdd_ipa->rx_rdy_ring_base_paddr) ||
3980 (0 == hdd_ipa->rx2_rdy_ring_base_paddr)) {
3981 HDD_IPA_LOG(CDF_TRACE_LEVEL_FATAL,
3982 "IPA UC resource alloc fail");
3983 goto fail_get_resource;
3984 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003985
3986 /* Create the interface context */
3987 for (i = 0; i < HDD_IPA_MAX_IFACE; i++) {
3988 iface_context = &hdd_ipa->iface_context[i];
3989 iface_context->hdd_ipa = hdd_ipa;
3990 iface_context->cons_client =
3991 hdd_ipa_adapter_2_client[i].cons_client;
3992 iface_context->prod_client =
3993 hdd_ipa_adapter_2_client[i].prod_client;
3994 iface_context->iface_id = i;
3995 iface_context->adapter = NULL;
3996 cdf_spinlock_init(&iface_context->interface_lock);
3997 }
3998
3999#ifdef CONFIG_CNSS
4000 cnss_init_work(&hdd_ipa->pm_work, hdd_ipa_pm_send_pkt_to_tl);
4001#else
4002 INIT_WORK(&hdd_ipa->pm_work, hdd_ipa_pm_send_pkt_to_tl);
4003#endif
4004 cdf_spinlock_init(&hdd_ipa->pm_lock);
4005 cdf_nbuf_queue_init(&hdd_ipa->pm_queue_head);
4006
4007 ret = hdd_ipa_setup_rm(hdd_ipa);
4008 if (ret)
4009 goto fail_setup_rm;
4010
4011 if (hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
4012 hdd_ipa_uc_rt_debug_init(hdd_ctx);
4013 cdf_mem_zero(&hdd_ipa->stats, sizeof(hdd_ipa->stats));
4014 hdd_ipa->sap_num_connected_sta = 0;
4015 hdd_ipa->ipa_tx_packets_diff = 0;
4016 hdd_ipa->ipa_rx_packets_diff = 0;
4017 hdd_ipa->ipa_p_tx_packets = 0;
4018 hdd_ipa->ipa_p_rx_packets = 0;
4019 hdd_ipa->resource_loading = false;
4020 hdd_ipa->resource_unloading = false;
4021 hdd_ipa->sta_connected = 0;
Leo Change3e49442015-10-26 20:07:13 -07004022 hdd_ipa->ipa_pipes_down = true;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004023 /* Setup IPA sys_pipe for MCC */
4024 if (hdd_ipa_uc_sta_is_enabled(hdd_ipa->hdd_ctx)) {
4025 ret = hdd_ipa_setup_sys_pipe(hdd_ipa);
4026 if (ret)
4027 goto fail_create_sys_pipe;
4028 }
4029 hdd_ipa_uc_ol_init(hdd_ctx);
4030 } else {
4031 ret = hdd_ipa_setup_sys_pipe(hdd_ipa);
4032 if (ret)
4033 goto fail_create_sys_pipe;
4034 }
4035
4036 return CDF_STATUS_SUCCESS;
4037
4038fail_create_sys_pipe:
4039 hdd_ipa_destroy_rm_resource(hdd_ipa);
4040fail_setup_rm:
Leo Chang3bc8fed2015-11-13 10:59:47 -08004041 cdf_spinlock_destroy(&hdd_ipa->pm_lock);
4042fail_get_resource:
4043 cdf_mem_free(hdd_ipa);
4044 hdd_ctx->hdd_ipa = NULL;
4045 ghdd_ipa = NULL;
4046fail_return:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004047 return CDF_STATUS_E_FAILURE;
4048}
4049
4050/**
Yun Parkf19e07d2015-11-20 11:34:27 -08004051 * hdd_ipa_cleanup_pending_event() - Cleanup IPA pending event list
4052 * @hdd_ipa: pointer to HDD IPA struct
4053 *
4054 * Return: none
4055 */
4056void hdd_ipa_cleanup_pending_event(struct hdd_ipa_priv *hdd_ipa)
4057{
4058 struct ipa_uc_pending_event *pending_event = NULL;
4059
4060 while (cdf_list_remove_front(&hdd_ipa->pending_event,
4061 (cdf_list_node_t **)&pending_event) == CDF_STATUS_SUCCESS) {
4062 cdf_mem_free(pending_event);
4063 }
4064
4065 cdf_list_destroy(&hdd_ipa->pending_event);
4066}
4067
4068/**
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004069 * hdd_ipa_cleanup - IPA cleanup function
4070 * @hdd_ctx: HDD global context
4071 *
4072 * Return: CDF_STATUS enumeration
4073 */
4074CDF_STATUS hdd_ipa_cleanup(hdd_context_t *hdd_ctx)
4075{
4076 struct hdd_ipa_priv *hdd_ipa = hdd_ctx->hdd_ipa;
4077 int i;
4078 struct hdd_ipa_iface_context *iface_context = NULL;
4079 cdf_nbuf_t skb;
4080 struct hdd_ipa_pm_tx_cb *pm_tx_cb = NULL;
4081
4082 if (!hdd_ipa_is_enabled(hdd_ctx))
4083 return CDF_STATUS_SUCCESS;
4084
4085 if (!hdd_ipa_uc_is_enabled(hdd_ctx)) {
4086 unregister_inetaddr_notifier(&hdd_ipa->ipv4_notifier);
4087 hdd_ipa_teardown_sys_pipe(hdd_ipa);
4088 }
4089
4090 /* Teardown IPA sys_pipe for MCC */
4091 if (hdd_ipa_uc_sta_is_enabled(hdd_ipa->hdd_ctx))
4092 hdd_ipa_teardown_sys_pipe(hdd_ipa);
4093
4094 hdd_ipa_destroy_rm_resource(hdd_ipa);
4095
4096#ifdef WLAN_OPEN_SOURCE
4097 cancel_work_sync(&hdd_ipa->pm_work);
4098#endif
4099
4100 cdf_spin_lock_bh(&hdd_ipa->pm_lock);
4101
4102 while (((skb = cdf_nbuf_queue_remove(&hdd_ipa->pm_queue_head)) != NULL)) {
4103 cdf_spin_unlock_bh(&hdd_ipa->pm_lock);
4104
4105 pm_tx_cb = (struct hdd_ipa_pm_tx_cb *)skb->cb;
4106 ipa_free_skb(pm_tx_cb->ipa_tx_desc);
4107
4108 cdf_spin_lock_bh(&hdd_ipa->pm_lock);
4109 }
4110 cdf_spin_unlock_bh(&hdd_ipa->pm_lock);
4111
4112 cdf_spinlock_destroy(&hdd_ipa->pm_lock);
4113
4114 /* destory the interface lock */
4115 for (i = 0; i < HDD_IPA_MAX_IFACE; i++) {
4116 iface_context = &hdd_ipa->iface_context[i];
4117 cdf_spinlock_destroy(&iface_context->interface_lock);
4118 }
4119
4120 /* This should never hit but still make sure that there are no pending
4121 * descriptor in IPA hardware
4122 */
4123 if (hdd_ipa->pending_hw_desc_cnt != 0) {
4124 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
4125 "IPA Pending write done: %d Waiting!",
4126 hdd_ipa->pending_hw_desc_cnt);
4127
4128 for (i = 0; hdd_ipa->pending_hw_desc_cnt != 0 && i < 10; i++) {
4129 usleep_range(100, 100);
4130 }
4131
4132 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
4133 "IPA Pending write done: desc: %d %s(%d)!",
4134 hdd_ipa->pending_hw_desc_cnt,
4135 hdd_ipa->pending_hw_desc_cnt == 0 ? "completed"
4136 : "leak", i);
4137 }
4138 if (hdd_ipa_uc_is_enabled(hdd_ctx)) {
4139 hdd_ipa_uc_rt_debug_deinit(hdd_ctx);
4140 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
4141 "%s: Disconnect TX PIPE", __func__);
4142 ipa_disconnect_wdi_pipe(hdd_ipa->tx_pipe_handle);
4143 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
4144 "%s: Disconnect RX PIPE", __func__);
4145 ipa_disconnect_wdi_pipe(hdd_ipa->rx_pipe_handle);
4146 cdf_mutex_destroy(&hdd_ipa->event_lock);
Yun Parke59b3912015-11-09 13:19:06 -08004147 cdf_mutex_destroy(&hdd_ipa->ipa_lock);
Yun Parkf19e07d2015-11-20 11:34:27 -08004148 hdd_ipa_cleanup_pending_event(hdd_ipa);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004149
4150#ifdef WLAN_OPEN_SOURCE
4151 for (i = 0; i < HDD_IPA_UC_OPCODE_MAX; i++) {
4152 cancel_work_sync(&hdd_ipa->uc_op_work[i].work);
4153 hdd_ipa->uc_op_work[i].msg = NULL;
4154 }
4155#endif
4156 }
4157
4158 cdf_mem_free(hdd_ipa);
4159 hdd_ctx->hdd_ipa = NULL;
4160
4161 return CDF_STATUS_SUCCESS;
4162}
4163#endif /* IPA_OFFLOAD */