blob: 42883c18264f7aac744fd3497009ecfc998c38dc [file] [log] [blame]
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001/*
2 * Copyright (c) 2013-2015 The Linux Foundation. All rights reserved.
3 *
4 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5 *
6 *
7 * Permission to use, copy, modify, and/or distribute this software for
8 * any purpose with or without fee is hereby granted, provided that the
9 * above copyright notice and this permission notice appear in all
10 * copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19 * PERFORMANCE OF THIS SOFTWARE.
20 */
21
22/*
23 * This file was originally distributed by Qualcomm Atheros, Inc.
24 * under proprietary terms before Copyright ownership was assigned
25 * to the Linux Foundation.
26 */
27
28/**
29 * DOC: wlan_hdd_ipa.c
30 *
31 * WLAN HDD and ipa interface implementation
32 * Originally written by Qualcomm Atheros, Inc
33 */
34
35#ifdef IPA_OFFLOAD
36
37/* Include Files */
38#include <wlan_hdd_includes.h>
39#include <wlan_hdd_ipa.h>
40
41#include <linux/etherdevice.h>
42#include <linux/atomic.h>
43#include <linux/netdevice.h>
44#include <linux/skbuff.h>
45#include <linux/list.h>
46#include <linux/debugfs.h>
47#include <linux/inetdevice.h>
48#include <linux/ip.h>
49#include <wlan_hdd_softap_tx_rx.h>
50#include <ol_txrx_osif_api.h>
51
52#include "cds_sched.h"
53
54#include "wma.h"
55#include "wma_api.h"
56
57#define HDD_IPA_DESC_BUFFER_RATIO 4
58#define HDD_IPA_IPV4_NAME_EXT "_ipv4"
59#define HDD_IPA_IPV6_NAME_EXT "_ipv6"
60
61#define HDD_IPA_RX_INACTIVITY_MSEC_DELAY 1000
62#define HDD_IPA_UC_WLAN_HDR_DES_MAC_OFFSET 12
63#define HDD_IPA_UC_WLAN_8023_HDR_SIZE 14
64/* WDI TX and RX PIPE */
65#define HDD_IPA_UC_NUM_WDI_PIPE 2
66#define HDD_IPA_UC_MAX_PENDING_EVENT 33
67
68#define HDD_IPA_UC_DEBUG_DUMMY_MEM_SIZE 32000
69#define HDD_IPA_UC_RT_DEBUG_PERIOD 300
70#define HDD_IPA_UC_RT_DEBUG_BUF_COUNT 30
71#define HDD_IPA_UC_RT_DEBUG_FILL_INTERVAL 10000
72
73#define HDD_IPA_WLAN_HDR_DES_MAC_OFFSET 0
74#define HDD_IPA_MAX_IFACE 3
75#define HDD_IPA_MAX_SYSBAM_PIPE 4
76#define HDD_IPA_RX_PIPE HDD_IPA_MAX_IFACE
77#define HDD_IPA_ENABLE_MASK BIT(0)
78#define HDD_IPA_PRE_FILTER_ENABLE_MASK BIT(1)
79#define HDD_IPA_IPV6_ENABLE_MASK BIT(2)
80#define HDD_IPA_RM_ENABLE_MASK BIT(3)
81#define HDD_IPA_CLK_SCALING_ENABLE_MASK BIT(4)
82#define HDD_IPA_UC_ENABLE_MASK BIT(5)
83#define HDD_IPA_UC_STA_ENABLE_MASK BIT(6)
84#define HDD_IPA_REAL_TIME_DEBUGGING BIT(8)
85
Yun Parkf19e07d2015-11-20 11:34:27 -080086#define HDD_IPA_MAX_PENDING_EVENT_COUNT 20
87
/**
 * enum hdd_ipa_uc_op_code - operation codes exchanged with the IPA uC
 * @HDD_IPA_UC_OPCODE_TX_SUSPEND: TX pipe suspend
 * @HDD_IPA_UC_OPCODE_TX_RESUME: TX pipe resume
 * @HDD_IPA_UC_OPCODE_RX_SUSPEND: RX pipe suspend
 * @HDD_IPA_UC_OPCODE_RX_RESUME: RX pipe resume
 * @HDD_IPA_UC_OPCODE_STATS: uC firmware statistics delivery
 * @HDD_IPA_UC_OPCODE_MAX: sentinel, keep last
 *
 * Values index both op_string[] and hdd_ipa_priv.uc_op_work[];
 * keep all three in sync.
 */
typedef enum {
	HDD_IPA_UC_OPCODE_TX_SUSPEND = 0,
	HDD_IPA_UC_OPCODE_TX_RESUME = 1,
	HDD_IPA_UC_OPCODE_RX_SUSPEND = 2,
	HDD_IPA_UC_OPCODE_RX_RESUME = 3,
	HDD_IPA_UC_OPCODE_STATS = 4,
	/* keep this last */
	HDD_IPA_UC_OPCODE_MAX
} hdd_ipa_uc_op_code;
97
98/**
99 * enum - Reason codes for stat query
100 *
101 * @HDD_IPA_UC_STAT_REASON_NONE: Initial value
102 * @HDD_IPA_UC_STAT_REASON_DEBUG: For debug/info
103 * @HDD_IPA_UC_STAT_REASON_BW_CAL: For bandwidth calibration
104 */
105enum {
106 HDD_IPA_UC_STAT_REASON_NONE,
107 HDD_IPA_UC_STAT_REASON_DEBUG,
108 HDD_IPA_UC_STAT_REASON_BW_CAL
109};
110
111/**
112 * enum hdd_ipa_rm_state - IPA resource manager state
113 * @HDD_IPA_RM_RELEASED: PROD pipe resource released
114 * @HDD_IPA_RM_GRANT_PENDING: PROD pipe resource requested but not granted yet
115 * @HDD_IPA_RM_GRANTED: PROD pipe resource granted
116 */
117enum hdd_ipa_rm_state {
118 HDD_IPA_RM_RELEASED,
119 HDD_IPA_RM_GRANT_PENDING,
120 HDD_IPA_RM_GRANTED,
121};
122
/**
 * struct llc_snap_hdr - LLC/SNAP header following an 802.3 header
 * @dsap: destination service access point
 * @ssap: source service access point
 * @resv: control byte plus OUI (presumably 0x03 + zero OUI; see ipa_tx_hdr)
 * @eth_type: EtherType carried by the SNAP header, big-endian
 */
struct llc_snap_hdr {
	uint8_t dsap;
	uint8_t ssap;
	uint8_t resv[4];
	__be16 eth_type;
} __packed;
129
Leo Chang3bc8fed2015-11-13 10:59:47 -0800130/**
131 * struct hdd_ipa_tx_hdr - header type which IPA should handle to TX packet
132 * @eth: ether II header
133 * @llc_snap: LLC snap header
134 *
135 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800136struct hdd_ipa_tx_hdr {
137 struct ethhdr eth;
138 struct llc_snap_hdr llc_snap;
139} __packed;
140
Leo Chang3bc8fed2015-11-13 10:59:47 -0800141/**
142 * struct frag_header - fragment header type registered to IPA hardware
143 * @length: fragment length
144 * @reserved1: Reserved not used
145 * @reserved2: Reserved not used
146 *
147 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800148struct frag_header {
Leo Chang3bc8fed2015-11-13 10:59:47 -0800149 uint16_t length;
150 uint32_t reserved1;
151 uint32_t reserved2;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800152} __packed;
153
Leo Chang3bc8fed2015-11-13 10:59:47 -0800154/**
155 * struct ipa_header - ipa header type registered to IPA hardware
156 * @vdev_id: vdev id
157 * @reserved: Reserved not used
158 *
159 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800160struct ipa_header {
161 uint32_t
162 vdev_id:8, /* vdev_id field is LSB of IPA DESC */
163 reserved:24;
164} __packed;
165
Leo Chang3bc8fed2015-11-13 10:59:47 -0800166/**
167 * struct hdd_ipa_uc_tx_hdr - full tx header registered to IPA hardware
168 * @frag_hd: fragment header
169 * @ipa_hd: ipa header
170 * @eth: ether II header
171 *
172 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800173struct hdd_ipa_uc_tx_hdr {
174 struct frag_header frag_hd;
175 struct ipa_header ipa_hd;
176 struct ethhdr eth;
177} __packed;
178
#define HDD_IPA_WLAN_FRAG_HEADER sizeof(struct frag_header)
/*
 * Fixed copy-paste bug: the IPA header length must be derived from
 * struct ipa_header (4 bytes packed), not struct frag_header
 * (10 bytes packed), as the names of both macros indicate.
 */
#define HDD_IPA_WLAN_IPA_HEADER sizeof(struct ipa_header)
181
182/**
183 * struct hdd_ipa_cld_hdr - IPA CLD Header
184 * @reserved: reserved fields
185 * @iface_id: interface ID
186 * @sta_id: Station ID
187 *
188 * Packed 32-bit structure
189 * +----------+----------+--------------+--------+
190 * | Reserved | QCMAP ID | interface id | STA ID |
191 * +----------+----------+--------------+--------+
192 */
193struct hdd_ipa_cld_hdr {
194 uint8_t reserved[2];
195 uint8_t iface_id;
196 uint8_t sta_id;
197} __packed;
198
/**
 * struct hdd_ipa_rx_hdr - header on RX (WLAN->IPA) exception packets
 * @cld_hdr: CLD header carrying the interface id and station id
 * @eth: Ethernet II header of the payload
 */
struct hdd_ipa_rx_hdr {
	struct hdd_ipa_cld_hdr cld_hdr;
	struct ethhdr eth;
} __packed;
203
/**
 * struct hdd_ipa_pm_tx_cb - per-packet context for the PM deferred-TX queue
 * @iface_context: interface the queued packet belongs to
 * @ipa_tx_desc: IPA RX-data descriptor of the queued packet
 *
 * NOTE(review): presumably stored with packets parked on
 * hdd_ipa_priv.pm_queue_head while suspended — confirm against the
 * pm_work handler.
 */
struct hdd_ipa_pm_tx_cb {
	struct hdd_ipa_iface_context *iface_context;
	struct ipa_rx_data *ipa_tx_desc;
};
208
/**
 * struct hdd_ipa_uc_rx_hdr - RX header for the uC offload path
 * @eth: Ethernet II header (no CLD header in the uC path; see
 *       HDD_IPA_UC_WLAN_CLD_HDR_LEN == 0)
 */
struct hdd_ipa_uc_rx_hdr {
	struct ethhdr eth;
} __packed;
212
/**
 * struct hdd_ipa_sys_pipe - bookkeeping for one IPA SYS pipe
 * @conn_hdl: connection handle returned by the IPA driver
 * @conn_hdl_valid: non-zero while @conn_hdl refers to a live connection
 * @ipa_sys_params: parameters the connection was set up with
 */
struct hdd_ipa_sys_pipe {
	uint32_t conn_hdl;
	uint8_t conn_hdl_valid;
	struct ipa_sys_connect_params ipa_sys_params;
};
218
/**
 * struct hdd_ipa_iface_stats - per-interface software counters
 * @num_tx: TX packets handed to IPA
 * @num_tx_drop: TX packets dropped
 * @num_tx_err: TX errors
 * @num_tx_cac_drop: TX packets dropped during CAC (channel availability check)
 * @num_rx_prefilter: RX packets hit by the pre-filter
 * @num_rx_ipa_excep: RX exception packets from IPA
 * @num_rx_recv: RX packets received
 * @num_rx_recv_mul: RX multiple-buffer receives
 * @num_rx_send_desc_err: RX descriptor send failures
 * @max_rx_mul: largest multiple-buffer RX burst seen
 */
struct hdd_ipa_iface_stats {
	uint64_t num_tx;
	uint64_t num_tx_drop;
	uint64_t num_tx_err;
	uint64_t num_tx_cac_drop;
	uint64_t num_rx_prefilter;
	uint64_t num_rx_ipa_excep;
	uint64_t num_rx_recv;
	uint64_t num_rx_recv_mul;
	uint64_t num_rx_send_desc_err;
	uint64_t max_rx_mul;
};
231
232struct hdd_ipa_priv;
233
/**
 * struct hdd_ipa_iface_context - per-interface (per-adapter) IPA state
 * @hdd_ipa: back-pointer to the global HDD IPA context
 * @adapter: HDD adapter bound to this interface
 * @tl_context: TL/txrx data-path context for this interface
 * @cons_client: IPA consumer pipe for this interface
 * @prod_client: IPA producer pipe for this interface
 * @iface_id: This iface ID
 * @sta_id: This iface station ID
 * @interface_lock: protects this context's fields
 * @ifa_address: IPv4 address of the interface — byte order not visible
 *               here; confirm against the ipv4_notifier handler
 * @stats: per-interface counters
 */
struct hdd_ipa_iface_context {
	struct hdd_ipa_priv *hdd_ipa;
	hdd_adapter_t *adapter;
	void *tl_context;

	enum ipa_client_type cons_client;
	enum ipa_client_type prod_client;

	uint8_t iface_id;	/* This iface ID */
	uint8_t sta_id;		/* This iface station ID */
	cdf_spinlock_t interface_lock;
	uint32_t ifa_address;
	struct hdd_ipa_iface_stats stats;
};
248
/*
 * struct hdd_ipa_stats - global WLAN-IPA software counters.
 *
 * event[] counts ipa_wlan_event notifications per event type; the
 * num_rm_* / num_*_perf_req fields track resource-manager traffic;
 * the num_rx_ipa_* fields track the exception/TX data path; the rest
 * track descriptor-queue health and BC/MC traffic.  num_rx_excep and
 * num_tx_bcmc are snapshotted by hdd_ipa_uc_rt_debug_host_fill().
 */
struct hdd_ipa_stats {
	uint32_t event[IPA_WLAN_EVENT_MAX];
	uint64_t num_send_msg;
	uint64_t num_free_msg;

	uint64_t num_rm_grant;
	uint64_t num_rm_release;
	uint64_t num_rm_grant_imm;
	uint64_t num_cons_perf_req;
	uint64_t num_prod_perf_req;

	uint64_t num_rx_drop;
	uint64_t num_rx_ipa_tx_dp;
	uint64_t num_rx_ipa_splice;
	uint64_t num_rx_ipa_loop;
	uint64_t num_rx_ipa_tx_dp_err;
	uint64_t num_rx_ipa_write_done;
	uint64_t num_max_ipa_tx_mul;
	uint64_t num_rx_ipa_hw_maxed_out;
	uint64_t max_pend_q_cnt;

	uint64_t num_tx_comp_cnt;
	uint64_t num_tx_queued;
	uint64_t num_tx_dequeued;
	uint64_t num_max_pm_queue;

	uint64_t num_freeq_empty;
	uint64_t num_pri_freeq_empty;
	uint64_t num_rx_excep;
	uint64_t num_tx_bcmc;
	uint64_t num_tx_bcmc_err;
};
281
/**
 * struct ipa_uc_stas_map - one slot of the associated-station map
 * @is_reserved: true while the slot is occupied
 * @sta_id: station ID held by the slot (reset to 0xFF on release,
 *          see hdd_ipa_uc_find_add_assoc_sta())
 */
struct ipa_uc_stas_map {
	bool is_reserved;
	uint8_t sta_id;
};
/**
 * struct op_msg_type - header of a uC operation message
 * @msg_t: message type
 * @rsvd: reserved
 * @op_code: operation code (hdd_ipa_uc_op_code)
 * @len: message length
 * @rsvd_snd: reserved
 */
struct op_msg_type {
	uint8_t msg_t;
	uint8_t rsvd;
	uint16_t op_code;
	uint16_t len;
	uint16_t rsvd_snd;
};
293
/*
 * struct ipa_uc_fw_stats - TX-completion and RX-indication ring statistics
 * reported by firmware in response to
 * WMA_VDEV_TXRX_GET_IPA_UC_FW_STATS_CMDID (see hdd_ipa_uc_stat_request()).
 *
 * NOTE(review): field order presumably mirrors the firmware message
 * layout — do not reorder without checking the firmware interface.
 */
struct ipa_uc_fw_stats {
	uint32_t tx_comp_ring_base;
	uint32_t tx_comp_ring_size;
	uint32_t tx_comp_ring_dbell_addr;
	uint32_t tx_comp_ring_dbell_ind_val;
	uint32_t tx_comp_ring_dbell_cached_val;
	uint32_t tx_pkts_enqueued;
	uint32_t tx_pkts_completed;
	uint32_t tx_is_suspend;
	uint32_t tx_reserved;
	uint32_t rx_ind_ring_base;
	uint32_t rx_ind_ring_size;
	uint32_t rx_ind_ring_dbell_addr;
	uint32_t rx_ind_ring_dbell_ind_val;
	uint32_t rx_ind_ring_dbell_ind_cached_val;
	uint32_t rx_ind_ring_rdidx_addr;
	uint32_t rx_ind_ring_rd_idx_cached_val;
	uint32_t rx_refill_idx;
	uint32_t rx_num_pkts_indicated;
	uint32_t rx_buf_refilled;
	uint32_t rx_num_ind_drop_no_space;
	uint32_t rx_num_ind_drop_no_buf;
	uint32_t rx_is_suspend;
	uint32_t rx_reserved;
};
319
/**
 * struct ipa_uc_pending_event - a deferred WLAN IPA event
 * @node: list node in hdd_ipa_priv.pending_event
 * @adapter: adapter the event was raised on
 * @type: WLAN IPA event type
 * @sta_id: station ID associated with the event
 * @mac_addr: MAC address associated with the event
 *
 * NOTE(review): presumably queued while uC resources are loading and
 * replayed later — confirm against the event handler.
 */
struct ipa_uc_pending_event {
	cdf_list_node_t node;
	hdd_adapter_t *adapter;
	enum ipa_wlan_event type;
	uint8_t sta_id;
	uint8_t mac_addr[CDF_MAC_ADDR_SIZE];
};
327
328/**
329 * struct uc_rm_work_struct
330 * @work: uC RM work
331 * @event: IPA RM event
332 */
333struct uc_rm_work_struct {
334 struct work_struct work;
335 enum ipa_rm_event event;
336};
337
338/**
339 * struct uc_op_work_struct
340 * @work: uC OP work
341 * @msg: OP message
342 */
343struct uc_op_work_struct {
344 struct work_struct work;
345 struct op_msg_type *msg;
346};
347static uint8_t vdev_to_iface[CSR_ROAM_SESSION_MAX];
348
349/**
350 * struct uc_rt_debug_info
351 * @time: system time
352 * @ipa_excep_count: IPA exception packet count
353 * @rx_drop_count: IPA Rx drop packet count
354 * @net_sent_count: IPA Rx packet sent to network stack count
355 * @rx_discard_count: IPA Rx discard packet count
356 * @rx_mcbc_count: IPA Rx BCMC packet count
357 * @tx_mcbc_count: IPA Tx BCMC packet countt
358 * @tx_fwd_count: IPA Tx forward packet count
359 * @rx_destructor_call: IPA Rx packet destructor count
360 */
361struct uc_rt_debug_info {
362 v_TIME_t time;
363 uint64_t ipa_excep_count;
364 uint64_t rx_drop_count;
365 uint64_t net_sent_count;
366 uint64_t rx_discard_count;
367 uint64_t rx_mcbc_count;
368 uint64_t tx_mcbc_count;
369 uint64_t tx_fwd_count;
370 uint64_t rx_destructor_call;
371};
372
/**
 * struct hdd_ipa_priv - global HDD IPA state (one instance per driver)
 *
 * Holds the SYS-pipe bookkeeping, per-interface contexts, resource
 * manager (RM) state, the PM deferred-TX queue, software statistics,
 * the real-time debug circular buffer, and the shared ring addresses
 * exchanged with the IPA uC.  Referenced through hdd_ctx->hdd_ipa and
 * the file-scope ghdd_ipa pointer.
 */
struct hdd_ipa_priv {
	struct hdd_ipa_sys_pipe sys_pipe[HDD_IPA_MAX_SYSBAM_PIPE];
	struct hdd_ipa_iface_context iface_context[HDD_IPA_MAX_IFACE];
	uint8_t num_iface;
	enum hdd_ipa_rm_state rm_state;
	/*
	 * IPA driver can send RM notifications with IRQ disabled so using cdf
	 * APIs as it is taken care gracefully. Without this, kernel would throw
	 * an warning if spin_lock_bh is used while IRQ is disabled
	 */
	cdf_spinlock_t rm_lock;
	struct uc_rm_work_struct uc_rm_work;
	struct uc_op_work_struct uc_op_work[HDD_IPA_UC_OPCODE_MAX];
	cdf_wake_lock_t wake_lock;
	struct delayed_work wake_lock_work;
	bool wake_lock_released;

	enum ipa_client_type prod_client;

	atomic_t tx_ref_cnt;
	/* packets parked here while suspended / grant pending */
	cdf_nbuf_queue_t pm_queue_head;
	struct work_struct pm_work;
	cdf_spinlock_t pm_lock;
	bool suspended;

	uint32_t pending_hw_desc_cnt;
	uint32_t hw_desc_cnt;
	/* protects the free/pending descriptor lists and counters below */
	spinlock_t q_lock;
	uint32_t freeq_cnt;
	struct list_head free_desc_head;

	uint32_t pend_q_cnt;
	struct list_head pend_desc_head;

	hdd_context_t *hdd_ctx;

	struct dentry *debugfs_dir;
	struct hdd_ipa_stats stats;

	struct notifier_block ipv4_notifier;
	uint32_t curr_prod_bw;
	uint32_t curr_cons_bw;

	uint8_t activated_fw_pipe;
	uint8_t sap_num_connected_sta;
	uint8_t sta_connected;
	uint32_t tx_pipe_handle;
	uint32_t rx_pipe_handle;
	bool resource_loading;
	bool resource_unloading;
	bool pending_cons_req;
	struct ipa_uc_stas_map assoc_stas_map[WLAN_MAX_STA_COUNT];
	cdf_list_t pending_event;
	cdf_mutex_t event_lock;
	bool ipa_pipes_down;
	/* deltas/snapshots used by hdd_ipa_uc_stat_query() */
	uint32_t ipa_tx_packets_diff;
	uint32_t ipa_rx_packets_diff;
	uint32_t ipa_p_tx_packets;
	uint32_t ipa_p_rx_packets;
	uint32_t stat_req_reason;
	uint64_t ipa_tx_forward;
	uint64_t ipa_rx_discard;
	uint64_t ipa_rx_net_send_count;
	uint64_t ipa_rx_internel_drop_count;
	uint64_t ipa_rx_destructor_count;
	/* real-time debug: OOM-probe timer, circular snapshot buffer, lock */
	cdf_mc_timer_t rt_debug_timer;
	struct uc_rt_debug_info rt_bug_buffer[HDD_IPA_UC_RT_DEBUG_BUF_COUNT];
	unsigned int rt_buf_fill_index;
	cdf_mc_timer_t rt_debug_fill_timer;
	cdf_mutex_t rt_debug_lock;
	cdf_mutex_t ipa_lock;

	/* CE resources */
	cdf_dma_addr_t ce_sr_base_paddr;
	uint32_t ce_sr_ring_size;
	cdf_dma_addr_t ce_reg_paddr;

	/* WLAN TX:IPA->WLAN */
	cdf_dma_addr_t tx_comp_ring_base_paddr;
	uint32_t tx_comp_ring_size;
	uint32_t tx_num_alloc_buffer;

	/* WLAN RX:WLAN->IPA */
	cdf_dma_addr_t rx_rdy_ring_base_paddr;
	uint32_t rx_rdy_ring_size;
	cdf_dma_addr_t rx_proc_done_idx_paddr;
	void *rx_proc_done_idx_vaddr;

	/* WLAN RX2:WLAN->IPA */
	cdf_dma_addr_t rx2_rdy_ring_base_paddr;
	uint32_t rx2_rdy_ring_size;
	cdf_dma_addr_t rx2_proc_done_idx_paddr;
	void *rx2_proc_done_idx_vaddr;

	/* IPA UC doorbell registers paddr */
	cdf_dma_addr_t tx_comp_doorbell_paddr;
	cdf_dma_addr_t rx_ready_doorbell_paddr;
};
471
472#define HDD_IPA_WLAN_CLD_HDR_LEN sizeof(struct hdd_ipa_cld_hdr)
473#define HDD_IPA_UC_WLAN_CLD_HDR_LEN 0
474#define HDD_IPA_WLAN_TX_HDR_LEN sizeof(struct hdd_ipa_tx_hdr)
475#define HDD_IPA_UC_WLAN_TX_HDR_LEN sizeof(struct hdd_ipa_uc_tx_hdr)
476#define HDD_IPA_WLAN_RX_HDR_LEN sizeof(struct hdd_ipa_rx_hdr)
477#define HDD_IPA_UC_WLAN_RX_HDR_LEN sizeof(struct hdd_ipa_uc_rx_hdr)
478
Leo Chang3bc8fed2015-11-13 10:59:47 -0800479#define HDD_IPA_FW_RX_DESC_DISCARD_M 0x1
480#define HDD_IPA_FW_RX_DESC_FORWARD_M 0x2
481
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800482#define HDD_IPA_GET_IFACE_ID(_data) \
483 (((struct hdd_ipa_cld_hdr *) (_data))->iface_id)
484
485#define HDD_IPA_LOG(LVL, fmt, args ...) \
486 CDF_TRACE(CDF_MODULE_ID_HDD, LVL, \
487 "%s:%d: "fmt, __func__, __LINE__, ## args)
488
489#define HDD_IPA_DBG_DUMP(_lvl, _prefix, _buf, _len) \
490 do { \
491 CDF_TRACE(CDF_MODULE_ID_HDD, _lvl, "%s:", _prefix); \
492 CDF_TRACE_HEX_DUMP(CDF_MODULE_ID_HDD, _lvl, _buf, _len); \
493 } while (0)
494
495#define HDD_IPA_IS_CONFIG_ENABLED(_hdd_ctx, _mask) \
496 (((_hdd_ctx)->config->IpaConfig & (_mask)) == (_mask))
497
/**
 * HDD_IPA_INCREASE_INTERNAL_DROP_COUNT() - bump the internal RX drop counter
 * @hdd_ipa: pointer to struct hdd_ipa_priv
 *
 * Macro arguments are now parenthesized so any pointer expression may
 * be passed safely (standard function-like-macro hygiene).
 */
#define HDD_IPA_INCREASE_INTERNAL_DROP_COUNT(hdd_ipa) \
	do { \
		(hdd_ipa)->ipa_rx_internel_drop_count++; \
	} while (0)
/**
 * HDD_IPA_INCREASE_NET_SEND_COUNT() - bump the RX net-stack delivery counter
 * @hdd_ipa: pointer to struct hdd_ipa_priv
 */
#define HDD_IPA_INCREASE_NET_SEND_COUNT(hdd_ipa) \
	do { \
		(hdd_ipa)->ipa_rx_net_send_count++; \
	} while (0)
506#define HDD_BW_GET_DIFF(_x, _y) (unsigned long)((ULONG_MAX - (_y)) + (_x) + 1)
507
Leo Chang3bc8fed2015-11-13 10:59:47 -0800508/* Temporary macro to make a build without IPA V2 */
509#ifdef IPA_V2
510#define HDD_IPA_WDI2_SET(pipe_in, ipa_ctxt) \
511do { \
512 pipe_in.u.ul.rdy_ring_rp_va = ipa_ctxt->rx_proc_done_idx_vaddr; \
513 pipe_in.u.ul.rdy_comp_ring_base_pa = ipa_ctxt->rx2_rdy_ring_base_paddr;\
514 pipe_in.u.ul.rdy_comp_ring_size = ipa_ctxt->rx2_rdy_ring_size; \
515 pipe_in.u.ul.rdy_comp_ring_wp_pa = ipa_ctxt->rx2_proc_done_idx_paddr; \
516 pipe_in.u.ul.rdy_comp_ring_wp_va = ipa_ctxt->rx2_proc_done_idx_vaddr; \
517} while (0)
518#else
519/* Do nothing */
520#define HDD_IPA_WDI2_SET(pipe_in, ipa_ctxt)
521#endif /* IPA_V2 */
522
/**
 * struct hdd_ipa_adapter_2_client - interface-to-IPA-client mapping
 * @cons_client: IPA consumer pipe for the interface's TX
 * @prod_client: IPA producer pipe (WLAN1_PROD is shared by all entries)
 *
 * Indexed by HDD IPA interface id (0..HDD_IPA_MAX_IFACE-1).
 */
static struct hdd_ipa_adapter_2_client {
	enum ipa_client_type cons_client;
	enum ipa_client_type prod_client;
} hdd_ipa_adapter_2_client[HDD_IPA_MAX_IFACE] = {
	{
		IPA_CLIENT_WLAN2_CONS, IPA_CLIENT_WLAN1_PROD
	}, {
		IPA_CLIENT_WLAN3_CONS, IPA_CLIENT_WLAN1_PROD
	}, {
		IPA_CLIENT_WLAN4_CONS, IPA_CLIENT_WLAN1_PROD
	},
};
535
/* For Tx pipes, use Ethernet-II Header format */
/*
 * Template uC TX header: frag_header/ipa_header fields start zeroed
 * (vdev_id is patched in at runtime).  The MAC addresses are
 * placeholders.  NOTE(review): the 0x0008 ether-type is presumably the
 * byte-swapped on-wire 0x0800 (IPv4) — confirm against the header
 * registration code.
 */
struct hdd_ipa_uc_tx_hdr ipa_uc_tx_hdr = {
	{
		0x0000,
		0x00000000,
		0x00000000
	},
	{
		0x00000000
	},
	{
		{0x00, 0x03, 0x7f, 0xaa, 0xbb, 0xcc},
		{0x00, 0x03, 0x7f, 0xdd, 0xee, 0xff},
		0x0008
	}
};
552
/* For Tx pipes, use 802.3 Header format */
/*
 * Template non-uC TX header: 802.3 + LLC/SNAP.  MAC addresses are
 * placeholders rewritten per interface; the SNAP EtherType is filled
 * in by the WLAN driver (stored byte-swapped, e.g. 0x0008 == 0x0800).
 */
static struct hdd_ipa_tx_hdr ipa_tx_hdr = {
	{
		{0xDE, 0xAD, 0xBE, 0xEF, 0xFF, 0xFF},
		{0xDE, 0xAD, 0xBE, 0xEF, 0xFF, 0xFF},
		0x00			/* length can be zero */
	},
	{
		/* LLC SNAP header 8 bytes */
		0xaa, 0xaa,
		{0x03, 0x00, 0x00, 0x00},
		0x0008			/* type value(2 bytes) ,filled by wlan */
		/* 0x0800 - IPV4, 0x86dd - IPV6 */
	}
};
568
/*
 * Human-readable names for hdd_ipa_uc_op_code values, indexed by opcode.
 * Must stay in sync with that enum (no entry for HDD_IPA_UC_OPCODE_MAX).
 */
static const char *op_string[] = {
	"TX_SUSPEND",
	"TX_RESUME",
	"RX_SUSPEND",
	"RX_RESUME",
	"STATS",
};
576
577static struct hdd_ipa_priv *ghdd_ipa;
578
579/* Local Function Prototypes */
580static void hdd_ipa_i2w_cb(void *priv, enum ipa_dp_evt_type evt,
581 unsigned long data);
582static void hdd_ipa_w2i_cb(void *priv, enum ipa_dp_evt_type evt,
583 unsigned long data);
584
585static void hdd_ipa_cleanup_iface(struct hdd_ipa_iface_context *iface_context);
586
587/**
588 * hdd_ipa_is_enabled() - Is IPA enabled?
589 * @hdd_ctx: Global HDD context
590 *
591 * Return: true if IPA is enabled, false otherwise
592 */
593bool hdd_ipa_is_enabled(hdd_context_t *hdd_ctx)
594{
595 return HDD_IPA_IS_CONFIG_ENABLED(hdd_ctx, HDD_IPA_ENABLE_MASK);
596}
597
598/**
599 * hdd_ipa_uc_is_enabled() - Is IPA uC offload enabled?
600 * @hdd_ctx: Global HDD context
601 *
602 * Return: true if IPA uC offload is enabled, false otherwise
603 */
604bool hdd_ipa_uc_is_enabled(hdd_context_t *hdd_ctx)
605{
606 return HDD_IPA_IS_CONFIG_ENABLED(hdd_ctx, HDD_IPA_UC_ENABLE_MASK);
607}
608
609/**
610 * hdd_ipa_uc_sta_is_enabled() - Is STA mode IPA uC offload enabled?
611 * @hdd_ctx: Global HDD context
612 *
613 * Return: true if STA mode IPA uC offload is enabled, false otherwise
614 */
615static inline bool hdd_ipa_uc_sta_is_enabled(hdd_context_t *hdd_ctx)
616{
617 return HDD_IPA_IS_CONFIG_ENABLED(hdd_ctx, HDD_IPA_UC_STA_ENABLE_MASK);
618}
619
/**
 * hdd_ipa_is_pre_filter_enabled() - Is IPA pre-filter enabled?
 * @hdd_ctx: Global HDD context
 *
 * Return: true if pre-filter is enabled, otherwise false
 */
static inline bool hdd_ipa_is_pre_filter_enabled(hdd_context_t *hdd_ctx)
{
	return HDD_IPA_IS_CONFIG_ENABLED(hdd_ctx,
					 HDD_IPA_PRE_FILTER_ENABLE_MASK);
}
631
/**
 * hdd_ipa_is_ipv6_enabled() - Is IPA IPv6 enabled?
 * @hdd_ctx: Global HDD context
 *
 * Return: true if IPv6 is enabled, otherwise false
 */
static inline bool hdd_ipa_is_ipv6_enabled(hdd_context_t *hdd_ctx)
{
	return HDD_IPA_IS_CONFIG_ENABLED(hdd_ctx, HDD_IPA_IPV6_ENABLE_MASK);
}
642
/**
 * hdd_ipa_is_rm_enabled() - Is IPA resource manager enabled?
 * @hdd_ctx: Global HDD context
 *
 * Return: true if resource manager is enabled, otherwise false
 */
static inline bool hdd_ipa_is_rm_enabled(hdd_context_t *hdd_ctx)
{
	return HDD_IPA_IS_CONFIG_ENABLED(hdd_ctx, HDD_IPA_RM_ENABLE_MASK);
}
653
/**
 * hdd_ipa_is_rt_debugging_enabled() - Is IPA real-time debug enabled?
 * @hdd_ctx: Global HDD context
 *
 * Return: true if real-time debugging is enabled, otherwise false
 */
static inline bool hdd_ipa_is_rt_debugging_enabled(hdd_context_t *hdd_ctx)
{
	return HDD_IPA_IS_CONFIG_ENABLED(hdd_ctx, HDD_IPA_REAL_TIME_DEBUGGING);
}
664
/**
 * hdd_ipa_is_clk_scaling_enabled() - Is IPA clock scaling enabled?
 * @hdd_ctx: Global HDD context
 *
 * Clock scaling also requires the resource manager, so both
 * configuration bits must be set.
 *
 * Return: true if clock scaling is enabled, otherwise false
 */
static inline bool hdd_ipa_is_clk_scaling_enabled(hdd_context_t *hdd_ctx)
{
	return HDD_IPA_IS_CONFIG_ENABLED(hdd_ctx,
					 HDD_IPA_CLK_SCALING_ENABLE_MASK |
					 HDD_IPA_RM_ENABLE_MASK);
}
677
/**
 * hdd_ipa_uc_rt_debug_host_fill - fill rt debug buffer
 * @ctext: pointer to hdd context.
 *
 * Timer callback that snapshots the current WLAN-IPA counters into the
 * next slot of the circular rt_bug_buffer, then re-arms itself for the
 * next HDD_IPA_UC_RT_DEBUG_FILL_INTERVAL period.
 *
 * NOTE(review): dump_info->rx_mcbc_count is never filled in here, so
 * that field stays zero; the MCBC column printed by
 * hdd_ipa_uc_rt_debug_host_dump() shows tx_mcbc_count only — confirm
 * whether an RX BCMC counter should be snapshotted as well.
 *
 * Return: none
 */
static void hdd_ipa_uc_rt_debug_host_fill(void *ctext)
{
	hdd_context_t *hdd_ctx = (hdd_context_t *)ctext;
	struct hdd_ipa_priv *hdd_ipa;
	struct uc_rt_debug_info *dump_info = NULL;

	if (wlan_hdd_validate_context(hdd_ctx))
		return;

	if (!hdd_ctx->hdd_ipa || !hdd_ipa_uc_is_enabled(hdd_ctx)) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
			"%s: IPA UC is not enabled", __func__);
		return;
	}

	hdd_ipa = (struct hdd_ipa_priv *)hdd_ctx->hdd_ipa;

	cdf_mutex_acquire(&hdd_ipa->rt_debug_lock);
	/* circular buffer: index wraps at HDD_IPA_UC_RT_DEBUG_BUF_COUNT */
	dump_info = &hdd_ipa->rt_bug_buffer[
		hdd_ipa->rt_buf_fill_index % HDD_IPA_UC_RT_DEBUG_BUF_COUNT];

	dump_info->time = cdf_mc_timer_get_system_time();
	dump_info->ipa_excep_count = hdd_ipa->stats.num_rx_excep;
	dump_info->rx_drop_count = hdd_ipa->ipa_rx_internel_drop_count;
	dump_info->net_sent_count = hdd_ipa->ipa_rx_net_send_count;
	dump_info->rx_discard_count = hdd_ipa->ipa_rx_discard;
	dump_info->tx_mcbc_count = hdd_ipa->stats.num_tx_bcmc;
	dump_info->tx_fwd_count = hdd_ipa->ipa_tx_forward;
	dump_info->rx_destructor_call = hdd_ipa->ipa_rx_destructor_count;
	hdd_ipa->rt_buf_fill_index++;
	cdf_mutex_release(&hdd_ipa->rt_debug_lock);

	/* re-arm the timer for the next snapshot */
	cdf_mc_timer_start(&hdd_ipa->rt_debug_fill_timer,
		HDD_IPA_UC_RT_DEBUG_FILL_INTERVAL);
}
721
/**
 * hdd_ipa_uc_rt_debug_host_dump - dump rt debug buffer
 * @hdd_ctx: pointer to hdd context.
 *
 * Prints every slot of the circular rt_bug_buffer, oldest first
 * (starting at the current fill index and wrapping).  Columns map to
 * fields as: TM=time, EXEP=ipa_excep_count, DROP=rx_drop_count,
 * NETS=net_sent_count, MCBC=tx_mcbc_count, TXFD=tx_fwd_count,
 * DSTR=rx_destructor_call, DSCD=rx_discard_count.
 *
 * Return: none
 */
void hdd_ipa_uc_rt_debug_host_dump(hdd_context_t *hdd_ctx)
{
	struct hdd_ipa_priv *hdd_ipa;
	unsigned int dump_count;
	unsigned int dump_index;
	struct uc_rt_debug_info *dump_info = NULL;

	if (wlan_hdd_validate_context(hdd_ctx))
		return;

	hdd_ipa = hdd_ctx->hdd_ipa;
	if (!hdd_ipa || !hdd_ipa_uc_is_enabled(hdd_ctx)) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
			"%s: IPA UC is not enabled", __func__);
		return;
	}

	HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
		"========= WLAN-IPA DEBUG BUF DUMP ==========\n");
	HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
		" TM : EXEP : DROP : NETS : MCBC : TXFD : DSTR : DSCD\n");

	cdf_mutex_acquire(&hdd_ipa->rt_debug_lock);
	for (dump_count = 0;
		dump_count < HDD_IPA_UC_RT_DEBUG_BUF_COUNT;
		dump_count++) {
		/* oldest slot is the one about to be overwritten next */
		dump_index = (hdd_ipa->rt_buf_fill_index + dump_count) %
			HDD_IPA_UC_RT_DEBUG_BUF_COUNT;
		dump_info = &hdd_ipa->rt_bug_buffer[dump_index];
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
			"%12lu:%10llu:%10llu:%10llu:%10llu:%10llu:%10llu:%10llu\n",
			dump_info->time, dump_info->ipa_excep_count,
			dump_info->rx_drop_count, dump_info->net_sent_count,
			dump_info->tx_mcbc_count, dump_info->tx_fwd_count,
			dump_info->rx_destructor_call,
			dump_info->rx_discard_count);
	}
	cdf_mutex_release(&hdd_ipa->rt_debug_lock);
	HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
		"======= WLAN-IPA DEBUG BUF DUMP END ========\n");
}
771
772/**
773 * hdd_ipa_uc_rt_debug_handler - periodic memory health monitor handler
774 * @ctext: pointer to hdd context.
775 *
776 * periodically called by timer expire
777 * will try to alloc dummy memory and detect out of memory condition
778 * if out of memory detected, dump wlan-ipa stats
779 *
780 * Return: none
781 */
782static void hdd_ipa_uc_rt_debug_handler(void *ctext)
783{
784 hdd_context_t *hdd_ctx = (hdd_context_t *)ctext;
785 struct hdd_ipa_priv *hdd_ipa = (struct hdd_ipa_priv *)hdd_ctx->hdd_ipa;
786 void *dummy_ptr = NULL;
787
788 if (wlan_hdd_validate_context(hdd_ctx))
789 return;
790
791 if (!hdd_ipa_is_rt_debugging_enabled(hdd_ctx)) {
792 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
793 "%s: IPA RT debug is not enabled", __func__);
794 return;
795 }
796
797 /* Allocate dummy buffer periodically and free immediately. this will
798 * proactively detect OOM and if allocation fails dump ipa stats
799 */
800 dummy_ptr = kmalloc(HDD_IPA_UC_DEBUG_DUMMY_MEM_SIZE,
801 GFP_KERNEL | GFP_ATOMIC);
802 if (!dummy_ptr) {
803 HDD_IPA_LOG(CDF_TRACE_LEVEL_FATAL,
804 "%s: Dummy alloc fail", __func__);
805 hdd_ipa_uc_rt_debug_host_dump(hdd_ctx);
806 hdd_ipa_uc_stat_request(
807 hdd_get_adapter(hdd_ctx, WLAN_HDD_SOFTAP), 1);
808 } else {
809 kfree(dummy_ptr);
810 }
811
812 cdf_mc_timer_start(&hdd_ipa->rt_debug_timer,
813 HDD_IPA_UC_RT_DEBUG_PERIOD);
814}
815
816/**
817 * hdd_ipa_uc_rt_debug_destructor - called by data packet free
818 * @skb: packet pinter
819 *
820 * when free data packet, will be invoked by wlan client and will increase
821 * free counter
822 *
823 * Return: none
824 */
825void hdd_ipa_uc_rt_debug_destructor(struct sk_buff *skb)
826{
827 if (!ghdd_ipa) {
828 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
829 "%s: invalid hdd context", __func__);
830 return;
831 }
832
833 ghdd_ipa->ipa_rx_destructor_count++;
834}
835
/**
 * hdd_ipa_uc_rt_debug_deinit - remove resources to handle rt debugging
 * @hdd_ctx: hdd main context
 *
 * Tears down in two stages, mirroring hdd_ipa_uc_rt_debug_init():
 * the fill timer and lock always exist and are destroyed first; the
 * OOM-probe timer only exists when real-time debugging is enabled.
 *
 * Return: none
 */
static void hdd_ipa_uc_rt_debug_deinit(hdd_context_t *hdd_ctx)
{
	struct hdd_ipa_priv *hdd_ipa = (struct hdd_ipa_priv *)hdd_ctx->hdd_ipa;

	if (CDF_TIMER_STATE_STOPPED !=
		cdf_mc_timer_get_current_state(&hdd_ipa->rt_debug_fill_timer)) {
		cdf_mc_timer_stop(&hdd_ipa->rt_debug_fill_timer);
	}
	cdf_mc_timer_destroy(&hdd_ipa->rt_debug_fill_timer);
	cdf_mutex_destroy(&hdd_ipa->rt_debug_lock);

	/* the OOM-probe timer below was never created if RT debug is off */
	if (!hdd_ipa_is_rt_debugging_enabled(hdd_ctx)) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
			"%s: IPA RT debug is not enabled", __func__);
		return;
	}

	if (CDF_TIMER_STATE_STOPPED !=
		cdf_mc_timer_get_current_state(&hdd_ipa->rt_debug_timer)) {
		cdf_mc_timer_stop(&hdd_ipa->rt_debug_timer);
	}
	cdf_mc_timer_destroy(&hdd_ipa->rt_debug_timer);
}
867
/**
 * hdd_ipa_uc_rt_debug_init - initialize resources to handle rt debugging
 * @hdd_ctx: hdd main context
 *
 * Allocates and initializes all rt debugging resources.  The snapshot
 * fill timer, buffer and lock are always created; the OOM-probe timer
 * is created and started only when real-time debugging is enabled in
 * the configuration (hdd_ipa_uc_rt_debug_deinit() relies on this).
 *
 * Return: none
 */
static void hdd_ipa_uc_rt_debug_init(hdd_context_t *hdd_ctx)
{
	struct hdd_ipa_priv *hdd_ipa = (struct hdd_ipa_priv *)hdd_ctx->hdd_ipa;

	cdf_mutex_init(&hdd_ipa->rt_debug_lock);
	cdf_mc_timer_init(&hdd_ipa->rt_debug_fill_timer, CDF_TIMER_TYPE_SW,
		hdd_ipa_uc_rt_debug_host_fill, (void *)hdd_ctx);
	hdd_ipa->rt_buf_fill_index = 0;
	cdf_mem_zero(hdd_ipa->rt_bug_buffer,
		sizeof(struct uc_rt_debug_info) *
		HDD_IPA_UC_RT_DEBUG_BUF_COUNT);
	hdd_ipa->ipa_tx_forward = 0;
	hdd_ipa->ipa_rx_discard = 0;
	hdd_ipa->ipa_rx_net_send_count = 0;
	hdd_ipa->ipa_rx_internel_drop_count = 0;
	hdd_ipa->ipa_rx_destructor_count = 0;

	cdf_mc_timer_start(&hdd_ipa->rt_debug_fill_timer,
		HDD_IPA_UC_RT_DEBUG_FILL_INTERVAL);

	/* Realtime debug enable on feature enable */
	if (!hdd_ipa_is_rt_debugging_enabled(hdd_ctx)) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
			"%s: IPA RT debug is not enabled", __func__);
		return;
	}
	cdf_mc_timer_init(&hdd_ipa->rt_debug_timer, CDF_TIMER_TYPE_SW,
		hdd_ipa_uc_rt_debug_handler, (void *)hdd_ctx);
	cdf_mc_timer_start(&hdd_ipa->rt_debug_timer,
		HDD_IPA_UC_RT_DEBUG_PERIOD);

}
908
/**
 * hdd_ipa_uc_stat_query() - Query the IPA stats
 * @pHddCtx: Global HDD context
 * @ipa_tx_diff: [out] tx packet count diff from previous tx packet count
 * @ipa_rx_diff: [out] rx packet count diff from previous rx packet count
 *
 * Both outputs are zeroed first and only filled in when IPA and IPA uC
 * offload are enabled, both WDI pipes are active and resource loading
 * is not in progress.  (Previous kernel-doc wrongly claimed a bool
 * return; this function returns nothing.)
 *
 * Return: none
 */
void hdd_ipa_uc_stat_query(hdd_context_t *pHddCtx,
	uint32_t *ipa_tx_diff, uint32_t *ipa_rx_diff)
{
	struct hdd_ipa_priv *hdd_ipa;

	hdd_ipa = (struct hdd_ipa_priv *)pHddCtx->hdd_ipa;
	*ipa_tx_diff = 0;
	*ipa_rx_diff = 0;

	if (!hdd_ipa_is_enabled(pHddCtx) ||
		!(hdd_ipa_uc_is_enabled(pHddCtx))) {
		return;
	}

	cdf_mutex_acquire(&hdd_ipa->ipa_lock);
	if ((HDD_IPA_UC_NUM_WDI_PIPE == hdd_ipa->activated_fw_pipe) &&
		(false == hdd_ipa->resource_loading)) {
		*ipa_tx_diff = hdd_ipa->ipa_tx_packets_diff;
		*ipa_rx_diff = hdd_ipa->ipa_rx_packets_diff;
		HDD_IPA_LOG(LOG1, "STAT Query TX DIFF %d, RX DIFF %d",
			*ipa_tx_diff, *ipa_rx_diff);
	}
	cdf_mutex_release(&hdd_ipa->ipa_lock);
	return;
}
944
945/**
946 * hdd_ipa_uc_stat_request() - Get IPA stats from IPA.
947 * @adapter: network adapter
948 * @reason: STAT REQ Reason
949 *
950 * Return: None
951 */
952void hdd_ipa_uc_stat_request(hdd_adapter_t *adapter, uint8_t reason)
953{
954 hdd_context_t *pHddCtx;
955 struct hdd_ipa_priv *hdd_ipa;
956
957 if (!adapter) {
958 return;
959 }
960
961 pHddCtx = (hdd_context_t *)adapter->pHddCtx;
962 hdd_ipa = (struct hdd_ipa_priv *)pHddCtx->hdd_ipa;
963 if (!hdd_ipa_is_enabled(pHddCtx) ||
964 !(hdd_ipa_uc_is_enabled(pHddCtx))) {
965 return;
966 }
967
968 HDD_IPA_LOG(LOG1, "STAT REQ Reason %d", reason);
Yun Parke59b3912015-11-09 13:19:06 -0800969 cdf_mutex_acquire(&hdd_ipa->ipa_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800970 if ((HDD_IPA_UC_NUM_WDI_PIPE == hdd_ipa->activated_fw_pipe) &&
971 (false == hdd_ipa->resource_loading)) {
972 hdd_ipa->stat_req_reason = reason;
973 wma_cli_set_command(
974 (int)adapter->sessionId,
975 (int)WMA_VDEV_TXRX_GET_IPA_UC_FW_STATS_CMDID,
976 0, VDEV_CMD);
977 }
Yun Parke59b3912015-11-09 13:19:06 -0800978 cdf_mutex_release(&hdd_ipa->ipa_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800979}
980
981/**
982 * hdd_ipa_uc_find_add_assoc_sta() - Find associated station
983 * @hdd_ipa: Global HDD IPA context
984 * @sta_add: Should station be added
985 * @sta_id: ID of the station being queried
986 *
987 * Return: true if the station was found
988 */
989static bool hdd_ipa_uc_find_add_assoc_sta(struct hdd_ipa_priv *hdd_ipa,
990 bool sta_add, uint8_t sta_id)
991{
992 bool sta_found = false;
993 uint8_t idx;
994 for (idx = 0; idx < WLAN_MAX_STA_COUNT; idx++) {
995 if ((hdd_ipa->assoc_stas_map[idx].is_reserved) &&
996 (hdd_ipa->assoc_stas_map[idx].sta_id == sta_id)) {
997 sta_found = true;
998 break;
999 }
1000 }
1001 if (sta_add && sta_found) {
1002 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
1003 "%s: STA ID %d already exist, cannot add",
1004 __func__, sta_id);
1005 return sta_found;
1006 }
1007 if (sta_add) {
1008 for (idx = 0; idx < WLAN_MAX_STA_COUNT; idx++) {
1009 if (!hdd_ipa->assoc_stas_map[idx].is_reserved) {
1010 hdd_ipa->assoc_stas_map[idx].is_reserved = true;
1011 hdd_ipa->assoc_stas_map[idx].sta_id = sta_id;
1012 return sta_found;
1013 }
1014 }
1015 }
1016 if (!sta_add && !sta_found) {
1017 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
1018 "%s: STA ID %d does not exist, cannot delete",
1019 __func__, sta_id);
1020 return sta_found;
1021 }
1022 if (!sta_add) {
1023 for (idx = 0; idx < WLAN_MAX_STA_COUNT; idx++) {
1024 if ((hdd_ipa->assoc_stas_map[idx].is_reserved) &&
1025 (hdd_ipa->assoc_stas_map[idx].sta_id == sta_id)) {
1026 hdd_ipa->assoc_stas_map[idx].is_reserved =
1027 false;
1028 hdd_ipa->assoc_stas_map[idx].sta_id = 0xFF;
1029 return sta_found;
1030 }
1031 }
1032 }
1033 return sta_found;
1034}
1035
/**
 * hdd_ipa_uc_enable_pipes() - Enable IPA uC pipes
 * @hdd_ipa: Global HDD IPA context
 *
 * Enables and resumes the WDI TX pipe, then the RX pipe, informing the
 * TX/RX datapath (ol_txrx) after each direction becomes active, and
 * finally clears the ipa_pipes_down flag.
 *
 * NOTE(review): if a resume step fails, the pipe already enabled by the
 * preceding step is left enabled — confirm callers treat a non-zero
 * return as fatal and tear down.
 *
 * Return: 0 on success, negative errno if error
 */
static int hdd_ipa_uc_enable_pipes(struct hdd_ipa_priv *hdd_ipa)
{
	int result;
	p_cds_contextType cds_ctx = hdd_ipa->hdd_ctx->pcds_context;

	/* ACTIVATE TX PIPE */
	HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
		    "%s: Enable TX PIPE(tx_pipe_handle=%d)",
		    __func__, hdd_ipa->tx_pipe_handle);
	result = ipa_enable_wdi_pipe(hdd_ipa->tx_pipe_handle);
	if (result) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
			    "%s: Enable TX PIPE fail, code %d",
			    __func__, result);
		return result;
	}
	result = ipa_resume_wdi_pipe(hdd_ipa->tx_pipe_handle);
	if (result) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
			    "%s: Resume TX PIPE fail, code %d",
			    __func__, result);
		return result;
	}
	/* Tell the datapath the TX (WLAN->IPA consumer) pipe is usable */
	ol_txrx_ipa_uc_set_active(cds_ctx->pdev_txrx_ctx, true, true);

	/* ACTIVATE RX PIPE */
	HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
		    "%s: Enable RX PIPE(rx_pipe_handle=%d)",
		    __func__, hdd_ipa->rx_pipe_handle);
	result = ipa_enable_wdi_pipe(hdd_ipa->rx_pipe_handle);
	if (result) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
			    "%s: Enable RX PIPE fail, code %d",
			    __func__, result);
		return result;
	}
	result = ipa_resume_wdi_pipe(hdd_ipa->rx_pipe_handle);
	if (result) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
			    "%s: Resume RX PIPE fail, code %d",
			    __func__, result);
		return result;
	}
	/* Tell the datapath the RX pipe is usable */
	ol_txrx_ipa_uc_set_active(cds_ctx->pdev_txrx_ctx, true, false);
	hdd_ipa->ipa_pipes_down = false;
	return 0;
}
1089
/**
 * hdd_ipa_uc_disable_pipes() - Disable IPA uC pipes
 * @hdd_ipa: Global HDD IPA context
 *
 * Suspends and disables the WDI pipes in the reverse order of
 * hdd_ipa_uc_enable_pipes(): RX first, then TX.  The ipa_pipes_down
 * flag is set up front so other paths (e.g. force shutdown) see the
 * teardown as in progress even if a step below fails.
 *
 * Return: 0 on success, negative errno if error
 */
static int hdd_ipa_uc_disable_pipes(struct hdd_ipa_priv *hdd_ipa)
{
	int result;

	hdd_ipa->ipa_pipes_down = true;

	HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "%s: Disable RX PIPE", __func__);
	result = ipa_suspend_wdi_pipe(hdd_ipa->rx_pipe_handle);
	if (result) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
			    "%s: Suspend RX PIPE fail, code %d",
			    __func__, result);
		return result;
	}
	result = ipa_disable_wdi_pipe(hdd_ipa->rx_pipe_handle);
	if (result) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
			    "%s: Disable RX PIPE fail, code %d",
			    __func__, result);
		return result;
	}

	HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "%s: Disable TX PIPE", __func__);
	result = ipa_suspend_wdi_pipe(hdd_ipa->tx_pipe_handle);
	if (result) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
			    "%s: Suspend TX PIPE fail, code %d",
			    __func__, result);
		return result;
	}
	result = ipa_disable_wdi_pipe(hdd_ipa->tx_pipe_handle);
	if (result) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
			    "%s: Disable TX PIPE fail, code %d",
			    __func__, result);
		return result;
	}

	return 0;
}
1136
1137/**
1138 * hdd_ipa_uc_handle_first_con() - Handle first uC IPA connection
1139 * @hdd_ipa: Global HDD IPA context
1140 *
1141 * Return: 0 on success, negative errno if error
1142 */
1143static int hdd_ipa_uc_handle_first_con(struct hdd_ipa_priv *hdd_ipa)
1144{
1145 hdd_ipa->activated_fw_pipe = 0;
1146 hdd_ipa->resource_loading = true;
Yun Park4cab6ee2015-10-27 11:43:40 -07001147
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001148 /* If RM feature enabled
1149 * Request PROD Resource first
1150 * PROD resource may return sync or async manners */
Yun Park4cab6ee2015-10-27 11:43:40 -07001151 if (hdd_ipa_is_rm_enabled(hdd_ipa->hdd_ctx)) {
1152 if (!ipa_rm_request_resource(IPA_RM_RESOURCE_WLAN_PROD)) {
1153 /* RM PROD request sync return
1154 * enable pipe immediately
1155 */
1156 if (hdd_ipa_uc_enable_pipes(hdd_ipa)) {
1157 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
1158 "%s: IPA WDI Pipe activation failed",
1159 __func__);
1160 hdd_ipa->resource_loading = false;
1161 return -EBUSY;
1162 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001163 }
1164 } else {
1165 /* RM Disabled
Yun Park4cab6ee2015-10-27 11:43:40 -07001166 * Just enabled all the PIPEs
1167 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001168 if (hdd_ipa_uc_enable_pipes(hdd_ipa)) {
1169 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
Yun Park4cab6ee2015-10-27 11:43:40 -07001170 "%s: IPA WDI Pipe activation failed",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001171 __func__);
1172 hdd_ipa->resource_loading = false;
1173 return -EBUSY;
1174 }
1175 hdd_ipa->resource_loading = false;
1176 }
Yun Park4cab6ee2015-10-27 11:43:40 -07001177
1178 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
1179 "%s: IPA WDI Pipes activated successfully", __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001180 return 0;
1181}
1182
/**
 * hdd_ipa_uc_handle_last_discon() - Handle last uC IPA disconnection
 * @hdd_ipa: Global HDD IPA context
 *
 * Marks the uC resources as unloading and asks the TX/RX datapath to
 * deactivate the FW RX pipe first, then the FW TX pipe.  The unloading
 * flag is cleared later, when firmware acknowledges the suspends (see
 * the suspend handling in hdd_ipa_uc_op_cb()).
 *
 * Return: None
 */
static void hdd_ipa_uc_handle_last_discon(struct hdd_ipa_priv *hdd_ipa)
{
	p_cds_contextType cds_ctx = hdd_ipa->hdd_ctx->pcds_context;

	hdd_ipa->resource_unloading = true;
	HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "%s: Disable FW RX PIPE", __func__);
	ol_txrx_ipa_uc_set_active(cds_ctx->pdev_txrx_ctx, false, false);
	HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "%s: Disable FW TX PIPE", __func__);
	ol_txrx_ipa_uc_set_active(cds_ctx->pdev_txrx_ctx, false, true);
}
1199
1200/**
1201 * hdd_ipa_uc_rm_notify_handler() - IPA uC resource notification handler
1202 * @context: User context registered with TL (the IPA Global context is
1203 * registered
1204 * @rxpkt: Packet containing the notification
1205 * @staid: ID of the station associated with the packet
1206 *
1207 * Return: None
1208 */
1209static void
1210hdd_ipa_uc_rm_notify_handler(void *context, enum ipa_rm_event event)
1211{
1212 struct hdd_ipa_priv *hdd_ipa = context;
1213 CDF_STATUS status = CDF_STATUS_SUCCESS;
1214
1215 /*
1216 * When SSR is going on or driver is unloading, just return.
1217 */
1218 status = wlan_hdd_validate_context(hdd_ipa->hdd_ctx);
1219 if (0 != status) {
1220 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "HDD context is not valid");
1221 return;
1222 }
1223
1224 if (!hdd_ipa_is_rm_enabled(hdd_ipa->hdd_ctx))
1225 return;
1226
1227 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "%s, event code %d",
1228 __func__, event);
1229
1230 switch (event) {
1231 case IPA_RM_RESOURCE_GRANTED:
1232 /* Differed RM Granted */
1233 hdd_ipa_uc_enable_pipes(hdd_ipa);
Yun Parke59b3912015-11-09 13:19:06 -08001234 cdf_mutex_acquire(&hdd_ipa->ipa_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001235 if ((false == hdd_ipa->resource_unloading) &&
1236 (!hdd_ipa->activated_fw_pipe)) {
1237 hdd_ipa_uc_enable_pipes(hdd_ipa);
1238 }
Yun Parke59b3912015-11-09 13:19:06 -08001239 cdf_mutex_release(&hdd_ipa->ipa_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001240 break;
1241
1242 case IPA_RM_RESOURCE_RELEASED:
1243 /* Differed RM Released */
1244 hdd_ipa->resource_unloading = false;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001245 break;
1246
1247 default:
1248 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
1249 "%s, invalid event code %d", __func__, event);
1250 break;
1251 }
1252}
1253
/**
 * hdd_ipa_uc_rm_notify_defer() - Defer IPA uC resource manager notification
 * @work: work item embedded in a struct uc_rm_work_struct, which carries
 *	the RM event and links back to the owning HDD IPA context
 *
 * This function is called when a resource manager event is received
 * from firmware in interrupt context. This function will defer the
 * handling to the OL RX thread
 *
 * Return: None
 */
static void hdd_ipa_uc_rm_notify_defer(struct work_struct *work)
{
	enum ipa_rm_event event;
	struct uc_rm_work_struct *uc_rm_work = container_of(work,
			struct uc_rm_work_struct, work);
	struct hdd_ipa_priv *hdd_ipa = container_of(uc_rm_work,
			struct hdd_ipa_priv, uc_rm_work);

	/* Guard against SSR while the deferred event is processed */
	cds_ssr_protect(__func__);
	event = uc_rm_work->event;
	HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO_HIGH,
		    "%s, posted event %d", __func__, event);

	hdd_ipa_uc_rm_notify_handler(hdd_ipa, event);
	cds_ssr_unprotect(__func__);

	return;
}
1283
1284/**
1285 * hdd_ipa_uc_proc_pending_event() - Process IPA uC pending events
1286 * @hdd_ipa: Global HDD IPA context
1287 *
1288 * Return: None
1289 */
1290static void hdd_ipa_uc_proc_pending_event(struct hdd_ipa_priv *hdd_ipa)
1291{
1292 unsigned int pending_event_count;
1293 struct ipa_uc_pending_event *pending_event = NULL;
1294
1295 cdf_list_size(&hdd_ipa->pending_event, &pending_event_count);
1296 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
1297 "%s, Pending Event Count %d", __func__, pending_event_count);
1298 if (!pending_event_count) {
1299 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
1300 "%s, No Pending Event", __func__);
1301 return;
1302 }
1303
1304 cdf_list_remove_front(&hdd_ipa->pending_event,
1305 (cdf_list_node_t **)&pending_event);
1306 while (pending_event != NULL) {
1307 hdd_ipa_wlan_evt(pending_event->adapter,
1308 pending_event->type,
1309 pending_event->sta_id,
1310 pending_event->mac_addr);
1311 cdf_mem_free(pending_event);
1312 pending_event = NULL;
1313 cdf_list_remove_front(&hdd_ipa->pending_event,
1314 (cdf_list_node_t **)&pending_event);
1315 }
1316}
1317
1318/**
1319 * hdd_ipa_uc_op_cb() - IPA uC operation callback
1320 * @op_msg: operation message received from firmware
1321 * @usr_ctxt: user context registered with TL (we register the HDD Global
1322 * context)
1323 *
1324 * Return: None
1325 */
1326static void hdd_ipa_uc_op_cb(struct op_msg_type *op_msg, void *usr_ctxt)
1327{
1328 struct op_msg_type *msg = op_msg;
1329 struct ipa_uc_fw_stats *uc_fw_stat;
1330 struct IpaHwStatsWDIInfoData_t ipa_stat;
1331 struct hdd_ipa_priv *hdd_ipa;
1332 hdd_context_t *hdd_ctx;
1333 CDF_STATUS status = CDF_STATUS_SUCCESS;
1334
1335 if (!op_msg || !usr_ctxt) {
1336 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "%s, INVALID ARG", __func__);
1337 return;
1338 }
1339
1340 if (HDD_IPA_UC_OPCODE_MAX <= msg->op_code) {
1341 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
1342 "%s, INVALID OPCODE %d", __func__, msg->op_code);
1343 return;
1344 }
1345
1346 hdd_ctx = (hdd_context_t *) usr_ctxt;
1347
1348 /*
1349 * When SSR is going on or driver is unloading, just return.
1350 */
1351 status = wlan_hdd_validate_context(hdd_ctx);
1352 if (0 != status) {
1353 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "HDD context is not valid");
1354 cdf_mem_free(op_msg);
1355 return;
1356 }
1357
1358 hdd_ipa = (struct hdd_ipa_priv *)hdd_ctx->hdd_ipa;
1359
1360 HDD_IPA_LOG(CDF_TRACE_LEVEL_DEBUG,
1361 "%s, OPCODE %s", __func__, op_string[msg->op_code]);
1362
1363 if ((HDD_IPA_UC_OPCODE_TX_RESUME == msg->op_code) ||
1364 (HDD_IPA_UC_OPCODE_RX_RESUME == msg->op_code)) {
Yun Parke59b3912015-11-09 13:19:06 -08001365 cdf_mutex_acquire(&hdd_ipa->ipa_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001366 hdd_ipa->activated_fw_pipe++;
1367 if (HDD_IPA_UC_NUM_WDI_PIPE == hdd_ipa->activated_fw_pipe) {
1368 hdd_ipa->resource_loading = false;
1369 hdd_ipa_uc_proc_pending_event(hdd_ipa);
Yun Parkccc6d7a2015-12-02 14:50:13 -08001370 if (hdd_ipa->pending_cons_req)
1371 ipa_rm_notify_completion(
1372 IPA_RM_RESOURCE_GRANTED,
1373 IPA_RM_RESOURCE_WLAN_CONS);
Yun Park5b635012015-12-02 15:05:01 -08001374 hdd_ipa->pending_cons_req = false;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001375 }
Yun Parke59b3912015-11-09 13:19:06 -08001376 cdf_mutex_release(&hdd_ipa->ipa_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001377 }
1378
1379 if ((HDD_IPA_UC_OPCODE_TX_SUSPEND == msg->op_code) ||
1380 (HDD_IPA_UC_OPCODE_RX_SUSPEND == msg->op_code)) {
Yun Parke59b3912015-11-09 13:19:06 -08001381 cdf_mutex_acquire(&hdd_ipa->ipa_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001382 hdd_ipa->activated_fw_pipe--;
1383 if (!hdd_ipa->activated_fw_pipe) {
1384 hdd_ipa_uc_disable_pipes(hdd_ipa);
Yun Park5b635012015-12-02 15:05:01 -08001385 if (hdd_ipa_is_rm_enabled(hdd_ipa->hdd_ctx))
1386 ipa_rm_release_resource(
1387 IPA_RM_RESOURCE_WLAN_PROD);
1388 /* Sync return success from IPA
1389 * Enable/resume all the PIPEs */
1390 hdd_ipa->resource_unloading = false;
1391 hdd_ipa_uc_proc_pending_event(hdd_ipa);
1392 hdd_ipa->pending_cons_req = false;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001393 }
Yun Parke59b3912015-11-09 13:19:06 -08001394 cdf_mutex_release(&hdd_ipa->ipa_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001395 }
1396
1397 if ((HDD_IPA_UC_OPCODE_STATS == msg->op_code) &&
1398 (HDD_IPA_UC_STAT_REASON_DEBUG == hdd_ipa->stat_req_reason)) {
1399
1400 /* STATs from host */
1401 CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
1402 "==== IPA_UC WLAN_HOST CE ====\n"
Leo Chang3bc8fed2015-11-13 10:59:47 -08001403 "CE RING BASE: 0x%llx\n"
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001404 "CE RING SIZE: %d\n"
1405 "CE REG ADDR : 0x%llx",
Leo Chang3bc8fed2015-11-13 10:59:47 -08001406 hdd_ipa->ce_sr_base_paddr,
1407 hdd_ipa->ce_sr_ring_size,
1408 hdd_ipa->ce_reg_paddr);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001409 CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
1410 "==== IPA_UC WLAN_HOST TX ====\n"
Leo Chang3bc8fed2015-11-13 10:59:47 -08001411 "COMP RING BASE: 0x%llx\n"
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001412 "COMP RING SIZE: %d\n"
1413 "NUM ALLOC BUF: %d\n"
Leo Chang3bc8fed2015-11-13 10:59:47 -08001414 "COMP RING DBELL : 0x%llx",
1415 hdd_ipa->tx_comp_ring_base_paddr,
1416 hdd_ipa->tx_comp_ring_size,
1417 hdd_ipa->tx_num_alloc_buffer,
1418 hdd_ipa->tx_comp_doorbell_paddr);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001419 CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
1420 "==== IPA_UC WLAN_HOST RX ====\n"
Leo Chang3bc8fed2015-11-13 10:59:47 -08001421 "IND RING BASE: 0x%llx\n"
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001422 "IND RING SIZE: %d\n"
Leo Chang3bc8fed2015-11-13 10:59:47 -08001423 "IND RING DBELL : 0x%llx\n"
1424 "PROC DONE IND ADDR : 0x%llx\n"
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001425 "NUM EXCP PKT : %llu\n"
1426 "NUM TX BCMC : %llu\n"
1427 "NUM TX BCMC ERR : %llu",
Leo Chang3bc8fed2015-11-13 10:59:47 -08001428 hdd_ipa->rx_rdy_ring_base_paddr,
1429 hdd_ipa->rx_rdy_ring_size,
1430 hdd_ipa->rx_ready_doorbell_paddr,
1431 hdd_ipa->rx_proc_done_idx_paddr,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001432 hdd_ipa->stats.num_rx_excep,
1433 hdd_ipa->stats.num_tx_bcmc,
1434 hdd_ipa->stats.num_tx_bcmc_err);
1435 CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
1436 "==== IPA_UC WLAN_HOST CONTROL ====\n"
1437 "SAP NUM STAs: %d\n"
1438 "STA CONNECTED: %d\n"
1439 "TX PIPE HDL: %d\n"
1440 "RX PIPE HDL : %d\n"
1441 "RSC LOADING : %d\n"
1442 "RSC UNLOADING : %d\n"
1443 "PNDNG CNS RQT : %d",
1444 hdd_ipa->sap_num_connected_sta,
1445 hdd_ipa->sta_connected,
1446 hdd_ipa->tx_pipe_handle,
1447 hdd_ipa->rx_pipe_handle,
1448 (unsigned int)hdd_ipa->resource_loading,
1449 (unsigned int)hdd_ipa->resource_unloading,
1450 (unsigned int)hdd_ipa->pending_cons_req);
1451
1452 /* STATs from FW */
1453 uc_fw_stat = (struct ipa_uc_fw_stats *)
1454 ((uint8_t *)op_msg + sizeof(struct op_msg_type));
1455 CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
1456 "==== IPA_UC WLAN_FW TX ====\n"
1457 "COMP RING BASE: 0x%x\n"
1458 "COMP RING SIZE: %d\n"
1459 "COMP RING DBELL : 0x%x\n"
1460 "COMP RING DBELL IND VAL : %d\n"
1461 "COMP RING DBELL CACHED VAL : %d\n"
1462 "COMP RING DBELL CACHED VAL : %d\n"
1463 "PKTS ENQ : %d\n"
1464 "PKTS COMP : %d\n"
1465 "IS SUSPEND : %d\n"
1466 "RSVD : 0x%x",
1467 uc_fw_stat->tx_comp_ring_base,
1468 uc_fw_stat->tx_comp_ring_size,
1469 uc_fw_stat->tx_comp_ring_dbell_addr,
1470 uc_fw_stat->tx_comp_ring_dbell_ind_val,
1471 uc_fw_stat->tx_comp_ring_dbell_cached_val,
1472 uc_fw_stat->tx_comp_ring_dbell_cached_val,
1473 uc_fw_stat->tx_pkts_enqueued,
1474 uc_fw_stat->tx_pkts_completed,
1475 uc_fw_stat->tx_is_suspend, uc_fw_stat->tx_reserved);
1476 CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
1477 "==== IPA_UC WLAN_FW RX ====\n"
1478 "IND RING BASE: 0x%x\n"
1479 "IND RING SIZE: %d\n"
1480 "IND RING DBELL : 0x%x\n"
1481 "IND RING DBELL IND VAL : %d\n"
1482 "IND RING DBELL CACHED VAL : %d\n"
1483 "RDY IND ADDR : 0x%x\n"
1484 "RDY IND CACHE VAL : %d\n"
1485 "RFIL IND : %d\n"
1486 "NUM PKT INDICAT : %d\n"
1487 "BUF REFIL : %d\n"
1488 "NUM DROP NO SPC : %d\n"
1489 "NUM DROP NO BUF : %d\n"
1490 "IS SUSPND : %d\n"
1491 "RSVD : 0x%x\n",
1492 uc_fw_stat->rx_ind_ring_base,
1493 uc_fw_stat->rx_ind_ring_size,
1494 uc_fw_stat->rx_ind_ring_dbell_addr,
1495 uc_fw_stat->rx_ind_ring_dbell_ind_val,
1496 uc_fw_stat->rx_ind_ring_dbell_ind_cached_val,
1497 uc_fw_stat->rx_ind_ring_rdidx_addr,
1498 uc_fw_stat->rx_ind_ring_rd_idx_cached_val,
1499 uc_fw_stat->rx_refill_idx,
1500 uc_fw_stat->rx_num_pkts_indicated,
1501 uc_fw_stat->rx_buf_refilled,
1502 uc_fw_stat->rx_num_ind_drop_no_space,
1503 uc_fw_stat->rx_num_ind_drop_no_buf,
1504 uc_fw_stat->rx_is_suspend, uc_fw_stat->rx_reserved);
1505 /* STATs from IPA */
1506 ipa_get_wdi_stats(&ipa_stat);
1507 CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
1508 "==== IPA_UC IPA TX ====\n"
1509 "NUM PROCD : %d\n"
1510 "CE DBELL : 0x%x\n"
1511 "NUM DBELL FIRED : %d\n"
1512 "COMP RNG FULL : %d\n"
1513 "COMP RNG EMPT : %d\n"
1514 "COMP RNG USE HGH : %d\n"
1515 "COMP RNG USE LOW : %d\n"
1516 "BAM FIFO FULL : %d\n"
1517 "BAM FIFO EMPT : %d\n"
1518 "BAM FIFO USE HGH : %d\n"
1519 "BAM FIFO USE LOW : %d\n"
1520 "NUM DBELL : %d\n"
1521 "NUM UNEXP DBELL : %d\n"
1522 "NUM BAM INT HDL : 0x%x\n"
1523 "NUM BAM INT NON-RUN : 0x%x\n"
1524 "NUM QMB INT HDL : 0x%x",
1525 ipa_stat.tx_ch_stats.num_pkts_processed,
1526 ipa_stat.tx_ch_stats.copy_engine_doorbell_value,
1527 ipa_stat.tx_ch_stats.num_db_fired,
1528 ipa_stat.tx_ch_stats.tx_comp_ring_stats.ringFull,
1529 ipa_stat.tx_ch_stats.tx_comp_ring_stats.ringEmpty,
1530 ipa_stat.tx_ch_stats.tx_comp_ring_stats.ringUsageHigh,
1531 ipa_stat.tx_ch_stats.tx_comp_ring_stats.ringUsageLow,
1532 ipa_stat.tx_ch_stats.bam_stats.bamFifoFull,
1533 ipa_stat.tx_ch_stats.bam_stats.bamFifoEmpty,
1534 ipa_stat.tx_ch_stats.bam_stats.bamFifoUsageHigh,
1535 ipa_stat.tx_ch_stats.bam_stats.bamFifoUsageLow,
1536 ipa_stat.tx_ch_stats.num_db,
1537 ipa_stat.tx_ch_stats.num_unexpected_db,
1538 ipa_stat.tx_ch_stats.num_bam_int_handled,
1539 ipa_stat.tx_ch_stats.
1540 num_bam_int_in_non_runnning_state,
1541 ipa_stat.tx_ch_stats.num_qmb_int_handled);
1542
1543 CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
1544 "==== IPA_UC IPA RX ====\n"
1545 "MAX OST PKT : %d\n"
1546 "NUM PKT PRCSD : %d\n"
1547 "RNG RP : 0x%x\n"
1548 "COMP RNG FULL : %d\n"
1549 "COMP RNG EMPT : %d\n"
1550 "COMP RNG USE HGH : %d\n"
1551 "COMP RNG USE LOW : %d\n"
1552 "BAM FIFO FULL : %d\n"
1553 "BAM FIFO EMPT : %d\n"
1554 "BAM FIFO USE HGH : %d\n"
1555 "BAM FIFO USE LOW : %d\n"
1556 "NUM DB : %d\n"
1557 "NUM UNEXP DB : %d\n"
1558 "NUM BAM INT HNDL : 0x%x\n",
1559 ipa_stat.rx_ch_stats.max_outstanding_pkts,
1560 ipa_stat.rx_ch_stats.num_pkts_processed,
1561 ipa_stat.rx_ch_stats.rx_ring_rp_value,
1562 ipa_stat.rx_ch_stats.rx_ind_ring_stats.ringFull,
1563 ipa_stat.rx_ch_stats.rx_ind_ring_stats.ringEmpty,
1564 ipa_stat.rx_ch_stats.rx_ind_ring_stats.ringUsageHigh,
1565 ipa_stat.rx_ch_stats.rx_ind_ring_stats.ringUsageLow,
1566 ipa_stat.rx_ch_stats.bam_stats.bamFifoFull,
1567 ipa_stat.rx_ch_stats.bam_stats.bamFifoEmpty,
1568 ipa_stat.rx_ch_stats.bam_stats.bamFifoUsageHigh,
1569 ipa_stat.rx_ch_stats.bam_stats.bamFifoUsageLow,
1570 ipa_stat.rx_ch_stats.num_db,
1571 ipa_stat.rx_ch_stats.num_unexpected_db,
1572 ipa_stat.rx_ch_stats.num_bam_int_handled);
1573 } else if ((HDD_IPA_UC_OPCODE_STATS == msg->op_code) &&
1574 (HDD_IPA_UC_STAT_REASON_BW_CAL == hdd_ipa->stat_req_reason)) {
1575 /* STATs from FW */
1576 uc_fw_stat = (struct ipa_uc_fw_stats *)
1577 ((uint8_t *)op_msg + sizeof(struct op_msg_type));
Yun Parke59b3912015-11-09 13:19:06 -08001578 cdf_mutex_acquire(&hdd_ipa->ipa_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001579 hdd_ipa->ipa_tx_packets_diff = HDD_BW_GET_DIFF(
1580 uc_fw_stat->tx_pkts_completed,
1581 hdd_ipa->ipa_p_tx_packets);
1582 hdd_ipa->ipa_rx_packets_diff = HDD_BW_GET_DIFF(
1583 (uc_fw_stat->rx_num_ind_drop_no_space +
1584 uc_fw_stat->rx_num_ind_drop_no_buf +
1585 uc_fw_stat->rx_num_pkts_indicated),
1586 hdd_ipa->ipa_p_rx_packets);
1587
1588 hdd_ipa->ipa_p_tx_packets = uc_fw_stat->tx_pkts_completed;
1589 hdd_ipa->ipa_p_rx_packets =
1590 (uc_fw_stat->rx_num_ind_drop_no_space +
1591 uc_fw_stat->rx_num_ind_drop_no_buf +
1592 uc_fw_stat->rx_num_pkts_indicated);
Yun Parke59b3912015-11-09 13:19:06 -08001593 cdf_mutex_release(&hdd_ipa->ipa_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001594 } else {
1595 HDD_IPA_LOG(LOGE, "INVALID REASON %d",
1596 hdd_ipa->stat_req_reason);
1597 }
1598 cdf_mem_free(op_msg);
1599}
1600
1601
1602/**
1603 * hdd_ipa_uc_offload_enable_disable() - wdi enable/disable notify to fw
1604 * @adapter: device adapter instance
1605 * @offload_type: MCC or SCC
1606 * @enable: TX offload enable or disable
1607 *
1608 * Return: none
1609 */
1610static void hdd_ipa_uc_offload_enable_disable(hdd_adapter_t *adapter,
1611 uint32_t offload_type, uint32_t enable)
1612{
1613 struct sir_ipa_offload_enable_disable ipa_offload_enable_disable;
1614
1615 /* Lower layer may send multiple START_BSS_EVENT in DFS mode or during
1616 * channel change indication. Since these indications are sent by lower
1617 * layer as SAP updates and IPA doesn't have to do anything for these
1618 * updates so ignoring!
1619 */
1620 if (WLAN_HDD_SOFTAP == adapter->device_mode && adapter->ipa_context)
1621 return;
1622
1623 /* Lower layer may send multiple START_BSS_EVENT in DFS mode or during
1624 * channel change indication. Since these indications are sent by lower
1625 * layer as SAP updates and IPA doesn't have to do anything for these
1626 * updates so ignoring!
1627 */
1628 if (adapter->ipa_context)
1629 return;
1630
1631 cdf_mem_zero(&ipa_offload_enable_disable,
1632 sizeof(ipa_offload_enable_disable));
1633 ipa_offload_enable_disable.offload_type = offload_type;
1634 ipa_offload_enable_disable.vdev_id = adapter->sessionId;
1635 ipa_offload_enable_disable.enable = enable;
1636
1637 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
1638 "%s: offload_type=%d, vdev_id=%d, enable=%d", __func__,
1639 ipa_offload_enable_disable.offload_type,
1640 ipa_offload_enable_disable.vdev_id,
1641 ipa_offload_enable_disable.enable);
1642
1643 if (CDF_STATUS_SUCCESS !=
1644 sme_ipa_offload_enable_disable(WLAN_HDD_GET_HAL_CTX(adapter),
1645 adapter->sessionId, &ipa_offload_enable_disable)) {
1646 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
1647 "%s: Failure to enable IPA offload \
1648 (offload_type=%d, vdev_id=%d, enable=%d)", __func__,
1649 ipa_offload_enable_disable.offload_type,
1650 ipa_offload_enable_disable.vdev_id,
1651 ipa_offload_enable_disable.enable);
1652 }
1653}
1654
1655/**
1656 * hdd_ipa_uc_fw_op_event_handler - IPA uC FW OPvent handler
1657 * @work: uC OP work
1658 *
1659 * Return: None
1660 */
1661static void hdd_ipa_uc_fw_op_event_handler(struct work_struct *work)
1662{
1663 struct op_msg_type *msg;
1664 struct uc_op_work_struct *uc_op_work = container_of(work,
1665 struct uc_op_work_struct, work);
1666 struct hdd_ipa_priv *hdd_ipa = ghdd_ipa;
1667
1668 cds_ssr_protect(__func__);
1669
1670 msg = uc_op_work->msg;
1671 uc_op_work->msg = NULL;
1672 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO_HIGH,
1673 "%s, posted msg %d", __func__, msg->op_code);
1674
1675 hdd_ipa_uc_op_cb(msg, hdd_ipa->hdd_ctx);
1676
1677 cds_ssr_unprotect(__func__);
1678
1679 return;
1680}
1681
1682/**
1683 * hdd_ipa_uc_op_event_handler() - Adapter lookup
1684 * hdd_ipa_uc_fw_op_event_handler - IPA uC FW OPvent handler
1685 * @op_msg: operation message received from firmware
1686 * @hdd_ctx: Global HDD context
1687 *
1688 * Return: None
1689 */
1690static void hdd_ipa_uc_op_event_handler(uint8_t *op_msg, void *hdd_ctx)
1691{
1692 struct hdd_ipa_priv *hdd_ipa;
1693 struct op_msg_type *msg;
1694 struct uc_op_work_struct *uc_op_work;
1695 CDF_STATUS status = CDF_STATUS_SUCCESS;
1696
1697 status = wlan_hdd_validate_context(hdd_ctx);
1698 if (0 != status) {
1699 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "HDD context is not valid");
1700 goto end;
1701 }
1702
1703 msg = (struct op_msg_type *)op_msg;
1704 hdd_ipa = ((hdd_context_t *)hdd_ctx)->hdd_ipa;
1705
1706 if (unlikely(!hdd_ipa))
1707 goto end;
1708
1709 if (HDD_IPA_UC_OPCODE_MAX <= msg->op_code) {
1710 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "%s: Invalid OP Code (%d)",
1711 __func__, msg->op_code);
1712 goto end;
1713 }
1714
1715 uc_op_work = &hdd_ipa->uc_op_work[msg->op_code];
1716 if (uc_op_work->msg)
1717 /* When the same uC OPCODE is already pended, just return */
1718 goto end;
1719
1720 uc_op_work->msg = msg;
1721 schedule_work(&uc_op_work->work);
1722 return;
1723
1724end:
1725 cdf_mem_free(op_msg);
1726}
1727
/**
 * hdd_ipa_uc_ol_init() - Initialize IPA uC offload
 * @hdd_ctx: Global HDD context
 *
 * Sets up both WDI pipes: configures and connects the TX (DL) pipe and
 * the RX (UL) pipe, records the uC doorbell addresses and pipe handles,
 * passes the doorbells to the TX/RX datapath, registers the firmware
 * op-event callback, and initializes the per-opcode work items.
 *
 * NOTE(review): the return values of both ipa_connect_wdi_pipe() calls
 * are not checked — on failure pipe_out contents would be stale; confirm
 * whether an error path is needed here.
 *
 * Return: CDF_STATUS
 */
static CDF_STATUS hdd_ipa_uc_ol_init(hdd_context_t *hdd_ctx)
{
	struct ipa_wdi_in_params pipe_in;
	struct ipa_wdi_out_params pipe_out;
	struct hdd_ipa_priv *ipa_ctxt = (struct hdd_ipa_priv *)hdd_ctx->hdd_ipa;
	p_cds_contextType cds_ctx = hdd_ctx->pcds_context;
	uint8_t i;

	cdf_mem_zero(&pipe_in, sizeof(struct ipa_wdi_in_params));
	cdf_mem_zero(&pipe_out, sizeof(struct ipa_wdi_out_params));

	cdf_list_init(&ipa_ctxt->pending_event, 1000);
	cdf_mutex_init(&ipa_ctxt->event_lock);
	cdf_mutex_init(&ipa_ctxt->ipa_lock);

	/* TX PIPE */
	pipe_in.sys.ipa_ep_cfg.nat.nat_en = IPA_BYPASS_NAT;
	pipe_in.sys.ipa_ep_cfg.hdr.hdr_len = HDD_IPA_UC_WLAN_TX_HDR_LEN;
	pipe_in.sys.ipa_ep_cfg.hdr.hdr_ofst_pkt_size_valid = 1;
	pipe_in.sys.ipa_ep_cfg.hdr.hdr_ofst_pkt_size = 0;
	pipe_in.sys.ipa_ep_cfg.hdr.hdr_additional_const_len =
		HDD_IPA_UC_WLAN_8023_HDR_SIZE;
	pipe_in.sys.ipa_ep_cfg.mode.mode = IPA_BASIC;
	pipe_in.sys.client = IPA_CLIENT_WLAN1_CONS;
	pipe_in.sys.desc_fifo_sz = hdd_ctx->config->IpaDescSize;
	pipe_in.sys.priv = hdd_ctx->hdd_ipa;
	pipe_in.sys.ipa_ep_cfg.hdr_ext.hdr_little_endian = true;
	pipe_in.sys.notify = hdd_ipa_i2w_cb;
	if (!hdd_ipa_is_rm_enabled(hdd_ctx)) {
		/* Without RM, IPA must be kept awake for the pipes */
		HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
			    "%s: IPA RM DISABLED, IPA AWAKE", __func__);
		pipe_in.sys.keep_ipa_awake = true;
	}

	/* DL (downlink = WLAN TX) ring/doorbell layout for the uC */
	pipe_in.u.dl.comp_ring_base_pa = ipa_ctxt->tx_comp_ring_base_paddr;
	pipe_in.u.dl.comp_ring_size =
		ipa_ctxt->tx_comp_ring_size * sizeof(cdf_dma_addr_t);
	pipe_in.u.dl.ce_ring_base_pa = ipa_ctxt->ce_sr_base_paddr;
	pipe_in.u.dl.ce_door_bell_pa = ipa_ctxt->ce_reg_paddr;
	pipe_in.u.dl.ce_ring_size = ipa_ctxt->ce_sr_ring_size;
	pipe_in.u.dl.num_tx_buffers = ipa_ctxt->tx_num_alloc_buffer;

	/* Connect WDI IPA PIPE */
	ipa_connect_wdi_pipe(&pipe_in, &pipe_out);
	/* Micro Controller Doorbell register */
	ipa_ctxt->tx_comp_doorbell_paddr = pipe_out.uc_door_bell_pa;
	/* WLAN TX PIPE Handle */
	ipa_ctxt->tx_pipe_handle = pipe_out.clnt_hdl;
	HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO_HIGH,
		    "TX : CRBPA 0x%x, CRS %d, CERBPA 0x%x, CEDPA 0x%x,"
		    " CERZ %d, NB %d, CDBPAD 0x%x",
		    (unsigned int)pipe_in.u.dl.comp_ring_base_pa,
		    pipe_in.u.dl.comp_ring_size,
		    (unsigned int)pipe_in.u.dl.ce_ring_base_pa,
		    (unsigned int)pipe_in.u.dl.ce_door_bell_pa,
		    pipe_in.u.dl.ce_ring_size,
		    pipe_in.u.dl.num_tx_buffers,
		    (unsigned int)ipa_ctxt->tx_comp_doorbell_paddr);

	/* RX PIPE
	 * NOTE(review): pipe_in is reused without re-zeroing; the sys
	 * fields below overwrite the TX config and u.ul overlays u.dl —
	 * presumably intentional, but any dl-only field not overwritten
	 * keeps its TX value.
	 */
	pipe_in.sys.ipa_ep_cfg.nat.nat_en = IPA_BYPASS_NAT;
	pipe_in.sys.ipa_ep_cfg.hdr.hdr_len = HDD_IPA_UC_WLAN_RX_HDR_LEN;
	pipe_in.sys.ipa_ep_cfg.hdr.hdr_ofst_metadata_valid = 0;
	pipe_in.sys.ipa_ep_cfg.hdr.hdr_metadata_reg_valid = 1;
	pipe_in.sys.ipa_ep_cfg.mode.mode = IPA_BASIC;
	pipe_in.sys.client = IPA_CLIENT_WLAN1_PROD;
	pipe_in.sys.desc_fifo_sz = hdd_ctx->config->IpaDescSize +
				   sizeof(struct sps_iovec);
	pipe_in.sys.notify = hdd_ipa_w2i_cb;
	if (!hdd_ipa_is_rm_enabled(hdd_ctx)) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
			    "%s: IPA RM DISABLED, IPA AWAKE", __func__);
		pipe_in.sys.keep_ipa_awake = true;
	}

	/* UL (uplink = WLAN RX) ring layout for the uC */
	pipe_in.u.ul.rdy_ring_base_pa = ipa_ctxt->rx_rdy_ring_base_paddr;
	pipe_in.u.ul.rdy_ring_size = ipa_ctxt->rx_rdy_ring_size;
	pipe_in.u.ul.rdy_ring_rp_pa = ipa_ctxt->rx_proc_done_idx_paddr;
	HDD_IPA_WDI2_SET(pipe_in, ipa_ctxt);
	ipa_connect_wdi_pipe(&pipe_in, &pipe_out);
	ipa_ctxt->rx_ready_doorbell_paddr = pipe_out.uc_door_bell_pa;
	ipa_ctxt->rx_pipe_handle = pipe_out.clnt_hdl;
	HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO_HIGH,
		    "RX : RRBPA 0x%x, RRS %d, PDIPA 0x%x, RDY_DB_PAD 0x%x",
		    (unsigned int)pipe_in.u.ul.rdy_ring_base_pa,
		    pipe_in.u.ul.rdy_ring_size,
		    (unsigned int)pipe_in.u.ul.rdy_ring_rp_pa,
		    (unsigned int)ipa_ctxt->rx_ready_doorbell_paddr);

	/* Hand both uC doorbell addresses to the datapath */
	ol_txrx_ipa_uc_set_doorbell_paddr(cds_ctx->pdev_txrx_ctx,
					  ipa_ctxt->tx_comp_doorbell_paddr,
					  ipa_ctxt->rx_ready_doorbell_paddr);

	ol_txrx_ipa_uc_register_op_cb(cds_ctx->pdev_txrx_ctx,
				      hdd_ipa_uc_op_event_handler, (void *)hdd_ctx);

	/* One deferred-work slot per firmware opcode */
	for (i = 0; i < HDD_IPA_UC_OPCODE_MAX; i++) {
		cnss_init_work(&ipa_ctxt->uc_op_work[i].work,
			       hdd_ipa_uc_fw_op_event_handler);
		ipa_ctxt->uc_op_work[i].msg = NULL;
	}

	return CDF_STATUS_SUCCESS;
}
1838
Leo Change3e49442015-10-26 20:07:13 -07001839/**
1840 * hdd_ipa_uc_force_pipe_shutdown() - Force shutdown IPA pipe
1841 * @hdd_ctx: hdd main context
1842 *
1843 * Force shutdown IPA pipe
1844 * Independent of FW pipe status, IPA pipe shutdonw progress
1845 * in case, any STA does not leave properly, IPA HW pipe should cleaned up
1846 * independent from FW pipe status
1847 *
1848 * Return: NONE
1849 */
1850void hdd_ipa_uc_force_pipe_shutdown(hdd_context_t *hdd_ctx)
1851{
1852 struct hdd_ipa_priv *hdd_ipa;
1853
1854 if (!hdd_ipa_is_enabled(hdd_ctx) || !hdd_ctx->hdd_ipa)
1855 return;
1856
1857 hdd_ipa = (struct hdd_ipa_priv *)hdd_ctx->hdd_ipa;
1858 if (false == hdd_ipa->ipa_pipes_down) {
1859 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
1860 "IPA pipes are not down yet, force shutdown");
1861 hdd_ipa_uc_disable_pipes(hdd_ipa);
1862 } else {
1863 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
1864 "IPA pipes are down, do nothing");
1865 }
1866
1867 return;
1868}
1869
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001870/**
1871 * hdd_ipa_uc_ssr_deinit() - handle ipa deinit for SSR
1872 *
1873 * Deinit basic IPA UC host side to be in sync reloaded FW during
1874 * SSR
1875 *
1876 * Return: 0 - Success
1877 */
1878int hdd_ipa_uc_ssr_deinit(void)
1879{
1880 struct hdd_ipa_priv *hdd_ipa = ghdd_ipa;
1881 int idx;
1882 struct hdd_ipa_iface_context *iface_context;
1883
Leo Chang3bc8fed2015-11-13 10:59:47 -08001884 if ((!hdd_ipa) || (!hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)))
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001885 return 0;
1886
1887 /* Clean up HDD IPA interfaces */
1888 for (idx = 0; (hdd_ipa->num_iface > 0) &&
1889 (idx < HDD_IPA_MAX_IFACE); idx++) {
1890 iface_context = &hdd_ipa->iface_context[idx];
1891 if (iface_context && iface_context->adapter)
1892 hdd_ipa_cleanup_iface(iface_context);
1893 }
1894
1895 /* After SSR, wlan driver reloads FW again. But we need to protect
1896 * IPA submodule during SSR transient state. So deinit basic IPA
1897 * UC host side to be in sync with reloaded FW during SSR
1898 */
Yun Parkf7dc8cd2015-11-17 15:25:12 -08001899 if (!hdd_ipa->ipa_pipes_down)
1900 hdd_ipa_uc_disable_pipes(hdd_ipa);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001901
Leo Chang3bc8fed2015-11-13 10:59:47 -08001902 cdf_mutex_acquire(&hdd_ipa->ipa_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001903 for (idx = 0; idx < WLAN_MAX_STA_COUNT; idx++) {
1904 hdd_ipa->assoc_stas_map[idx].is_reserved = false;
1905 hdd_ipa->assoc_stas_map[idx].sta_id = 0xFF;
1906 }
Leo Chang3bc8fed2015-11-13 10:59:47 -08001907 cdf_mutex_release(&hdd_ipa->ipa_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001908
1909 /* Full IPA driver cleanup not required since wlan driver is now
1910 * unloaded and reloaded after SSR.
1911 */
1912 return 0;
1913}
1914
/**
 * hdd_ipa_uc_ssr_reinit() - handle ipa reinit after SSR
 *
 * Placeholder for resuming IPA UC operation after SSR. The current
 * recovery model unloads and reloads the wlan driver, which already
 * performs full IPA cleanup and initialization, so no work is needed
 * here today.
 *
 * Return: 0 - Success
 */
int hdd_ipa_uc_ssr_reinit(void)
{
	/* After SSR is complete, IPA UC can resume operation. But now wlan
	 * driver will be unloaded and reloaded, which takes care of IPA cleanup
	 * and initialization. This is a placeholder func if IPA has to resume
	 * operations without driver reload.
	 */
	return 0;
}
Leo Chang3bc8fed2015-11-13 10:59:47 -08001933
1934/**
1935 * hdd_ipa_tx_packet_ipa() - send packet to IPA
1936 * @hdd_ctx: Global HDD context
1937 * @skb: skb sent to IPA
1938 * @session_id: send packet instance session id
1939 *
1940 * Send TX packet which generated by system to IPA.
1941 * This routine only will be used for function verification
1942 *
1943 * Return: NULL packet sent to IPA properly
1944 * NULL invalid packet drop
1945 * skb packet not sent to IPA. legacy data path should handle
1946 */
1947struct sk_buff *hdd_ipa_tx_packet_ipa(hdd_context_t *hdd_ctx,
1948 struct sk_buff *skb, uint8_t session_id)
Leo Change3e49442015-10-26 20:07:13 -07001949{
Leo Chang3bc8fed2015-11-13 10:59:47 -08001950 struct ipa_header *ipa_header;
1951 struct frag_header *frag_header;
1952
1953 if (!hdd_ipa_uc_is_enabled(hdd_ctx))
1954 return skb;
1955
1956 ipa_header = (struct ipa_header *) skb_push(skb,
1957 sizeof(struct ipa_header));
1958 if (!ipa_header) {
1959 /* No headroom, legacy */
1960 return skb;
1961 }
1962 memset(ipa_header, 0, sizeof(*ipa_header));
1963 ipa_header->vdev_id = 0;
1964
1965 frag_header = (struct frag_header *) skb_push(skb,
1966 sizeof(struct frag_header));
1967 if (!frag_header) {
1968 /* No headroom, drop */
1969 kfree_skb(skb);
1970 return NULL;
1971 }
1972 memset(frag_header, 0, sizeof(*frag_header));
1973 frag_header->length = skb->len - sizeof(struct frag_header)
1974 - sizeof(struct ipa_header);
1975
1976 ipa_tx_dp(IPA_CLIENT_WLAN1_CONS, skb, NULL);
1977 return NULL;
Leo Change3e49442015-10-26 20:07:13 -07001978}
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001979
1980/**
1981 * hdd_ipa_wake_lock_timer_func() - Wake lock work handler
1982 * @work: scheduled work
1983 *
1984 * When IPA resources are released in hdd_ipa_rm_try_release() we do
1985 * not want to immediately release the wake lock since the system
1986 * would then potentially try to suspend when there is a healthy data
1987 * rate. Deferred work is scheduled and this function handles the
1988 * work. When this function is called, if the IPA resource is still
1989 * released then we release the wake lock.
1990 *
1991 * Return: None
1992 */
1993static void hdd_ipa_wake_lock_timer_func(struct work_struct *work)
1994{
1995 struct hdd_ipa_priv *hdd_ipa = container_of(to_delayed_work(work),
1996 struct hdd_ipa_priv,
1997 wake_lock_work);
1998
1999 cdf_spin_lock_bh(&hdd_ipa->rm_lock);
2000
2001 if (hdd_ipa->rm_state != HDD_IPA_RM_RELEASED)
2002 goto end;
2003
2004 hdd_ipa->wake_lock_released = true;
2005 cdf_wake_lock_release(&hdd_ipa->wake_lock,
2006 WIFI_POWER_EVENT_WAKELOCK_IPA);
2007
2008end:
2009 cdf_spin_unlock_bh(&hdd_ipa->rm_lock);
2010}
2011
2012/**
2013 * hdd_ipa_rm_request() - Request resource from IPA
2014 * @hdd_ipa: Global HDD IPA context
2015 *
2016 * Return: 0 on success, negative errno on error
2017 */
2018static int hdd_ipa_rm_request(struct hdd_ipa_priv *hdd_ipa)
2019{
2020 int ret = 0;
2021
2022 if (!hdd_ipa_is_rm_enabled(hdd_ipa->hdd_ctx))
2023 return 0;
2024
2025 cdf_spin_lock_bh(&hdd_ipa->rm_lock);
2026
2027 switch (hdd_ipa->rm_state) {
2028 case HDD_IPA_RM_GRANTED:
2029 cdf_spin_unlock_bh(&hdd_ipa->rm_lock);
2030 return 0;
2031 case HDD_IPA_RM_GRANT_PENDING:
2032 cdf_spin_unlock_bh(&hdd_ipa->rm_lock);
2033 return -EINPROGRESS;
2034 case HDD_IPA_RM_RELEASED:
2035 hdd_ipa->rm_state = HDD_IPA_RM_GRANT_PENDING;
2036 break;
2037 }
2038
2039 cdf_spin_unlock_bh(&hdd_ipa->rm_lock);
2040
2041 ret = ipa_rm_inactivity_timer_request_resource(
2042 IPA_RM_RESOURCE_WLAN_PROD);
2043
2044 cdf_spin_lock_bh(&hdd_ipa->rm_lock);
2045 if (ret == 0) {
2046 hdd_ipa->rm_state = HDD_IPA_RM_GRANTED;
2047 hdd_ipa->stats.num_rm_grant_imm++;
2048 }
2049
2050 cancel_delayed_work(&hdd_ipa->wake_lock_work);
2051 if (hdd_ipa->wake_lock_released) {
2052 cdf_wake_lock_acquire(&hdd_ipa->wake_lock,
2053 WIFI_POWER_EVENT_WAKELOCK_IPA);
2054 hdd_ipa->wake_lock_released = false;
2055 }
2056 cdf_spin_unlock_bh(&hdd_ipa->rm_lock);
2057
2058 return ret;
2059}
2060
/**
 * hdd_ipa_rm_try_release() - Attempt to release IPA resource
 * @hdd_ipa: Global HDD IPA context
 *
 * Refuses (-EAGAIN) while TX completions are outstanding, while HW
 * descriptors or pending-queue entries remain (non-uC-STA mode), or
 * while the PM queue is non-empty; refuses (-EINPROGRESS) while a
 * grant is pending. On success the wake lock is released via delayed
 * work rather than immediately.
 *
 * Return: 0 if resources released, negative errno otherwise
 */
static int hdd_ipa_rm_try_release(struct hdd_ipa_priv *hdd_ipa)
{
	int ret = 0;

	if (!hdd_ipa_is_rm_enabled(hdd_ipa->hdd_ctx))
		return 0;

	/* TX completions still outstanding; caller should retry later */
	if (atomic_read(&hdd_ipa->tx_ref_cnt))
		return -EAGAIN;

	/* Data still in flight toward hardware (non-uC-STA mode only) */
	spin_lock_bh(&hdd_ipa->q_lock);
	if (!hdd_ipa_uc_sta_is_enabled(hdd_ipa->hdd_ctx) &&
	    (hdd_ipa->pending_hw_desc_cnt || hdd_ipa->pend_q_cnt)) {
		spin_unlock_bh(&hdd_ipa->q_lock);
		return -EAGAIN;
	}
	spin_unlock_bh(&hdd_ipa->q_lock);

	/* Packets queued during suspend must drain before releasing */
	cdf_spin_lock_bh(&hdd_ipa->pm_lock);

	if (!cdf_nbuf_is_queue_empty(&hdd_ipa->pm_queue_head)) {
		cdf_spin_unlock_bh(&hdd_ipa->pm_lock);
		return -EAGAIN;
	}
	cdf_spin_unlock_bh(&hdd_ipa->pm_lock);

	cdf_spin_lock_bh(&hdd_ipa->rm_lock);
	switch (hdd_ipa->rm_state) {
	case HDD_IPA_RM_GRANTED:
		break;
	case HDD_IPA_RM_GRANT_PENDING:
		cdf_spin_unlock_bh(&hdd_ipa->rm_lock);
		return -EINPROGRESS;
	case HDD_IPA_RM_RELEASED:
		/* Already released; nothing to do */
		cdf_spin_unlock_bh(&hdd_ipa->rm_lock);
		return 0;
	}

	/* IPA driver returns immediately so set the state here to avoid any
	 * race condition.
	 */
	hdd_ipa->rm_state = HDD_IPA_RM_RELEASED;
	hdd_ipa->stats.num_rm_release++;
	cdf_spin_unlock_bh(&hdd_ipa->rm_lock);

	ret =
		ipa_rm_inactivity_timer_release_resource(IPA_RM_RESOURCE_WLAN_PROD);

	cdf_spin_lock_bh(&hdd_ipa->rm_lock);
	if (unlikely(ret != 0)) {
		/* Release failed unexpectedly: roll the state back */
		hdd_ipa->rm_state = HDD_IPA_RM_GRANTED;
		WARN_ON(1);
	}

	/*
	 * If wake_lock is released immediately, kernel would try to suspend
	 * immediately as well, Just avoid ping-pong between suspend-resume
	 * while there is healthy amount of data transfer going on by
	 * releasing the wake_lock after some delay.
	 */
	schedule_delayed_work(&hdd_ipa->wake_lock_work,
			      msecs_to_jiffies
			      (HDD_IPA_RX_INACTIVITY_MSEC_DELAY));

	cdf_spin_unlock_bh(&hdd_ipa->rm_lock);

	return ret;
}
2135
2136/**
2137 * hdd_ipa_rm_notify() - IPA resource manager notifier callback
2138 * @user_data: user data registered with IPA
2139 * @event: the IPA resource manager event that occurred
2140 * @data: the data associated with the event
2141 *
2142 * Return: None
2143 */
2144static void hdd_ipa_rm_notify(void *user_data, enum ipa_rm_event event,
2145 unsigned long data)
2146{
2147 struct hdd_ipa_priv *hdd_ipa = user_data;
2148
2149 if (unlikely(!hdd_ipa))
2150 return;
2151
2152 if (!hdd_ipa_is_rm_enabled(hdd_ipa->hdd_ctx))
2153 return;
2154
2155 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "Evt: %d", event);
2156
2157 switch (event) {
2158 case IPA_RM_RESOURCE_GRANTED:
2159 if (hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
2160 /* RM Notification comes with ISR context
2161 * it should be serialized into work queue to avoid
2162 * ISR sleep problem
2163 */
2164 hdd_ipa->uc_rm_work.event = event;
2165 schedule_work(&hdd_ipa->uc_rm_work.work);
2166 break;
2167 }
2168 cdf_spin_lock_bh(&hdd_ipa->rm_lock);
2169 hdd_ipa->rm_state = HDD_IPA_RM_GRANTED;
2170 cdf_spin_unlock_bh(&hdd_ipa->rm_lock);
2171 hdd_ipa->stats.num_rm_grant++;
2172 break;
2173
2174 case IPA_RM_RESOURCE_RELEASED:
2175 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "RM Release");
2176 hdd_ipa->resource_unloading = false;
2177 break;
2178
2179 default:
2180 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "Unknown RM Evt: %d", event);
2181 break;
2182 }
2183}
2184
/**
 * hdd_ipa_rm_cons_release() - WLAN consumer resource release handler
 *
 * Callback function registered with IPA that is called when IPA wants
 * to release the WLAN consumer resource. The release is always
 * granted immediately.
 *
 * Return: 0 if the request is granted, negative errno otherwise
 */
static int hdd_ipa_rm_cons_release(void)
{
	/* Nothing to tear down; grant unconditionally */
	return 0;
}
2197
2198/**
2199 * hdd_ipa_rm_cons_request() - WLAN consumer resource request handler
2200 *
2201 * Callback function registered with IPA that is called when IPA wants
2202 * to access the WLAN consumer resource
2203 *
2204 * Return: 0 if the request is granted, negative errno otherwise
2205 */
2206static int hdd_ipa_rm_cons_request(void)
2207{
Yun Park4d8b60a2015-10-22 13:59:32 -07002208 int ret = 0;
2209
2210 if (ghdd_ipa->resource_loading) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002211 HDD_IPA_LOG(CDF_TRACE_LEVEL_FATAL,
Yun Park4d8b60a2015-10-22 13:59:32 -07002212 "%s: IPA resource loading in progress",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002213 __func__);
2214 ghdd_ipa->pending_cons_req = true;
Yun Park4d8b60a2015-10-22 13:59:32 -07002215 ret = -EINPROGRESS;
2216 } else if (ghdd_ipa->resource_unloading) {
2217 HDD_IPA_LOG(CDF_TRACE_LEVEL_FATAL,
2218 "%s: IPA resource unloading in progress",
2219 __func__);
2220 ghdd_ipa->pending_cons_req = true;
2221 ret = -EPERM;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002222 }
Yun Park4d8b60a2015-10-22 13:59:32 -07002223
2224 return ret;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002225}
2226
2227/**
2228 * hdd_ipa_set_perf_level() - Set IPA performance level
2229 * @hdd_ctx: Global HDD context
2230 * @tx_packets: Number of packets transmitted in the last sample period
2231 * @rx_packets: Number of packets received in the last sample period
2232 *
2233 * Return: 0 on success, negative errno on error
2234 */
2235int hdd_ipa_set_perf_level(hdd_context_t *hdd_ctx, uint64_t tx_packets,
2236 uint64_t rx_packets)
2237{
2238 uint32_t next_cons_bw, next_prod_bw;
2239 struct hdd_ipa_priv *hdd_ipa = hdd_ctx->hdd_ipa;
2240 struct ipa_rm_perf_profile profile;
2241 int ret;
2242
2243 if ((!hdd_ipa_is_enabled(hdd_ctx)) ||
2244 (!hdd_ipa_is_clk_scaling_enabled(hdd_ctx)))
2245 return 0;
2246
2247 memset(&profile, 0, sizeof(profile));
2248
2249 if (tx_packets > (hdd_ctx->config->busBandwidthHighThreshold / 2))
2250 next_cons_bw = hdd_ctx->config->IpaHighBandwidthMbps;
2251 else if (tx_packets >
2252 (hdd_ctx->config->busBandwidthMediumThreshold / 2))
2253 next_cons_bw = hdd_ctx->config->IpaMediumBandwidthMbps;
2254 else
2255 next_cons_bw = hdd_ctx->config->IpaLowBandwidthMbps;
2256
2257 if (rx_packets > (hdd_ctx->config->busBandwidthHighThreshold / 2))
2258 next_prod_bw = hdd_ctx->config->IpaHighBandwidthMbps;
2259 else if (rx_packets >
2260 (hdd_ctx->config->busBandwidthMediumThreshold / 2))
2261 next_prod_bw = hdd_ctx->config->IpaMediumBandwidthMbps;
2262 else
2263 next_prod_bw = hdd_ctx->config->IpaLowBandwidthMbps;
2264
2265 HDD_IPA_LOG(LOG1,
2266 "CONS perf curr: %d, next: %d",
2267 hdd_ipa->curr_cons_bw, next_cons_bw);
2268 HDD_IPA_LOG(LOG1,
2269 "PROD perf curr: %d, next: %d",
2270 hdd_ipa->curr_prod_bw, next_prod_bw);
2271
2272 if (hdd_ipa->curr_cons_bw != next_cons_bw) {
2273 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
2274 "Requesting CONS perf curr: %d, next: %d",
2275 hdd_ipa->curr_cons_bw, next_cons_bw);
2276 profile.max_supported_bandwidth_mbps = next_cons_bw;
2277 ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_WLAN_CONS,
2278 &profile);
2279 if (ret) {
2280 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
2281 "RM CONS set perf profile failed: %d", ret);
2282
2283 return ret;
2284 }
2285 hdd_ipa->curr_cons_bw = next_cons_bw;
2286 hdd_ipa->stats.num_cons_perf_req++;
2287 }
2288
2289 if (hdd_ipa->curr_prod_bw != next_prod_bw) {
2290 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
2291 "Requesting PROD perf curr: %d, next: %d",
2292 hdd_ipa->curr_prod_bw, next_prod_bw);
2293 profile.max_supported_bandwidth_mbps = next_prod_bw;
2294 ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_WLAN_PROD,
2295 &profile);
2296 if (ret) {
2297 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
2298 "RM PROD set perf profile failed: %d", ret);
2299 return ret;
2300 }
2301 hdd_ipa->curr_prod_bw = next_prod_bw;
2302 hdd_ipa->stats.num_prod_perf_req++;
2303 }
2304
2305 return 0;
2306}
2307
/**
 * hdd_ipa_setup_rm() - Setup IPA resource management
 * @hdd_ipa: Global HDD IPA context
 *
 * Creates the WLAN PROD and CONS resources with the IPA resource
 * manager, adds the PROD -> APPS_CONS dependency, initializes the
 * inactivity timer and programs the lowest performance level. On any
 * failure the already-created resources are torn down in reverse
 * order via the goto chain.
 *
 * Return: 0 on success, negative errno on error
 */
static int hdd_ipa_setup_rm(struct hdd_ipa_priv *hdd_ipa)
{
	struct ipa_rm_create_params create_params = { 0 };
	int ret;

	if (!hdd_ipa_is_rm_enabled(hdd_ipa->hdd_ctx))
		return 0;

	/* Grant notifications arrive in ISR context; the work deferred
	 * here serializes them (see hdd_ipa_rm_notify)
	 */
	cnss_init_work(&hdd_ipa->uc_rm_work.work, hdd_ipa_uc_rm_notify_defer);
	memset(&create_params, 0, sizeof(create_params));
	create_params.name = IPA_RM_RESOURCE_WLAN_PROD;
	create_params.reg_params.user_data = hdd_ipa;
	create_params.reg_params.notify_cb = hdd_ipa_rm_notify;
	create_params.floor_voltage = IPA_VOLTAGE_SVS;

	ret = ipa_rm_create_resource(&create_params);
	if (ret) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
			    "Create RM resource failed: %d", ret);
		goto setup_rm_fail;
	}

	/* CONS resource uses request/release callbacks, not notify_cb */
	memset(&create_params, 0, sizeof(create_params));
	create_params.name = IPA_RM_RESOURCE_WLAN_CONS;
	create_params.request_resource = hdd_ipa_rm_cons_request;
	create_params.release_resource = hdd_ipa_rm_cons_release;
	create_params.floor_voltage = IPA_VOLTAGE_SVS;

	ret = ipa_rm_create_resource(&create_params);
	if (ret) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
			    "Create RM CONS resource failed: %d", ret);
		goto delete_prod;
	}

	ipa_rm_add_dependency(IPA_RM_RESOURCE_WLAN_PROD,
			      IPA_RM_RESOURCE_APPS_CONS);

	ret = ipa_rm_inactivity_timer_init(IPA_RM_RESOURCE_WLAN_PROD,
					   HDD_IPA_RX_INACTIVITY_MSEC_DELAY);
	if (ret) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "Timer init failed: %d",
			    ret);
		goto timer_init_failed;
	}

	/* Set the lowest bandwidth to start with */
	ret = hdd_ipa_set_perf_level(hdd_ipa->hdd_ctx, 0, 0);

	if (ret) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
			    "Set perf level failed: %d", ret);
		goto set_perf_failed;
	}

	cdf_wake_lock_init(&hdd_ipa->wake_lock, "wlan_ipa");
#ifdef CONFIG_CNSS
	cnss_init_delayed_work(&hdd_ipa->wake_lock_work,
			       hdd_ipa_wake_lock_timer_func);
#else
	INIT_DELAYED_WORK(&hdd_ipa->wake_lock_work,
			  hdd_ipa_wake_lock_timer_func);
#endif
	cdf_spinlock_init(&hdd_ipa->rm_lock);
	hdd_ipa->rm_state = HDD_IPA_RM_RELEASED;
	hdd_ipa->wake_lock_released = true;
	atomic_set(&hdd_ipa->tx_ref_cnt, 0);

	return ret;

	/* Error unwind: reverse order of the setup above */
set_perf_failed:
	ipa_rm_inactivity_timer_destroy(IPA_RM_RESOURCE_WLAN_PROD);

timer_init_failed:
	ipa_rm_delete_resource(IPA_RM_RESOURCE_WLAN_CONS);

delete_prod:
	ipa_rm_delete_resource(IPA_RM_RESOURCE_WLAN_PROD);

setup_rm_fail:
	return ret;
}
2396
2397/**
2398 * hdd_ipa_destroy_rm_resource() - Destroy IPA resources
2399 * @hdd_ipa: Global HDD IPA context
2400 *
2401 * Destroys all resources associated with the IPA resource manager
2402 *
2403 * Return: None
2404 */
2405static void hdd_ipa_destroy_rm_resource(struct hdd_ipa_priv *hdd_ipa)
2406{
2407 int ret;
2408
2409 if (!hdd_ipa_is_rm_enabled(hdd_ipa->hdd_ctx))
2410 return;
2411
2412 cancel_delayed_work_sync(&hdd_ipa->wake_lock_work);
2413 cdf_wake_lock_destroy(&hdd_ipa->wake_lock);
2414
2415#ifdef WLAN_OPEN_SOURCE
2416 cancel_work_sync(&hdd_ipa->uc_rm_work.work);
2417#endif
2418 cdf_spinlock_destroy(&hdd_ipa->rm_lock);
2419
2420 ipa_rm_inactivity_timer_destroy(IPA_RM_RESOURCE_WLAN_PROD);
2421
2422 ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_WLAN_PROD);
2423 if (ret)
2424 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
2425 "RM PROD resource delete failed %d", ret);
2426
2427 ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_WLAN_CONS);
2428 if (ret)
2429 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
2430 "RM CONS resource delete failed %d", ret);
2431}
2432
2433/**
2434 * hdd_ipa_send_skb_to_network() - Send skb to kernel
2435 * @skb: network buffer
2436 * @adapter: network adapter
2437 *
2438 * Called when a network buffer is received which should not be routed
2439 * to the IPA module.
2440 *
2441 * Return: None
2442 */
2443static void hdd_ipa_send_skb_to_network(cdf_nbuf_t skb,
2444 hdd_adapter_t *adapter)
2445{
2446 struct hdd_ipa_priv *hdd_ipa = ghdd_ipa;
2447 unsigned int cpu_index;
2448
2449 if (!adapter || adapter->magic != WLAN_HDD_ADAPTER_MAGIC) {
2450 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO_LOW, "Invalid adapter: 0x%p",
2451 adapter);
2452 HDD_IPA_INCREASE_INTERNAL_DROP_COUNT(hdd_ipa);
2453 cdf_nbuf_free(skb);
2454 return;
2455 }
2456
2457 if (hdd_ipa->hdd_ctx->isUnloadInProgress) {
2458 HDD_IPA_INCREASE_INTERNAL_DROP_COUNT(hdd_ipa);
2459 cdf_nbuf_free(skb);
2460 return;
2461 }
2462
2463 skb->destructor = hdd_ipa_uc_rt_debug_destructor;
2464 skb->dev = adapter->dev;
2465 skb->protocol = eth_type_trans(skb, skb->dev);
2466 skb->ip_summed = CHECKSUM_NONE;
2467
2468 cpu_index = wlan_hdd_get_cpu();
2469
2470 ++adapter->hdd_stats.hddTxRxStats.rxPackets[cpu_index];
2471 if (netif_rx_ni(skb) == NET_RX_SUCCESS)
2472 ++adapter->hdd_stats.hddTxRxStats.rxDelivered[cpu_index];
2473 else
2474 ++adapter->hdd_stats.hddTxRxStats.rxRefused[cpu_index];
2475
2476 HDD_IPA_INCREASE_NET_SEND_COUNT(hdd_ipa);
2477 adapter->dev->last_rx = jiffies;
2478}
2479
/**
 * hdd_ipa_w2i_cb() - WLAN to IPA callback handler
 * @priv: pointer to private data registered with IPA (we register a
 *	pointer to the global IPA context)
 * @evt: the IPA event which triggered the callback
 * @data: data associated with the event
 *
 * Handles IPA_RECEIVE: resolves the receiving interface (from the
 * session id stashed in skb->cb[0] in uC mode, or from the CLD header
 * otherwise), strips the CLD header, optionally forwards intra-BSS
 * traffic back to TX based on the FW descriptor bits in skb->cb[1],
 * and finally delivers the frame to the network stack.
 *
 * Return: None
 */
static void hdd_ipa_w2i_cb(void *priv, enum ipa_dp_evt_type evt,
			   unsigned long data)
{
	struct hdd_ipa_priv *hdd_ipa = NULL;
	hdd_adapter_t *adapter = NULL;
	cdf_nbuf_t skb;
	uint8_t iface_id;
	uint8_t session_id;
	struct hdd_ipa_iface_context *iface_context;
	cdf_nbuf_t copy;
	uint8_t fw_desc;
	int ret;

	hdd_ipa = (struct hdd_ipa_priv *)priv;

	switch (evt) {
	case IPA_RECEIVE:
		skb = (cdf_nbuf_t) data;
		if (hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
			/* uC mode: FW placed the vdev id in skb->cb[0] */
			session_id = (uint8_t)skb->cb[0];
			iface_id = vdev_to_iface[session_id];
			HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO_HIGH,
				    "IPA_RECEIVE: session_id=%u, iface_id=%u",
				    session_id, iface_id);
		} else {
			iface_id = HDD_IPA_GET_IFACE_ID(skb->data);
		}

		if (iface_id >= HDD_IPA_MAX_IFACE) {
			HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
				    "IPA_RECEIVE: Invalid iface_id: %u",
				    iface_id);
			HDD_IPA_DBG_DUMP(CDF_TRACE_LEVEL_INFO_HIGH,
					 "w2i -- skb", skb->data, 8);
			HDD_IPA_INCREASE_INTERNAL_DROP_COUNT(hdd_ipa);
			cdf_nbuf_free(skb);
			return;
		}

		iface_context = &hdd_ipa->iface_context[iface_id];
		adapter = iface_context->adapter;

		HDD_IPA_DBG_DUMP(CDF_TRACE_LEVEL_DEBUG,
				 "w2i -- skb", skb->data, 8);
		/* Strip the CLD header; its length differs in uC mode */
		if (hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
			hdd_ipa->stats.num_rx_excep++;
			skb_pull(skb, HDD_IPA_UC_WLAN_CLD_HDR_LEN);
		} else {
			skb_pull(skb, HDD_IPA_WLAN_CLD_HDR_LEN);
		}

		iface_context->stats.num_rx_ipa_excep++;

		/* Disable to forward Intra-BSS Rx packets when
		 * ap_isolate=1 in hostapd.conf
		 */
		if (adapter->sessionCtx.ap.apDisableIntraBssFwd) {
			/*
			 * When INTRA_BSS_FWD_OFFLOAD is enabled, FW will send
			 * all Rx packets to IPA uC, which need to be forwarded
			 * to other interface.
			 * And, IPA driver will send back to WLAN host driver
			 * through exception pipe with fw_desc field set by FW.
			 * Here we are checking fw_desc field for FORWARD bit
			 * set, and forward to Tx. Then copy to kernel stack
			 * only when DISCARD bit is not set.
			 */
			fw_desc = (uint8_t)skb->cb[1];

			if (fw_desc & HDD_IPA_FW_RX_DESC_FORWARD_M) {
				HDD_IPA_LOG(
					CDF_TRACE_LEVEL_DEBUG,
					"Forward packet to Tx (fw_desc=%d)",
					fw_desc);
				/* Forward a copy so the original can still
				 * reach the kernel stack below
				 */
				copy = cdf_nbuf_copy(skb);
				if (copy) {
					hdd_ipa->ipa_tx_forward++;
					ret = hdd_softap_hard_start_xmit(
						(struct sk_buff *)copy,
						adapter->dev);
					if (ret) {
						HDD_IPA_LOG(
							CDF_TRACE_LEVEL_DEBUG,
							"Forward packet tx fail");
						hdd_ipa->stats.num_tx_bcmc_err++;
					} else {
						hdd_ipa->stats.num_tx_bcmc++;
					}
				}
			}

			if (fw_desc & HDD_IPA_FW_RX_DESC_DISCARD_M) {
				/* FW marked the frame for discard after the
				 * optional forward above
				 */
				HDD_IPA_INCREASE_INTERNAL_DROP_COUNT(hdd_ipa);
				hdd_ipa->ipa_rx_discard++;
				cdf_nbuf_free(skb);
				break;
			}

		} else {
			HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO_HIGH,
				    "Intra-BSS FWD is disabled-skip forward to Tx");
		}

		hdd_ipa_send_skb_to_network(skb, adapter);
		break;

	default:
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
			    "w2i cb wrong event: 0x%x", evt);
		return;
	}
}
2602
2603/**
2604 * hdd_ipa_nbuf_cb() - IPA TX complete callback
2605 * @skb: packet buffer which was transmitted
2606 *
2607 * Return: None
2608 */
2609static void hdd_ipa_nbuf_cb(cdf_nbuf_t skb)
2610{
2611 struct hdd_ipa_priv *hdd_ipa = ghdd_ipa;
2612
2613 HDD_IPA_LOG(CDF_TRACE_LEVEL_DEBUG, "%lx", NBUF_OWNER_PRIV_DATA(skb));
2614 ipa_free_skb((struct ipa_rx_data *)NBUF_OWNER_PRIV_DATA(skb));
2615
2616 hdd_ipa->stats.num_tx_comp_cnt++;
2617
2618 atomic_dec(&hdd_ipa->tx_ref_cnt);
2619
2620 hdd_ipa_rm_try_release(hdd_ipa);
2621}
2622
/**
 * hdd_ipa_send_pkt_to_tl() - Send an IPA packet to TL
 * @iface_context: interface-specific IPA context
 * @ipa_tx_desc: packet data descriptor
 *
 * Drops the packet if the interface is down or DFS CAC is blocking
 * TX; otherwise tags the nbuf with the IPA descriptor (so the
 * TX-complete callback hdd_ipa_nbuf_cb can return it to IPA) and
 * hands it to TL. On any drop path the descriptor is freed back to
 * IPA and an RM release is attempted.
 *
 * Return: None
 */
static void hdd_ipa_send_pkt_to_tl(
		struct hdd_ipa_iface_context *iface_context,
		struct ipa_rx_data *ipa_tx_desc)
{
	struct hdd_ipa_priv *hdd_ipa = iface_context->hdd_ipa;
	uint8_t interface_id;
	hdd_adapter_t *adapter = NULL;
	cdf_nbuf_t skb;

	/* interface_lock guards adapter against teardown races */
	cdf_spin_lock_bh(&iface_context->interface_lock);
	adapter = iface_context->adapter;
	if (!adapter) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_WARN, "Interface Down");
		ipa_free_skb(ipa_tx_desc);
		iface_context->stats.num_tx_drop++;
		cdf_spin_unlock_bh(&iface_context->interface_lock);
		hdd_ipa_rm_try_release(hdd_ipa);
		return;
	}

	/*
	 * During CAC period, data packets shouldn't be sent over the air so
	 * drop all the packets here
	 */
	if (WLAN_HDD_GET_AP_CTX_PTR(adapter)->dfs_cac_block_tx) {
		ipa_free_skb(ipa_tx_desc);
		cdf_spin_unlock_bh(&iface_context->interface_lock);
		iface_context->stats.num_tx_cac_drop++;
		hdd_ipa_rm_try_release(hdd_ipa);
		return;
	}

	interface_id = adapter->sessionId;
	++adapter->stats.tx_packets;

	cdf_spin_unlock_bh(&iface_context->interface_lock);

	skb = ipa_tx_desc->skb;

	/* Tag the nbuf so the TX completion path can recover and free
	 * the IPA descriptor
	 */
	cdf_mem_set(skb->cb, sizeof(skb->cb), 0);
	NBUF_OWNER_ID(skb) = IPA_NBUF_OWNER_ID;
	NBUF_CALLBACK_FN(skb) = hdd_ipa_nbuf_cb;
	if (hdd_ipa_uc_sta_is_enabled(hdd_ipa->hdd_ctx)) {
		/* uC STA mode: skip the frag + IPA headers for DMA and
		 * shrink the length to match
		 */
		NBUF_MAPPED_PADDR_LO(skb) = ipa_tx_desc->dma_addr
					    + HDD_IPA_WLAN_FRAG_HEADER
					    + HDD_IPA_WLAN_IPA_HEADER;
		ipa_tx_desc->skb->len -=
			HDD_IPA_WLAN_FRAG_HEADER + HDD_IPA_WLAN_IPA_HEADER;
	} else
		NBUF_MAPPED_PADDR_LO(skb) = ipa_tx_desc->dma_addr;

	NBUF_OWNER_PRIV_DATA(skb) = (unsigned long)ipa_tx_desc;

	adapter->stats.tx_bytes += ipa_tx_desc->skb->len;

	skb = ol_tx_send_ipa_data_frame(iface_context->tl_context,
					ipa_tx_desc->skb);
	if (skb) {
		/* TL rejected the frame; free it back to IPA */
		HDD_IPA_LOG(CDF_TRACE_LEVEL_DEBUG, "TLSHIM tx fail");
		ipa_free_skb(ipa_tx_desc);
		iface_context->stats.num_tx_err++;
		hdd_ipa_rm_try_release(hdd_ipa);
		return;
	}

	/* Balanced by atomic_dec in hdd_ipa_nbuf_cb on TX completion */
	atomic_inc(&hdd_ipa->tx_ref_cnt);

	iface_context->stats.num_tx++;

}
2700
/**
 * hdd_ipa_pm_send_pkt_to_tl() - Send queued packets to TL
 * @work: pointer to the scheduled work
 *
 * Called during PM resume to send packets to TL which were queued
 * while host was in the process of suspending.
 *
 * Return: None
 */
static void hdd_ipa_pm_send_pkt_to_tl(struct work_struct *work)
{
	struct hdd_ipa_priv *hdd_ipa = container_of(work,
						    struct hdd_ipa_priv,
						    pm_work);
	struct hdd_ipa_pm_tx_cb *pm_tx_cb = NULL;
	cdf_nbuf_t skb;
	uint32_t dequeued = 0;

	/* pm_lock only guards the queue; it is dropped around the send
	 * since hdd_ipa_send_pkt_to_tl takes other locks
	 */
	cdf_spin_lock_bh(&hdd_ipa->pm_lock);

	while (((skb = cdf_nbuf_queue_remove(&hdd_ipa->pm_queue_head)) != NULL)) {
		cdf_spin_unlock_bh(&hdd_ipa->pm_lock);

		/* Send context was stashed in skb->cb at queue time
		 * (see hdd_ipa_i2w_cb)
		 */
		pm_tx_cb = (struct hdd_ipa_pm_tx_cb *)skb->cb;

		dequeued++;

		hdd_ipa_send_pkt_to_tl(pm_tx_cb->iface_context,
				       pm_tx_cb->ipa_tx_desc);

		cdf_spin_lock_bh(&hdd_ipa->pm_lock);
	}

	cdf_spin_unlock_bh(&hdd_ipa->pm_lock);

	hdd_ipa->stats.num_tx_dequeued += dequeued;
	/* Track the high-water mark of the PM queue */
	if (dequeued > hdd_ipa->stats.num_max_pm_queue)
		hdd_ipa->stats.num_max_pm_queue = dequeued;
}
2740
/**
 * hdd_ipa_i2w_cb() - IPA to WLAN callback
 * @priv: pointer to private data registered with IPA (we register a
 *	pointer to the interface-specific IPA context)
 * @evt: the IPA event which triggered the callback
 * @data: data associated with the event
 *
 * Forwards a TX packet delivered by IPA to TL. While the host is
 * suspended the packet is queued on the PM queue instead and drained
 * by hdd_ipa_pm_send_pkt_to_tl() on resume.
 *
 * Return: None
 */
static void hdd_ipa_i2w_cb(void *priv, enum ipa_dp_evt_type evt,
			   unsigned long data)
{
	struct hdd_ipa_priv *hdd_ipa = NULL;
	struct ipa_rx_data *ipa_tx_desc;
	struct hdd_ipa_iface_context *iface_context;
	cdf_nbuf_t skb;
	struct hdd_ipa_pm_tx_cb *pm_tx_cb = NULL;
	CDF_STATUS status = CDF_STATUS_SUCCESS;

	iface_context = (struct hdd_ipa_iface_context *)priv;
	/* Only IPA_RECEIVE carries a TX descriptor; drop anything else */
	if (evt != IPA_RECEIVE) {
		skb = (cdf_nbuf_t) data;
		dev_kfree_skb_any(skb);
		iface_context->stats.num_tx_drop++;
		return;
	}

	ipa_tx_desc = (struct ipa_rx_data *)data;

	hdd_ipa = iface_context->hdd_ipa;

	/*
	 * When SSR is going on or driver is unloading, just drop the packets.
	 * During SSR, there is no use in queueing the packets as STA has to
	 * connect back any way
	 */
	status = wlan_hdd_validate_context(hdd_ipa->hdd_ctx);
	if (0 != status) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "HDD context is not valid");
		ipa_free_skb(ipa_tx_desc);
		iface_context->stats.num_tx_drop++;
		return;
	}

	skb = ipa_tx_desc->skb;

	HDD_IPA_DBG_DUMP(CDF_TRACE_LEVEL_DEBUG, "i2w", skb->data, 8);

	/*
	 * If PROD resource is not requested here then there may be cases where
	 * IPA hardware may be clocked down because of not having proper
	 * dependency graph between WLAN CONS and modem PROD pipes. Adding the
	 * workaround to request PROD resource while data is going over CONS
	 * pipe to prevent the IPA hardware clockdown.
	 */
	hdd_ipa_rm_request(hdd_ipa);

	cdf_spin_lock_bh(&hdd_ipa->pm_lock);
	/*
	 * If host is still suspended then queue the packets and these will be
	 * drained later when resume completes. When packet is arrived here and
	 * host is suspended, this means that there is already resume is in
	 * progress.
	 */
	if (hdd_ipa->suspended) {
		/* Stash the send context in skb->cb for the PM drain work */
		cdf_mem_set(skb->cb, sizeof(skb->cb), 0);
		pm_tx_cb = (struct hdd_ipa_pm_tx_cb *)skb->cb;
		pm_tx_cb->iface_context = iface_context;
		pm_tx_cb->ipa_tx_desc = ipa_tx_desc;
		cdf_nbuf_queue_add(&hdd_ipa->pm_queue_head, skb);
		hdd_ipa->stats.num_tx_queued++;

		cdf_spin_unlock_bh(&hdd_ipa->pm_lock);
		return;
	}

	cdf_spin_unlock_bh(&hdd_ipa->pm_lock);

	/*
	 * If we are here means, host is not suspended, wait for the work queue
	 * to finish.
	 */
#ifdef WLAN_OPEN_SOURCE
	flush_work(&hdd_ipa->pm_work);
#endif

	return hdd_ipa_send_pkt_to_tl(iface_context, ipa_tx_desc);
}
2829
2830/**
2831 * hdd_ipa_suspend() - Suspend IPA
2832 * @hdd_ctx: Global HDD context
2833 *
2834 * Return: 0 on success, negativer errno on error
2835 */
2836int hdd_ipa_suspend(hdd_context_t *hdd_ctx)
2837{
2838 struct hdd_ipa_priv *hdd_ipa = hdd_ctx->hdd_ipa;
2839
2840 if (!hdd_ipa_is_enabled(hdd_ctx))
2841 return 0;
2842
2843 /*
2844 * Check if IPA is ready for suspend, If we are here means, there is
2845 * high chance that suspend would go through but just to avoid any race
2846 * condition after suspend started, these checks are conducted before
2847 * allowing to suspend.
2848 */
2849 if (atomic_read(&hdd_ipa->tx_ref_cnt))
2850 return -EAGAIN;
2851
2852 cdf_spin_lock_bh(&hdd_ipa->rm_lock);
2853
2854 if (hdd_ipa->rm_state != HDD_IPA_RM_RELEASED) {
2855 cdf_spin_unlock_bh(&hdd_ipa->rm_lock);
2856 return -EAGAIN;
2857 }
2858 cdf_spin_unlock_bh(&hdd_ipa->rm_lock);
2859
2860 cdf_spin_lock_bh(&hdd_ipa->pm_lock);
2861 hdd_ipa->suspended = true;
2862 cdf_spin_unlock_bh(&hdd_ipa->pm_lock);
2863
2864 return 0;
2865}
2866
2867/**
2868 * hdd_ipa_resume() - Resume IPA following suspend
2869 * hdd_ctx: Global HDD context
2870 *
2871 * Return: 0 on success, negative errno on error
2872 */
2873int hdd_ipa_resume(hdd_context_t *hdd_ctx)
2874{
2875 struct hdd_ipa_priv *hdd_ipa = hdd_ctx->hdd_ipa;
2876
2877 if (!hdd_ipa_is_enabled(hdd_ctx))
2878 return 0;
2879
2880 schedule_work(&hdd_ipa->pm_work);
2881
2882 cdf_spin_lock_bh(&hdd_ipa->pm_lock);
2883 hdd_ipa->suspended = false;
2884 cdf_spin_unlock_bh(&hdd_ipa->pm_lock);
2885
2886 return 0;
2887}
2888
2889/**
2890 * hdd_ipa_setup_sys_pipe() - Setup all IPA Sys pipes
2891 * @hdd_ipa: Global HDD IPA context
2892 *
2893 * Return: 0 on success, negative errno on error
2894 */
2895static int hdd_ipa_setup_sys_pipe(struct hdd_ipa_priv *hdd_ipa)
2896{
2897 int i, ret = 0;
2898 struct ipa_sys_connect_params *ipa;
2899 uint32_t desc_fifo_sz;
2900
2901 /* The maximum number of descriptors that can be provided to a BAM at
2902 * once is one less than the total number of descriptors that the buffer
2903 * can contain.
2904 * If max_num_of_descriptors = (BAM_PIPE_DESCRIPTOR_FIFO_SIZE / sizeof
2905 * (SPS_DESCRIPTOR)), then (max_num_of_descriptors - 1) descriptors can
2906 * be provided at once.
2907 * Because of above requirement, one extra descriptor will be added to
2908 * make sure hardware always has one descriptor.
2909 */
2910 desc_fifo_sz = hdd_ipa->hdd_ctx->config->IpaDescSize
2911 + sizeof(struct sps_iovec);
2912
2913 /*setup TX pipes */
2914 for (i = 0; i < HDD_IPA_MAX_IFACE; i++) {
2915 ipa = &hdd_ipa->sys_pipe[i].ipa_sys_params;
2916
2917 ipa->client = hdd_ipa_adapter_2_client[i].cons_client;
2918 ipa->desc_fifo_sz = desc_fifo_sz;
2919 ipa->priv = &hdd_ipa->iface_context[i];
2920 ipa->notify = hdd_ipa_i2w_cb;
2921
2922 if (hdd_ipa_uc_sta_is_enabled(hdd_ipa->hdd_ctx)) {
2923 ipa->ipa_ep_cfg.hdr.hdr_len =
2924 HDD_IPA_UC_WLAN_TX_HDR_LEN;
2925 ipa->ipa_ep_cfg.nat.nat_en = IPA_BYPASS_NAT;
2926 ipa->ipa_ep_cfg.hdr.hdr_ofst_pkt_size_valid = 1;
2927 ipa->ipa_ep_cfg.hdr.hdr_ofst_pkt_size = 0;
2928 ipa->ipa_ep_cfg.hdr.hdr_additional_const_len =
2929 HDD_IPA_UC_WLAN_8023_HDR_SIZE;
2930 ipa->ipa_ep_cfg.hdr_ext.hdr_little_endian = true;
2931 } else {
2932 ipa->ipa_ep_cfg.hdr.hdr_len = HDD_IPA_WLAN_TX_HDR_LEN;
2933 }
2934 ipa->ipa_ep_cfg.mode.mode = IPA_BASIC;
2935
2936 if (!hdd_ipa_is_rm_enabled(hdd_ipa->hdd_ctx))
2937 ipa->keep_ipa_awake = 1;
2938
2939 ret = ipa_setup_sys_pipe(ipa, &(hdd_ipa->sys_pipe[i].conn_hdl));
2940 if (ret) {
2941 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "Failed for pipe %d"
2942 " ret: %d", i, ret);
2943 goto setup_sys_pipe_fail;
2944 }
2945 hdd_ipa->sys_pipe[i].conn_hdl_valid = 1;
2946 }
2947
2948 if (!hdd_ipa_uc_sta_is_enabled(hdd_ipa->hdd_ctx)) {
2949 /*
2950 * Hard code it here, this can be extended if in case
2951 * PROD pipe is also per interface.
2952 * Right now there is no advantage of doing this.
2953 */
2954 hdd_ipa->prod_client = IPA_CLIENT_WLAN1_PROD;
2955
2956 ipa = &hdd_ipa->sys_pipe[HDD_IPA_RX_PIPE].ipa_sys_params;
2957
2958 ipa->client = hdd_ipa->prod_client;
2959
2960 ipa->desc_fifo_sz = desc_fifo_sz;
2961 ipa->priv = hdd_ipa;
2962 ipa->notify = hdd_ipa_w2i_cb;
2963
2964 ipa->ipa_ep_cfg.nat.nat_en = IPA_BYPASS_NAT;
2965 ipa->ipa_ep_cfg.hdr.hdr_len = HDD_IPA_WLAN_RX_HDR_LEN;
2966 ipa->ipa_ep_cfg.hdr.hdr_ofst_metadata_valid = 1;
2967 ipa->ipa_ep_cfg.mode.mode = IPA_BASIC;
2968
2969 if (!hdd_ipa_is_rm_enabled(hdd_ipa->hdd_ctx))
2970 ipa->keep_ipa_awake = 1;
2971
2972 ret = ipa_setup_sys_pipe(ipa, &(hdd_ipa->sys_pipe[i].conn_hdl));
2973 if (ret) {
2974 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
2975 "Failed for RX pipe: %d", ret);
2976 goto setup_sys_pipe_fail;
2977 }
2978 hdd_ipa->sys_pipe[HDD_IPA_RX_PIPE].conn_hdl_valid = 1;
2979 }
2980
2981 return ret;
2982
2983setup_sys_pipe_fail:
2984
2985 while (--i >= 0) {
2986 ipa_teardown_sys_pipe(hdd_ipa->sys_pipe[i].conn_hdl);
2987 cdf_mem_zero(&hdd_ipa->sys_pipe[i],
2988 sizeof(struct hdd_ipa_sys_pipe));
2989 }
2990
2991 return ret;
2992}
2993
2994/**
2995 * hdd_ipa_teardown_sys_pipe() - Tear down all IPA Sys pipes
2996 * @hdd_ipa: Global HDD IPA context
2997 *
2998 * Return: None
2999 */
3000static void hdd_ipa_teardown_sys_pipe(struct hdd_ipa_priv *hdd_ipa)
3001{
3002 int ret = 0, i;
3003 for (i = 0; i < HDD_IPA_MAX_SYSBAM_PIPE; i++) {
3004 if (hdd_ipa->sys_pipe[i].conn_hdl_valid) {
3005 ret =
3006 ipa_teardown_sys_pipe(hdd_ipa->sys_pipe[i].
3007 conn_hdl);
3008 if (ret)
3009 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "Failed: %d",
3010 ret);
3011
3012 hdd_ipa->sys_pipe[i].conn_hdl_valid = 0;
3013 }
3014 }
3015}
3016
3017/**
3018 * hdd_ipa_register_interface() - register IPA interface
3019 * @hdd_ipa: Global IPA context
3020 * @iface_context: Per-interface IPA context
3021 *
3022 * Return: 0 on success, negative errno on error
3023 */
3024static int hdd_ipa_register_interface(struct hdd_ipa_priv *hdd_ipa,
3025 struct hdd_ipa_iface_context
3026 *iface_context)
3027{
3028 struct ipa_tx_intf tx_intf;
3029 struct ipa_rx_intf rx_intf;
3030 struct ipa_ioc_tx_intf_prop *tx_prop = NULL;
3031 struct ipa_ioc_rx_intf_prop *rx_prop = NULL;
3032 char *ifname = iface_context->adapter->dev->name;
3033
3034 char ipv4_hdr_name[IPA_RESOURCE_NAME_MAX];
3035 char ipv6_hdr_name[IPA_RESOURCE_NAME_MAX];
3036
3037 int num_prop = 1;
3038 int ret = 0;
3039
3040 if (hdd_ipa_is_ipv6_enabled(hdd_ipa->hdd_ctx))
3041 num_prop++;
3042
3043 /* Allocate TX properties for TOS categories, 1 each for IPv4 & IPv6 */
3044 tx_prop =
3045 cdf_mem_malloc(sizeof(struct ipa_ioc_tx_intf_prop) * num_prop);
3046 if (!tx_prop) {
3047 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "tx_prop allocation failed");
3048 goto register_interface_fail;
3049 }
3050
3051 /* Allocate RX properties, 1 each for IPv4 & IPv6 */
3052 rx_prop =
3053 cdf_mem_malloc(sizeof(struct ipa_ioc_rx_intf_prop) * num_prop);
3054 if (!rx_prop) {
3055 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "rx_prop allocation failed");
3056 goto register_interface_fail;
3057 }
3058
3059 cdf_mem_zero(&tx_intf, sizeof(tx_intf));
3060 cdf_mem_zero(&rx_intf, sizeof(rx_intf));
3061
3062 snprintf(ipv4_hdr_name, IPA_RESOURCE_NAME_MAX, "%s%s",
3063 ifname, HDD_IPA_IPV4_NAME_EXT);
3064 snprintf(ipv6_hdr_name, IPA_RESOURCE_NAME_MAX, "%s%s",
3065 ifname, HDD_IPA_IPV6_NAME_EXT);
3066
3067 rx_prop[IPA_IP_v4].ip = IPA_IP_v4;
3068 rx_prop[IPA_IP_v4].src_pipe = iface_context->prod_client;
3069 rx_prop[IPA_IP_v4].hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
3070 rx_prop[IPA_IP_v4].attrib.attrib_mask = IPA_FLT_META_DATA;
3071
3072 /*
3073 * Interface ID is 3rd byte in the CLD header. Add the meta data and
3074 * mask to identify the interface in IPA hardware
3075 */
3076 rx_prop[IPA_IP_v4].attrib.meta_data =
3077 htonl(iface_context->adapter->sessionId << 16);
3078 rx_prop[IPA_IP_v4].attrib.meta_data_mask = htonl(0x00FF0000);
3079
3080 rx_intf.num_props++;
3081 if (hdd_ipa_is_ipv6_enabled(hdd_ipa->hdd_ctx)) {
3082 rx_prop[IPA_IP_v6].ip = IPA_IP_v6;
3083 rx_prop[IPA_IP_v6].src_pipe = iface_context->prod_client;
3084 rx_prop[IPA_IP_v6].hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
3085 rx_prop[IPA_IP_v4].attrib.attrib_mask = IPA_FLT_META_DATA;
3086 rx_prop[IPA_IP_v4].attrib.meta_data =
3087 htonl(iface_context->adapter->sessionId << 16);
3088 rx_prop[IPA_IP_v4].attrib.meta_data_mask = htonl(0x00FF0000);
3089
3090 rx_intf.num_props++;
3091 }
3092
3093 tx_prop[IPA_IP_v4].ip = IPA_IP_v4;
3094 tx_prop[IPA_IP_v4].hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
3095 tx_prop[IPA_IP_v4].dst_pipe = IPA_CLIENT_WLAN1_CONS;
3096 tx_prop[IPA_IP_v4].alt_dst_pipe = iface_context->cons_client;
3097 strlcpy(tx_prop[IPA_IP_v4].hdr_name, ipv4_hdr_name,
3098 IPA_RESOURCE_NAME_MAX);
3099 tx_intf.num_props++;
3100
3101 if (hdd_ipa_is_ipv6_enabled(hdd_ipa->hdd_ctx)) {
3102 tx_prop[IPA_IP_v6].ip = IPA_IP_v6;
3103 tx_prop[IPA_IP_v6].hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
3104 tx_prop[IPA_IP_v6].dst_pipe = IPA_CLIENT_WLAN1_CONS;
3105 tx_prop[IPA_IP_v6].alt_dst_pipe = iface_context->cons_client;
3106 strlcpy(tx_prop[IPA_IP_v6].hdr_name, ipv6_hdr_name,
3107 IPA_RESOURCE_NAME_MAX);
3108 tx_intf.num_props++;
3109 }
3110
3111 tx_intf.prop = tx_prop;
3112 rx_intf.prop = rx_prop;
3113
3114 /* Call the ipa api to register interface */
3115 ret = ipa_register_intf(ifname, &tx_intf, &rx_intf);
3116
3117register_interface_fail:
3118 cdf_mem_free(tx_prop);
3119 cdf_mem_free(rx_prop);
3120 return ret;
3121}
3122
3123/**
3124 * hdd_remove_ipa_header() - Remove a specific header from IPA
3125 * @name: Name of the header to be removed
3126 *
3127 * Return: None
3128 */
3129static void hdd_ipa_remove_header(char *name)
3130{
3131 struct ipa_ioc_get_hdr hdrlookup;
3132 int ret = 0, len;
3133 struct ipa_ioc_del_hdr *ipa_hdr;
3134
3135 cdf_mem_zero(&hdrlookup, sizeof(hdrlookup));
3136 strlcpy(hdrlookup.name, name, sizeof(hdrlookup.name));
3137 ret = ipa_get_hdr(&hdrlookup);
3138 if (ret) {
3139 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "Hdr deleted already %s, %d",
3140 name, ret);
3141 return;
3142 }
3143
3144 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "hdl: 0x%x", hdrlookup.hdl);
3145 len = sizeof(struct ipa_ioc_del_hdr) + sizeof(struct ipa_hdr_del) * 1;
3146 ipa_hdr = (struct ipa_ioc_del_hdr *)cdf_mem_malloc(len);
3147 if (ipa_hdr == NULL) {
3148 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "ipa_hdr allocation failed");
3149 return;
3150 }
3151 ipa_hdr->num_hdls = 1;
3152 ipa_hdr->commit = 0;
3153 ipa_hdr->hdl[0].hdl = hdrlookup.hdl;
3154 ipa_hdr->hdl[0].status = -1;
3155 ret = ipa_del_hdr(ipa_hdr);
3156 if (ret != 0)
3157 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "Delete header failed: %d",
3158 ret);
3159
3160 cdf_mem_free(ipa_hdr);
3161}
3162
/**
 * hdd_ipa_add_header_info() - Add IPA header for a given interface
 * @hdd_ipa: Global HDD IPA context
 * @iface_context: Interface-specific HDD IPA context
 * @mac_addr: Interface MAC address (becomes the template's source MAC)
 *
 * Installs the partial TX header templates "<ifname>_ipv4" and, when
 * IPv6 offload is enabled, "<ifname>_ipv6" with the IPA driver.  In
 * uC mode the template is a hdd_ipa_uc_tx_hdr carrying the vdev id;
 * otherwise it is a hdd_ipa_tx_hdr with an LLC-SNAP ethertype.  On an
 * IPv6 failure the already-installed IPv4 header is rolled back.
 *
 * Return: 0 on success, negative errno value on error
 */
static int hdd_ipa_add_header_info(struct hdd_ipa_priv *hdd_ipa,
				   struct hdd_ipa_iface_context *iface_context,
				   uint8_t *mac_addr)
{
	hdd_adapter_t *adapter = iface_context->adapter;
	char *ifname;
	struct ipa_ioc_add_hdr *ipa_hdr = NULL;
	int ret = -EINVAL;
	struct hdd_ipa_tx_hdr *tx_hdr = NULL;
	struct hdd_ipa_uc_tx_hdr *uc_tx_hdr = NULL;

	ifname = adapter->dev->name;

	HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "Add Partial hdr: %s, %pM",
		    ifname, mac_addr);

	/* dynamically allocate the memory to add the hdrs */
	ipa_hdr = cdf_mem_malloc(sizeof(struct ipa_ioc_add_hdr)
				 + sizeof(struct ipa_hdr_add));
	if (!ipa_hdr) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
			    "%s: ipa_hdr allocation failed", ifname);
		ret = -ENOMEM;
		goto end;
	}

	/* commit=0: header is not flushed to hardware by this call */
	ipa_hdr->commit = 0;
	ipa_hdr->num_hdrs = 1;

	if (hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
		/* uC path: copy the uC template and patch MAC + vdev id */
		uc_tx_hdr = (struct hdd_ipa_uc_tx_hdr *)ipa_hdr->hdr[0].hdr;
		memcpy(uc_tx_hdr, &ipa_uc_tx_hdr, HDD_IPA_UC_WLAN_TX_HDR_LEN);
		memcpy(uc_tx_hdr->eth.h_source, mac_addr, ETH_ALEN);
		uc_tx_hdr->ipa_hd.vdev_id = iface_context->adapter->sessionId;
		HDD_IPA_LOG(CDF_TRACE_LEVEL_DEBUG,
			"ifname=%s, vdev_id=%d",
			ifname, uc_tx_hdr->ipa_hd.vdev_id);
		snprintf(ipa_hdr->hdr[0].name, IPA_RESOURCE_NAME_MAX, "%s%s",
			 ifname, HDD_IPA_IPV4_NAME_EXT);
		ipa_hdr->hdr[0].hdr_len = HDD_IPA_UC_WLAN_TX_HDR_LEN;
		ipa_hdr->hdr[0].type = IPA_HDR_L2_ETHERNET_II;
		ipa_hdr->hdr[0].is_partial = 1;
		ipa_hdr->hdr[0].hdr_hdl = 0;
		ipa_hdr->hdr[0].is_eth2_ofst_valid = 1;
		ipa_hdr->hdr[0].eth2_ofst = HDD_IPA_UC_WLAN_HDR_DES_MAC_OFFSET;

		ret = ipa_add_hdr(ipa_hdr);
	} else {
		/* SW path: copy the SW template and patch the source MAC */
		tx_hdr = (struct hdd_ipa_tx_hdr *)ipa_hdr->hdr[0].hdr;

		/* Set the Source MAC */
		memcpy(tx_hdr, &ipa_tx_hdr, HDD_IPA_WLAN_TX_HDR_LEN);
		memcpy(tx_hdr->eth.h_source, mac_addr, ETH_ALEN);

		snprintf(ipa_hdr->hdr[0].name, IPA_RESOURCE_NAME_MAX, "%s%s",
			 ifname, HDD_IPA_IPV4_NAME_EXT);
		ipa_hdr->hdr[0].hdr_len = HDD_IPA_WLAN_TX_HDR_LEN;
		ipa_hdr->hdr[0].is_partial = 1;
		ipa_hdr->hdr[0].hdr_hdl = 0;
		ipa_hdr->hdr[0].is_eth2_ofst_valid = 1;
		ipa_hdr->hdr[0].eth2_ofst = HDD_IPA_WLAN_HDR_DES_MAC_OFFSET;

		/* Set the type to IPV4 in the header */
		tx_hdr->llc_snap.eth_type = cpu_to_be16(ETH_P_IP);

		ret = ipa_add_hdr(ipa_hdr);
	}
	if (ret) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "%s IPv4 add hdr failed: %d",
			    ifname, ret);
		goto end;
	}

	HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "%s: IPv4 hdr_hdl: 0x%x",
		    ipa_hdr->hdr[0].name, ipa_hdr->hdr[0].hdr_hdl);

	if (hdd_ipa_is_ipv6_enabled(hdd_ipa->hdd_ctx)) {
		/* Reuse the same request, renamed and re-typed for IPv6 */
		snprintf(ipa_hdr->hdr[0].name, IPA_RESOURCE_NAME_MAX, "%s%s",
			 ifname, HDD_IPA_IPV6_NAME_EXT);

		if (hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
			uc_tx_hdr =
				(struct hdd_ipa_uc_tx_hdr *)ipa_hdr->hdr[0].hdr;
			uc_tx_hdr->eth.h_proto = cpu_to_be16(ETH_P_IPV6);
		} else {
			/* Set the type to IPV6 in the header */
			tx_hdr = (struct hdd_ipa_tx_hdr *)ipa_hdr->hdr[0].hdr;
			tx_hdr->llc_snap.eth_type = cpu_to_be16(ETH_P_IPV6);
		}

		ret = ipa_add_hdr(ipa_hdr);
		if (ret) {
			HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
				    "%s: IPv6 add hdr failed: %d", ifname, ret);
			goto clean_ipv4_hdr;
		}

		HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "%s: IPv6 hdr_hdl: 0x%x",
			    ipa_hdr->hdr[0].name, ipa_hdr->hdr[0].hdr_hdl);
	}

	cdf_mem_free(ipa_hdr);

	return ret;

clean_ipv4_hdr:
	/* Roll back the IPv4 header installed above */
	snprintf(ipa_hdr->hdr[0].name, IPA_RESOURCE_NAME_MAX, "%s%s",
		 ifname, HDD_IPA_IPV4_NAME_EXT);
	hdd_ipa_remove_header(ipa_hdr->hdr[0].name);
end:
	if (ipa_hdr)
		cdf_mem_free(ipa_hdr);

	return ret;
}
3286
3287/**
3288 * hdd_ipa_clean_hdr() - Cleanup IPA on a given adapter
3289 * @adapter: Adapter upon which IPA was previously configured
3290 *
3291 * Return: None
3292 */
3293static void hdd_ipa_clean_hdr(hdd_adapter_t *adapter)
3294{
3295 struct hdd_ipa_priv *hdd_ipa = ghdd_ipa;
3296 int ret;
3297 char name_ipa[IPA_RESOURCE_NAME_MAX];
3298
3299 /* Remove the headers */
3300 snprintf(name_ipa, IPA_RESOURCE_NAME_MAX, "%s%s",
3301 adapter->dev->name, HDD_IPA_IPV4_NAME_EXT);
3302 hdd_ipa_remove_header(name_ipa);
3303
3304 if (hdd_ipa_is_ipv6_enabled(hdd_ipa->hdd_ctx)) {
3305 snprintf(name_ipa, IPA_RESOURCE_NAME_MAX, "%s%s",
3306 adapter->dev->name, HDD_IPA_IPV6_NAME_EXT);
3307 hdd_ipa_remove_header(name_ipa);
3308 }
3309 /* unregister the interface with IPA */
3310 ret = ipa_deregister_intf(adapter->dev->name);
3311 if (ret)
3312 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
3313 "%s: ipa_deregister_intf fail: %d",
3314 adapter->dev->name, ret);
3315}
3316
3317/**
3318 * hdd_ipa_cleanup_iface() - Cleanup IPA on a given interface
3319 * @iface_context: interface-specific IPA context
3320 *
3321 * Return: None
3322 */
3323static void hdd_ipa_cleanup_iface(struct hdd_ipa_iface_context *iface_context)
3324{
3325 if (iface_context == NULL)
3326 return;
3327
3328 hdd_ipa_clean_hdr(iface_context->adapter);
3329
3330 cdf_spin_lock_bh(&iface_context->interface_lock);
3331 iface_context->adapter->ipa_context = NULL;
3332 iface_context->adapter = NULL;
3333 iface_context->tl_context = NULL;
3334 cdf_spin_unlock_bh(&iface_context->interface_lock);
3335 iface_context->ifa_address = 0;
3336 if (!iface_context->hdd_ipa->num_iface) {
3337 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
3338 "NUM INTF 0, Invalid");
3339 CDF_ASSERT(0);
3340 }
3341 iface_context->hdd_ipa->num_iface--;
3342}
3343
3344/**
3345 * hdd_ipa_setup_iface() - Setup IPA on a given interface
3346 * @hdd_ipa: HDD IPA global context
3347 * @adapter: Interface upon which IPA is being setup
3348 * @sta_id: Station ID of the API instance
3349 *
3350 * Return: 0 on success, negative errno value on error
3351 */
3352static int hdd_ipa_setup_iface(struct hdd_ipa_priv *hdd_ipa,
3353 hdd_adapter_t *adapter, uint8_t sta_id)
3354{
3355 struct hdd_ipa_iface_context *iface_context = NULL;
3356 void *tl_context = NULL;
3357 int i, ret = 0;
3358
3359 /* Lower layer may send multiple START_BSS_EVENT in DFS mode or during
3360 * channel change indication. Since these indications are sent by lower
3361 * layer as SAP updates and IPA doesn't have to do anything for these
3362 * updates so ignoring!
3363 */
3364 if (WLAN_HDD_SOFTAP == adapter->device_mode && adapter->ipa_context)
3365 return 0;
3366
3367 for (i = 0; i < HDD_IPA_MAX_IFACE; i++) {
3368 if (hdd_ipa->iface_context[i].adapter == NULL) {
3369 iface_context = &(hdd_ipa->iface_context[i]);
3370 break;
3371 }
3372 }
3373
3374 if (iface_context == NULL) {
3375 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
3376 "All the IPA interfaces are in use");
3377 ret = -ENOMEM;
3378 goto end;
3379 }
3380
3381 adapter->ipa_context = iface_context;
3382 iface_context->adapter = adapter;
3383 iface_context->sta_id = sta_id;
3384 tl_context = ol_txrx_get_vdev_by_sta_id(sta_id);
3385
3386 if (tl_context == NULL) {
3387 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
3388 "Not able to get TL context sta_id: %d", sta_id);
3389 ret = -EINVAL;
3390 goto end;
3391 }
3392
3393 iface_context->tl_context = tl_context;
3394
3395 ret = hdd_ipa_add_header_info(hdd_ipa, iface_context,
3396 adapter->dev->dev_addr);
3397
3398 if (ret)
3399 goto end;
3400
3401 /* Configure the TX and RX pipes filter rules */
3402 ret = hdd_ipa_register_interface(hdd_ipa, iface_context);
3403 if (ret)
3404 goto cleanup_header;
3405
3406 hdd_ipa->num_iface++;
3407 return ret;
3408
3409cleanup_header:
3410
3411 hdd_ipa_clean_hdr(adapter);
3412end:
3413 if (iface_context)
3414 hdd_ipa_cleanup_iface(iface_context);
3415 return ret;
3416}
3417
3418/**
3419 * hdd_ipa_msg_free_fn() - Free an IPA message
3420 * @buff: pointer to the IPA message
3421 * @len: length of the IPA message
3422 * @type: type of IPA message
3423 *
3424 * Return: None
3425 */
3426static void hdd_ipa_msg_free_fn(void *buff, uint32_t len, uint32_t type)
3427{
3428 hddLog(LOG1, "msg type:%d, len:%d", type, len);
3429 ghdd_ipa->stats.num_free_msg++;
3430 cdf_mem_free(buff);
3431}
3432
3433/**
3434 * hdd_ipa_send_mcc_scc_msg() - send IPA WLAN_SWITCH_TO_MCC/SCC message
3435 * @mcc_mode: 0=MCC/1=SCC
3436 *
3437 * Return: 0 on success, negative errno value on error
3438 */
3439int hdd_ipa_send_mcc_scc_msg(hdd_context_t *pHddCtx, bool mcc_mode)
3440{
3441 hdd_adapter_list_node_t *adapter_node = NULL, *next = NULL;
3442 CDF_STATUS status;
3443 hdd_adapter_t *pAdapter;
3444 struct ipa_msg_meta meta;
3445 struct ipa_wlan_msg *msg;
3446 int ret;
3447
3448 if (!hdd_ipa_uc_sta_is_enabled(pHddCtx))
3449 return -EINVAL;
3450
3451 if (!pHddCtx->mcc_mode) {
3452 /* Flush TxRx queue for each adapter before switch to SCC */
3453 status = hdd_get_front_adapter(pHddCtx, &adapter_node);
3454 while (NULL != adapter_node && CDF_STATUS_SUCCESS == status) {
3455 pAdapter = adapter_node->pAdapter;
3456 if (pAdapter->device_mode == WLAN_HDD_INFRA_STATION ||
3457 pAdapter->device_mode == WLAN_HDD_SOFTAP) {
3458 hddLog(CDF_TRACE_LEVEL_INFO,
3459 "MCC->SCC: Flush TxRx queue(d_mode=%d)",
3460 pAdapter->device_mode);
3461 hdd_deinit_tx_rx(pAdapter);
3462 }
3463 status = hdd_get_next_adapter(
3464 pHddCtx, adapter_node, &next);
3465 adapter_node = next;
3466 }
3467 }
3468
3469 /* Send SCC/MCC Switching event to IPA */
3470 meta.msg_len = sizeof(*msg);
3471 msg = cdf_mem_malloc(meta.msg_len);
3472 if (msg == NULL) {
3473 hddLog(LOGE, "msg allocation failed");
3474 return -ENOMEM;
3475 }
3476
3477 meta.msg_type = mcc_mode ?
3478 WLAN_SWITCH_TO_MCC : WLAN_SWITCH_TO_SCC;
3479 hddLog(LOG1, "ipa_send_msg(Evt:%d)", meta.msg_type);
3480
3481 ret = ipa_send_msg(&meta, msg, hdd_ipa_msg_free_fn);
3482
3483 if (ret) {
3484 hddLog(LOGE, "ipa_send_msg(Evt:%d) - fail=%d",
3485 meta.msg_type, ret);
3486 cdf_mem_free(msg);
3487 }
3488
3489 return ret;
3490}
3491
3492/**
3493 * hdd_ipa_wlan_event_to_str() - convert IPA WLAN event to string
3494 * @event: IPA WLAN event to be converted to a string
3495 *
3496 * Return: ASCII string representing the IPA WLAN event
3497 */
3498static inline char *hdd_ipa_wlan_event_to_str(enum ipa_wlan_event event)
3499{
3500 switch (event) {
3501 case WLAN_CLIENT_CONNECT:
3502 return "WLAN_CLIENT_CONNECT";
3503 case WLAN_CLIENT_DISCONNECT:
3504 return "WLAN_CLIENT_DISCONNECT";
3505 case WLAN_CLIENT_POWER_SAVE_MODE:
3506 return "WLAN_CLIENT_POWER_SAVE_MODE";
3507 case WLAN_CLIENT_NORMAL_MODE:
3508 return "WLAN_CLIENT_NORMAL_MODE";
3509 case SW_ROUTING_ENABLE:
3510 return "SW_ROUTING_ENABLE";
3511 case SW_ROUTING_DISABLE:
3512 return "SW_ROUTING_DISABLE";
3513 case WLAN_AP_CONNECT:
3514 return "WLAN_AP_CONNECT";
3515 case WLAN_AP_DISCONNECT:
3516 return "WLAN_AP_DISCONNECT";
3517 case WLAN_STA_CONNECT:
3518 return "WLAN_STA_CONNECT";
3519 case WLAN_STA_DISCONNECT:
3520 return "WLAN_STA_DISCONNECT";
3521 case WLAN_CLIENT_CONNECT_EX:
3522 return "WLAN_CLIENT_CONNECT_EX";
3523
3524 case IPA_WLAN_EVENT_MAX:
3525 default:
3526 return "UNKNOWN";
3527 }
3528}
3529
3530/**
3531 * hdd_ipa_wlan_evt() - IPA event handler
3532 * @adapter: adapter upon which the event was received
3533 * @sta_id: station id for the event
3534 * @type: the event type
3535 * @mac_address: MAC address associated with the event
3536 *
3537 * Return: 0 on success, negative errno value on error
3538 */
3539int hdd_ipa_wlan_evt(hdd_adapter_t *adapter, uint8_t sta_id,
3540 enum ipa_wlan_event type, uint8_t *mac_addr)
3541{
3542 struct hdd_ipa_priv *hdd_ipa = ghdd_ipa;
3543 struct ipa_msg_meta meta;
3544 struct ipa_wlan_msg *msg;
3545 struct ipa_wlan_msg_ex *msg_ex = NULL;
3546 int ret;
3547
3548 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "%s: %s evt, MAC: %pM sta_id: %d",
3549 adapter->dev->name, hdd_ipa_wlan_event_to_str(type),
3550 mac_addr, sta_id);
3551
3552 if (type >= IPA_WLAN_EVENT_MAX)
3553 return -EINVAL;
3554
3555 if (WARN_ON(is_zero_ether_addr(mac_addr)))
3556 return -EINVAL;
3557
3558 if (!hdd_ipa || !hdd_ipa_is_enabled(hdd_ipa->hdd_ctx)) {
3559 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "IPA OFFLOAD NOT ENABLED");
3560 return -EINVAL;
3561 }
3562
3563 if (hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx) &&
3564 !hdd_ipa_uc_sta_is_enabled(hdd_ipa->hdd_ctx) &&
3565 (WLAN_HDD_SOFTAP != adapter->device_mode)) {
3566 return 0;
3567 }
3568
3569 /*
3570 * During IPA UC resource loading/unloading new events can be issued.
3571 * Store the events separately and handle them later.
3572 */
3573 if (hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx) &&
3574 ((hdd_ipa->resource_loading) ||
3575 (hdd_ipa->resource_unloading))) {
Yun Parkf19e07d2015-11-20 11:34:27 -08003576 unsigned int pending_event_count;
3577 struct ipa_uc_pending_event *pending_event = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003578
Yun Parkf19e07d2015-11-20 11:34:27 -08003579 hdd_err("IPA resource %s inprogress",
3580 hdd_ipa->resource_loading ? "load":"unload");
3581
3582 cdf_mutex_acquire(&hdd_ipa->event_lock);
3583
3584 cdf_list_size(&hdd_ipa->pending_event, &pending_event_count);
3585 if (pending_event_count >= HDD_IPA_MAX_PENDING_EVENT_COUNT) {
3586 hdd_notice("Reached max pending event count");
3587 cdf_list_remove_front(&hdd_ipa->pending_event,
3588 (cdf_list_node_t **)&pending_event);
3589 } else {
3590 pending_event =
3591 (struct ipa_uc_pending_event *)cdf_mem_malloc(
3592 sizeof(struct ipa_uc_pending_event));
3593 }
3594
3595 if (!pending_event) {
3596 hdd_err("Pending event memory alloc fail");
3597 cdf_mutex_release(&hdd_ipa->event_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003598 return -ENOMEM;
3599 }
Yun Parkf19e07d2015-11-20 11:34:27 -08003600
3601 pending_event->adapter = adapter;
3602 pending_event->sta_id = sta_id;
3603 pending_event->type = type;
3604 cdf_mem_copy(pending_event->mac_addr,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003605 mac_addr,
3606 CDF_MAC_ADDR_SIZE);
3607 cdf_list_insert_back(&hdd_ipa->pending_event,
Yun Parkf19e07d2015-11-20 11:34:27 -08003608 &pending_event->node);
3609
3610 cdf_mutex_release(&hdd_ipa->event_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003611 return 0;
3612 }
3613
3614 hdd_ipa->stats.event[type]++;
3615
Leo Chang3bc8fed2015-11-13 10:59:47 -08003616 meta.msg_type = type;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003617 switch (type) {
3618 case WLAN_STA_CONNECT:
3619 /* STA already connected and without disconnect, connect again
3620 * This is Roaming scenario
3621 */
3622 if (hdd_ipa->sta_connected)
3623 hdd_ipa_cleanup_iface(adapter->ipa_context);
3624
3625 if ((hdd_ipa_uc_sta_is_enabled(hdd_ipa->hdd_ctx)) &&
3626 (!hdd_ipa->sta_connected))
3627 hdd_ipa_uc_offload_enable_disable(adapter,
3628 SIR_STA_RX_DATA_OFFLOAD, 1);
3629
3630 cdf_mutex_acquire(&hdd_ipa->event_lock);
3631
3632 if (!hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
3633 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
3634 "%s: Evt: %d, IPA UC OFFLOAD NOT ENABLED",
3635 msg_ex->name, meta.msg_type);
3636 } else if ((!hdd_ipa->sap_num_connected_sta) &&
3637 (!hdd_ipa->sta_connected)) {
3638 /* Enable IPA UC TX PIPE when STA connected */
3639 ret = hdd_ipa_uc_handle_first_con(hdd_ipa);
Yun Park4cab6ee2015-10-27 11:43:40 -07003640 if (ret) {
3641 cdf_mutex_release(&hdd_ipa->event_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003642 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
3643 "handle 1st con ret %d", ret);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003644 hdd_ipa_uc_offload_enable_disable(adapter,
3645 SIR_STA_RX_DATA_OFFLOAD, 0);
3646 goto end;
3647 }
3648 }
3649 ret = hdd_ipa_setup_iface(hdd_ipa, adapter, sta_id);
3650 if (ret) {
3651 cdf_mutex_release(&hdd_ipa->event_lock);
3652 hdd_ipa_uc_offload_enable_disable(adapter,
3653 SIR_STA_RX_DATA_OFFLOAD, 0);
3654 goto end;
3655
3656#ifdef IPA_UC_OFFLOAD
3657 vdev_to_iface[adapter->sessionId] =
3658 ((struct hdd_ipa_iface_context *)
3659 (adapter->ipa_context))->iface_id;
3660#endif /* IPA_UC_OFFLOAD */
3661 }
3662
3663 cdf_mutex_release(&hdd_ipa->event_lock);
3664
3665 hdd_ipa->sta_connected = 1;
3666 break;
3667
3668 case WLAN_AP_CONNECT:
3669 /* For DFS channel we get two start_bss event (before and after
3670 * CAC). Also when ACS range includes both DFS and non DFS
3671 * channels, we could possibly change channel many times due to
3672 * RADAR detection and chosen channel may not be a DFS channels.
3673 * So dont return error here. Just discard the event.
3674 */
3675 if (adapter->ipa_context)
3676 return 0;
3677
3678 if (hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
3679 hdd_ipa_uc_offload_enable_disable(adapter,
3680 SIR_AP_RX_DATA_OFFLOAD, 1);
3681 }
3682 cdf_mutex_acquire(&hdd_ipa->event_lock);
3683 ret = hdd_ipa_setup_iface(hdd_ipa, adapter, sta_id);
3684 if (ret) {
3685 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
3686 "%s: Evt: %d, Interface setup failed",
3687 msg_ex->name, meta.msg_type);
3688 cdf_mutex_release(&hdd_ipa->event_lock);
3689 goto end;
3690
3691#ifdef IPA_UC_OFFLOAD
3692 vdev_to_iface[adapter->sessionId] =
3693 ((struct hdd_ipa_iface_context *)
3694 (adapter->ipa_context))->iface_id;
3695#endif /* IPA_UC_OFFLOAD */
3696 }
3697 cdf_mutex_release(&hdd_ipa->event_lock);
3698 break;
3699
3700 case WLAN_STA_DISCONNECT:
3701 cdf_mutex_acquire(&hdd_ipa->event_lock);
3702 hdd_ipa_cleanup_iface(adapter->ipa_context);
3703
3704 if (!hdd_ipa->sta_connected) {
3705 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
3706 "%s: Evt: %d, STA already disconnected",
3707 msg_ex->name, meta.msg_type);
3708 cdf_mutex_release(&hdd_ipa->event_lock);
3709 return -EINVAL;
3710 }
3711 hdd_ipa->sta_connected = 0;
3712 if (!hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
3713 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
3714 "%s: IPA UC OFFLOAD NOT ENABLED",
3715 msg_ex->name);
3716 } else {
3717 /* Disable IPA UC TX PIPE when STA disconnected */
3718 if ((!hdd_ipa->sap_num_connected_sta) ||
3719 ((!hdd_ipa->num_iface) &&
3720 (HDD_IPA_UC_NUM_WDI_PIPE ==
3721 hdd_ipa->activated_fw_pipe))) {
3722 hdd_ipa_uc_handle_last_discon(hdd_ipa);
3723 }
3724 }
3725
3726 if (hdd_ipa_uc_sta_is_enabled(hdd_ipa->hdd_ctx)) {
3727 hdd_ipa_uc_offload_enable_disable(adapter,
3728 SIR_STA_RX_DATA_OFFLOAD, 0);
3729 vdev_to_iface[adapter->sessionId] = HDD_IPA_MAX_IFACE;
3730 }
3731
3732 cdf_mutex_release(&hdd_ipa->event_lock);
3733 break;
3734
3735 case WLAN_AP_DISCONNECT:
3736 if (!adapter->ipa_context) {
3737 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
3738 "%s: Evt: %d, SAP already disconnected",
3739 msg_ex->name, meta.msg_type);
3740 return -EINVAL;
3741 }
3742
3743 cdf_mutex_acquire(&hdd_ipa->event_lock);
3744 hdd_ipa_cleanup_iface(adapter->ipa_context);
3745 if ((!hdd_ipa->num_iface) &&
3746 (HDD_IPA_UC_NUM_WDI_PIPE ==
3747 hdd_ipa->activated_fw_pipe)) {
3748 if (hdd_ipa->hdd_ctx->isUnloadInProgress) {
3749 /*
3750 * We disable WDI pipes directly here since
3751 * IPA_OPCODE_TX/RX_SUSPEND message will not be
3752 * processed when unloading WLAN driver is in
3753 * progress
3754 */
3755 hdd_ipa_uc_disable_pipes(hdd_ipa);
3756 } else {
3757 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
3758 "NO INTF left but still pipe clean up");
3759 hdd_ipa_uc_handle_last_discon(hdd_ipa);
3760 }
3761 }
3762
3763 if (hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
3764 hdd_ipa_uc_offload_enable_disable(adapter,
3765 SIR_AP_RX_DATA_OFFLOAD, 0);
3766 vdev_to_iface[adapter->sessionId] = HDD_IPA_MAX_IFACE;
3767 }
3768 cdf_mutex_release(&hdd_ipa->event_lock);
3769 break;
3770
3771 case WLAN_CLIENT_CONNECT_EX:
3772 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "%d %d",
3773 adapter->dev->ifindex, sta_id);
3774
3775 if (!hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
3776 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
3777 "%s: Evt: %d, IPA UC OFFLOAD NOT ENABLED",
3778 adapter->dev->name, meta.msg_type);
3779 return 0;
3780 }
3781
3782 cdf_mutex_acquire(&hdd_ipa->event_lock);
3783 if (hdd_ipa_uc_find_add_assoc_sta(hdd_ipa,
3784 true, sta_id)) {
3785 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
3786 "%s: STA ID %d found, not valid",
3787 adapter->dev->name, sta_id);
3788 cdf_mutex_release(&hdd_ipa->event_lock);
3789 return 0;
3790 }
Yun Park312f71a2015-12-08 10:22:42 -08003791
3792 /* Enable IPA UC Data PIPEs when first STA connected */
3793 if ((0 == hdd_ipa->sap_num_connected_sta) &&
3794 (!hdd_ipa_uc_sta_is_enabled(hdd_ipa->hdd_ctx) ||
3795 !hdd_ipa->sta_connected)) {
3796 ret = hdd_ipa_uc_handle_first_con(hdd_ipa);
3797 if (ret) {
3798 cdf_mutex_release(&hdd_ipa->event_lock);
3799 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
3800 "%s: handle 1st con ret %d",
3801 adapter->dev->name, ret);
3802 return ret;
3803 }
3804 }
3805
3806 hdd_ipa->sap_num_connected_sta++;
3807 hdd_ipa->pending_cons_req = false;
3808
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003809 cdf_mutex_release(&hdd_ipa->event_lock);
3810
3811 meta.msg_type = type;
3812 meta.msg_len = (sizeof(struct ipa_wlan_msg_ex) +
3813 sizeof(struct ipa_wlan_hdr_attrib_val));
3814 msg_ex = cdf_mem_malloc(meta.msg_len);
3815
3816 if (msg_ex == NULL) {
3817 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
3818 "msg_ex allocation failed");
3819 return -ENOMEM;
3820 }
3821 strlcpy(msg_ex->name, adapter->dev->name,
3822 IPA_RESOURCE_NAME_MAX);
3823 msg_ex->num_of_attribs = 1;
3824 msg_ex->attribs[0].attrib_type = WLAN_HDR_ATTRIB_MAC_ADDR;
3825 if (hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
3826 msg_ex->attribs[0].offset =
3827 HDD_IPA_UC_WLAN_HDR_DES_MAC_OFFSET;
3828 } else {
3829 msg_ex->attribs[0].offset =
3830 HDD_IPA_WLAN_HDR_DES_MAC_OFFSET;
3831 }
3832 memcpy(msg_ex->attribs[0].u.mac_addr, mac_addr,
3833 IPA_MAC_ADDR_SIZE);
3834
3835 ret = ipa_send_msg(&meta, msg_ex, hdd_ipa_msg_free_fn);
3836
3837 if (ret) {
3838 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "%s: Evt: %d : %d",
3839 msg_ex->name, meta.msg_type, ret);
3840 cdf_mem_free(msg_ex);
3841 return ret;
3842 }
3843 hdd_ipa->stats.num_send_msg++;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003844 return ret;
3845
3846 case WLAN_CLIENT_DISCONNECT:
3847 if (!hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
3848 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
3849 "%s: IPA UC OFFLOAD NOT ENABLED",
3850 msg_ex->name);
3851 return 0;
3852 }
3853
3854 cdf_mutex_acquire(&hdd_ipa->event_lock);
3855 if (!hdd_ipa_uc_find_add_assoc_sta(hdd_ipa, false, sta_id)) {
3856 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
3857 "%s: STA ID %d NOT found, not valid",
3858 msg_ex->name, sta_id);
3859 cdf_mutex_release(&hdd_ipa->event_lock);
3860 return 0;
3861 }
3862 hdd_ipa->sap_num_connected_sta--;
3863 /* Disable IPA UC TX PIPE when last STA disconnected */
3864 if (!hdd_ipa->sap_num_connected_sta
3865 && (!hdd_ipa_uc_sta_is_enabled(hdd_ipa->hdd_ctx) ||
3866 !hdd_ipa->sta_connected)
3867 && (false == hdd_ipa->resource_unloading)
3868 && (HDD_IPA_UC_NUM_WDI_PIPE ==
3869 hdd_ipa->activated_fw_pipe))
3870 hdd_ipa_uc_handle_last_discon(hdd_ipa);
3871 cdf_mutex_release(&hdd_ipa->event_lock);
3872 break;
3873
3874 default:
3875 return 0;
3876 }
3877
3878 meta.msg_len = sizeof(struct ipa_wlan_msg);
3879 msg = cdf_mem_malloc(meta.msg_len);
3880 if (msg == NULL) {
3881 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "msg allocation failed");
3882 return -ENOMEM;
3883 }
3884
3885 meta.msg_type = type;
3886 strlcpy(msg->name, adapter->dev->name, IPA_RESOURCE_NAME_MAX);
3887 memcpy(msg->mac_addr, mac_addr, ETH_ALEN);
3888
3889 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "%s: Evt: %d",
3890 msg->name, meta.msg_type);
3891
3892 ret = ipa_send_msg(&meta, msg, hdd_ipa_msg_free_fn);
3893
3894 if (ret) {
3895 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "%s: Evt: %d fail:%d",
3896 msg->name, meta.msg_type, ret);
3897 cdf_mem_free(msg);
3898 return ret;
3899 }
3900
3901 hdd_ipa->stats.num_send_msg++;
3902
3903end:
3904 return ret;
3905}
3906
3907/**
3908 * hdd_ipa_rm_state_to_str() - Convert IPA RM state to string
3909 * @state: IPA RM state value
3910 *
3911 * Return: ASCII string representing the IPA RM state
3912 */
3913static inline char *hdd_ipa_rm_state_to_str(enum hdd_ipa_rm_state state)
3914{
3915 switch (state) {
3916 case HDD_IPA_RM_RELEASED:
3917 return "RELEASED";
3918 case HDD_IPA_RM_GRANT_PENDING:
3919 return "GRANT_PENDING";
3920 case HDD_IPA_RM_GRANTED:
3921 return "GRANTED";
3922 }
3923
3924 return "UNKNOWN";
3925}
3926
/**
 * hdd_ipa_init() - IPA initialization function
 * @hdd_ctx: HDD global context
 *
 * Allocate hdd_ipa resources, ipa pipe resource and register
 * wlan interface with IPA module.
 *
 * On failure every resource acquired so far is released via the
 * reverse-order goto ladder at the bottom of the function.
 *
 * Return: CDF_STATUS enumeration (CDF_STATUS_SUCCESS also when IPA is
 * disabled by configuration, CDF_STATUS_E_FAILURE on any setup error)
 */
CDF_STATUS hdd_ipa_init(hdd_context_t *hdd_ctx)
{
	struct hdd_ipa_priv *hdd_ipa = NULL;
	int ret, i;
	struct hdd_ipa_iface_context *iface_context = NULL;

	/* IPA offload disabled by configuration: nothing to set up */
	if (!hdd_ipa_is_enabled(hdd_ctx))
		return CDF_STATUS_SUCCESS;

	hdd_ipa = cdf_mem_malloc(sizeof(*hdd_ipa));
	if (!hdd_ipa) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_FATAL, "hdd_ipa allocation failed");
		goto fail_return;
	}

	/* Publish the context in both hdd_ctx and the file-scope global */
	hdd_ctx->hdd_ipa = hdd_ipa;
	ghdd_ipa = hdd_ipa;
	hdd_ipa->hdd_ctx = hdd_ctx;
	hdd_ipa->num_iface = 0;
	/*
	 * Query TXRX for the copy-engine / TX-completion / RX-ready ring
	 * addresses and sizes that will be handed over to the IPA uC.
	 */
	ol_txrx_ipa_uc_get_resource(cds_get_context(CDF_MODULE_ID_TXRX),
				    &hdd_ipa->ce_sr_base_paddr,
				    &hdd_ipa->ce_sr_ring_size,
				    &hdd_ipa->ce_reg_paddr,
				    &hdd_ipa->tx_comp_ring_base_paddr,
				    &hdd_ipa->tx_comp_ring_size,
				    &hdd_ipa->tx_num_alloc_buffer,
				    &hdd_ipa->rx_rdy_ring_base_paddr,
				    &hdd_ipa->rx_rdy_ring_size,
				    &hdd_ipa->rx_proc_done_idx_paddr,
				    &hdd_ipa->rx_proc_done_idx_vaddr,
				    &hdd_ipa->rx2_rdy_ring_base_paddr,
				    &hdd_ipa->rx2_rdy_ring_size,
				    &hdd_ipa->rx2_proc_done_idx_paddr,
				    &hdd_ipa->rx2_proc_done_idx_vaddr);
	/* A zero base address means TXRX never allocated that ring */
	if ((0 == hdd_ipa->ce_sr_base_paddr) ||
	    (0 == hdd_ipa->tx_comp_ring_base_paddr) ||
	    (0 == hdd_ipa->rx_rdy_ring_base_paddr) ||
	    (0 == hdd_ipa->rx2_rdy_ring_base_paddr)) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_FATAL,
			    "IPA UC resource alloc fail");
		goto fail_get_resource;
	}

	/* Create the interface context */
	for (i = 0; i < HDD_IPA_MAX_IFACE; i++) {
		iface_context = &hdd_ipa->iface_context[i];
		iface_context->hdd_ipa = hdd_ipa;
		iface_context->cons_client =
			hdd_ipa_adapter_2_client[i].cons_client;
		iface_context->prod_client =
			hdd_ipa_adapter_2_client[i].prod_client;
		iface_context->iface_id = i;
		iface_context->adapter = NULL;
		cdf_spinlock_init(&iface_context->interface_lock);
	}

	/* Deferred-transmit worker and its queue/lock (PM path) */
#ifdef CONFIG_CNSS
	cnss_init_work(&hdd_ipa->pm_work, hdd_ipa_pm_send_pkt_to_tl);
#else
	INIT_WORK(&hdd_ipa->pm_work, hdd_ipa_pm_send_pkt_to_tl);
#endif
	cdf_spinlock_init(&hdd_ipa->pm_lock);
	cdf_nbuf_queue_init(&hdd_ipa->pm_queue_head);

	ret = hdd_ipa_setup_rm(hdd_ipa);
	if (ret)
		goto fail_setup_rm;

	if (hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
		/* uC (WDI) offload path: reset per-session bookkeeping */
		hdd_ipa_uc_rt_debug_init(hdd_ctx);
		cdf_mem_zero(&hdd_ipa->stats, sizeof(hdd_ipa->stats));
		hdd_ipa->sap_num_connected_sta = 0;
		hdd_ipa->ipa_tx_packets_diff = 0;
		hdd_ipa->ipa_rx_packets_diff = 0;
		hdd_ipa->ipa_p_tx_packets = 0;
		hdd_ipa->ipa_p_rx_packets = 0;
		hdd_ipa->resource_loading = false;
		hdd_ipa->resource_unloading = false;
		hdd_ipa->sta_connected = 0;
		hdd_ipa->ipa_pipes_down = true;
		/* Setup IPA sys_pipe for MCC */
		if (hdd_ipa_uc_sta_is_enabled(hdd_ipa->hdd_ctx)) {
			ret = hdd_ipa_setup_sys_pipe(hdd_ipa);
			if (ret)
				goto fail_create_sys_pipe;
		}
		/* NOTE(review): return value is ignored here — confirm
		 * hdd_ipa_uc_ol_init() cannot fail or reports elsewhere
		 */
		hdd_ipa_uc_ol_init(hdd_ctx);
	} else {
		/* Non-uC path: all traffic goes through IPA sys pipes */
		ret = hdd_ipa_setup_sys_pipe(hdd_ipa);
		if (ret)
			goto fail_create_sys_pipe;
	}

	return CDF_STATUS_SUCCESS;

	/* Error unwind: release in reverse order of acquisition */
fail_create_sys_pipe:
	hdd_ipa_destroy_rm_resource(hdd_ipa);
fail_setup_rm:
	cdf_spinlock_destroy(&hdd_ipa->pm_lock);
fail_get_resource:
	cdf_mem_free(hdd_ipa);
	hdd_ctx->hdd_ipa = NULL;
	ghdd_ipa = NULL;
fail_return:
	return CDF_STATUS_E_FAILURE;
}
4042
4043/**
Yun Parkf19e07d2015-11-20 11:34:27 -08004044 * hdd_ipa_cleanup_pending_event() - Cleanup IPA pending event list
4045 * @hdd_ipa: pointer to HDD IPA struct
4046 *
4047 * Return: none
4048 */
4049void hdd_ipa_cleanup_pending_event(struct hdd_ipa_priv *hdd_ipa)
4050{
4051 struct ipa_uc_pending_event *pending_event = NULL;
4052
4053 while (cdf_list_remove_front(&hdd_ipa->pending_event,
4054 (cdf_list_node_t **)&pending_event) == CDF_STATUS_SUCCESS) {
4055 cdf_mem_free(pending_event);
4056 }
4057
4058 cdf_list_destroy(&hdd_ipa->pending_event);
4059}
4060
/**
 * hdd_ipa_cleanup - IPA cleanup function
 * @hdd_ctx: HDD global context
 *
 * Tears down everything hdd_ipa_init() set up: sys pipes, RM resources,
 * the deferred-TX queue, per-interface locks and (in uC mode) the WDI
 * pipes, then frees the hdd_ipa context. Teardown order mirrors init in
 * reverse; do not reorder without checking the matching init steps.
 *
 * Return: CDF_STATUS enumeration (always CDF_STATUS_SUCCESS)
 */
CDF_STATUS hdd_ipa_cleanup(hdd_context_t *hdd_ctx)
{
	struct hdd_ipa_priv *hdd_ipa = hdd_ctx->hdd_ipa;
	int i;
	struct hdd_ipa_iface_context *iface_context = NULL;
	cdf_nbuf_t skb;
	struct hdd_ipa_pm_tx_cb *pm_tx_cb = NULL;

	/* Nothing was initialized when IPA is disabled by configuration */
	if (!hdd_ipa_is_enabled(hdd_ctx))
		return CDF_STATUS_SUCCESS;

	if (!hdd_ipa_uc_is_enabled(hdd_ctx)) {
		/* Non-uC mode: drop the IPv4 address notifier and sys pipes
		 * (presumably registered by the non-uC init path — confirm)
		 */
		unregister_inetaddr_notifier(&hdd_ipa->ipv4_notifier);
		hdd_ipa_teardown_sys_pipe(hdd_ipa);
	}

	/* Teardown IPA sys_pipe for MCC */
	if (hdd_ipa_uc_sta_is_enabled(hdd_ipa->hdd_ctx))
		hdd_ipa_teardown_sys_pipe(hdd_ipa);

	hdd_ipa_destroy_rm_resource(hdd_ipa);

#ifdef WLAN_OPEN_SOURCE
	/* Make sure the deferred-TX worker is idle before draining queue */
	cancel_work_sync(&hdd_ipa->pm_work);
#endif

	cdf_spin_lock_bh(&hdd_ipa->pm_lock);

	/*
	 * Drain the deferred-TX queue. pm_lock is dropped across
	 * ipa_free_skb(); NOTE(review): presumably the IPA free path must
	 * not be called under this spinlock — keep the lock dance as-is.
	 */
	while (((skb = cdf_nbuf_queue_remove(&hdd_ipa->pm_queue_head)) != NULL)) {
		cdf_spin_unlock_bh(&hdd_ipa->pm_lock);

		pm_tx_cb = (struct hdd_ipa_pm_tx_cb *)skb->cb;
		ipa_free_skb(pm_tx_cb->ipa_tx_desc);

		cdf_spin_lock_bh(&hdd_ipa->pm_lock);
	}
	cdf_spin_unlock_bh(&hdd_ipa->pm_lock);

	cdf_spinlock_destroy(&hdd_ipa->pm_lock);

	/* destroy the interface lock */
	for (i = 0; i < HDD_IPA_MAX_IFACE; i++) {
		iface_context = &hdd_ipa->iface_context[i];
		cdf_spinlock_destroy(&iface_context->interface_lock);
	}

	/* This should never hit but still make sure that there are no pending
	 * descriptor in IPA hardware
	 */
	if (hdd_ipa->pending_hw_desc_cnt != 0) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
			    "IPA Pending write done: %d Waiting!",
			    hdd_ipa->pending_hw_desc_cnt);

		/* Poll up to 10 times, 100us each (~1ms total) */
		for (i = 0; hdd_ipa->pending_hw_desc_cnt != 0 && i < 10; i++) {
			usleep_range(100, 100);
		}

		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
			    "IPA Pending write done: desc: %d %s(%d)!",
			    hdd_ipa->pending_hw_desc_cnt,
			    hdd_ipa->pending_hw_desc_cnt == 0 ? "completed"
			    : "leak", i);
	}
	if (hdd_ipa_uc_is_enabled(hdd_ctx)) {
		/* uC mode: disconnect both WDI pipes and drop uC state */
		hdd_ipa_uc_rt_debug_deinit(hdd_ctx);
		HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
			    "%s: Disconnect TX PIPE", __func__);
		ipa_disconnect_wdi_pipe(hdd_ipa->tx_pipe_handle);
		HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
			    "%s: Disconnect RX PIPE", __func__);
		ipa_disconnect_wdi_pipe(hdd_ipa->rx_pipe_handle);
		cdf_mutex_destroy(&hdd_ipa->event_lock);
		cdf_mutex_destroy(&hdd_ipa->ipa_lock);
		hdd_ipa_cleanup_pending_event(hdd_ipa);

#ifdef WLAN_OPEN_SOURCE
		/* Stop any in-flight uC opcode work before freeing context */
		for (i = 0; i < HDD_IPA_UC_OPCODE_MAX; i++) {
			cancel_work_sync(&hdd_ipa->uc_op_work[i].work);
			hdd_ipa->uc_op_work[i].msg = NULL;
		}
#endif
	}

	cdf_mem_free(hdd_ipa);
	hdd_ctx->hdd_ipa = NULL;

	return CDF_STATUS_SUCCESS;
}
4156#endif /* IPA_OFFLOAD */