blob: 2f4e006b0701dfeae0b68516c45fe4efb343f877 [file] [log] [blame]
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001/*
2 * Copyright (c) 2013-2015 The Linux Foundation. All rights reserved.
3 *
4 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5 *
6 *
7 * Permission to use, copy, modify, and/or distribute this software for
8 * any purpose with or without fee is hereby granted, provided that the
9 * above copyright notice and this permission notice appear in all
10 * copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19 * PERFORMANCE OF THIS SOFTWARE.
20 */
21
22/*
23 * This file was originally distributed by Qualcomm Atheros, Inc.
24 * under proprietary terms before Copyright ownership was assigned
25 * to the Linux Foundation.
26 */
27
28/**
29 * DOC: wlan_hdd_ipa.c
30 *
31 * WLAN HDD and ipa interface implementation
32 * Originally written by Qualcomm Atheros, Inc
33 */
34
35#ifdef IPA_OFFLOAD
36
37/* Include Files */
38#include <wlan_hdd_includes.h>
39#include <wlan_hdd_ipa.h>
40
41#include <linux/etherdevice.h>
42#include <linux/atomic.h>
43#include <linux/netdevice.h>
44#include <linux/skbuff.h>
45#include <linux/list.h>
46#include <linux/debugfs.h>
47#include <linux/inetdevice.h>
48#include <linux/ip.h>
49#include <wlan_hdd_softap_tx_rx.h>
50#include <ol_txrx_osif_api.h>
51
52#include "cds_sched.h"
53
54#include "wma.h"
55#include "wma_api.h"
56
57#define HDD_IPA_DESC_BUFFER_RATIO 4
58#define HDD_IPA_IPV4_NAME_EXT "_ipv4"
59#define HDD_IPA_IPV6_NAME_EXT "_ipv6"
60
61#define HDD_IPA_RX_INACTIVITY_MSEC_DELAY 1000
62#define HDD_IPA_UC_WLAN_HDR_DES_MAC_OFFSET 12
63#define HDD_IPA_UC_WLAN_8023_HDR_SIZE 14
64/* WDI TX and RX PIPE */
65#define HDD_IPA_UC_NUM_WDI_PIPE 2
66#define HDD_IPA_UC_MAX_PENDING_EVENT 33
67
68#define HDD_IPA_UC_DEBUG_DUMMY_MEM_SIZE 32000
69#define HDD_IPA_UC_RT_DEBUG_PERIOD 300
70#define HDD_IPA_UC_RT_DEBUG_BUF_COUNT 30
71#define HDD_IPA_UC_RT_DEBUG_FILL_INTERVAL 10000
72
73#define HDD_IPA_WLAN_HDR_DES_MAC_OFFSET 0
74#define HDD_IPA_MAX_IFACE 3
75#define HDD_IPA_MAX_SYSBAM_PIPE 4
76#define HDD_IPA_RX_PIPE HDD_IPA_MAX_IFACE
77#define HDD_IPA_ENABLE_MASK BIT(0)
78#define HDD_IPA_PRE_FILTER_ENABLE_MASK BIT(1)
79#define HDD_IPA_IPV6_ENABLE_MASK BIT(2)
80#define HDD_IPA_RM_ENABLE_MASK BIT(3)
81#define HDD_IPA_CLK_SCALING_ENABLE_MASK BIT(4)
82#define HDD_IPA_UC_ENABLE_MASK BIT(5)
83#define HDD_IPA_UC_STA_ENABLE_MASK BIT(6)
84#define HDD_IPA_REAL_TIME_DEBUGGING BIT(8)
85
Yun Parkf19e07d2015-11-20 11:34:27 -080086#define HDD_IPA_MAX_PENDING_EVENT_COUNT 20
87
/**
 * hdd_ipa_uc_op_code - operation event codes exchanged with the IPA uC path
 * @HDD_IPA_UC_OPCODE_TX_SUSPEND: Tx pipe suspend event
 * @HDD_IPA_UC_OPCODE_TX_RESUME: Tx pipe resume event
 * @HDD_IPA_UC_OPCODE_RX_SUSPEND: Rx pipe suspend event
 * @HDD_IPA_UC_OPCODE_RX_RESUME: Rx pipe resume event
 * @HDD_IPA_UC_OPCODE_STATS: stats indication event
 * @HDD_IPA_UC_OPCODE_MAX: number of opcodes; must stay last (sizes the
 *                         uc_op_work[] array and bounds op_string[])
 *
 * Values are explicit because they index the op_string[] name table.
 */
typedef enum {
	HDD_IPA_UC_OPCODE_TX_SUSPEND = 0,
	HDD_IPA_UC_OPCODE_TX_RESUME = 1,
	HDD_IPA_UC_OPCODE_RX_SUSPEND = 2,
	HDD_IPA_UC_OPCODE_RX_RESUME = 3,
	HDD_IPA_UC_OPCODE_STATS = 4,
	/* keep this last */
	HDD_IPA_UC_OPCODE_MAX
} hdd_ipa_uc_op_code;
97
98/**
99 * enum - Reason codes for stat query
100 *
101 * @HDD_IPA_UC_STAT_REASON_NONE: Initial value
102 * @HDD_IPA_UC_STAT_REASON_DEBUG: For debug/info
103 * @HDD_IPA_UC_STAT_REASON_BW_CAL: For bandwidth calibration
104 */
105enum {
106 HDD_IPA_UC_STAT_REASON_NONE,
107 HDD_IPA_UC_STAT_REASON_DEBUG,
108 HDD_IPA_UC_STAT_REASON_BW_CAL
109};
110
111/**
112 * enum hdd_ipa_rm_state - IPA resource manager state
113 * @HDD_IPA_RM_RELEASED: PROD pipe resource released
114 * @HDD_IPA_RM_GRANT_PENDING: PROD pipe resource requested but not granted yet
115 * @HDD_IPA_RM_GRANTED: PROD pipe resource granted
116 */
117enum hdd_ipa_rm_state {
118 HDD_IPA_RM_RELEASED,
119 HDD_IPA_RM_GRANT_PENDING,
120 HDD_IPA_RM_GRANTED,
121};
122
123struct llc_snap_hdr {
124 uint8_t dsap;
125 uint8_t ssap;
126 uint8_t resv[4];
127 __be16 eth_type;
128} __packed;
129
Leo Chang3bc8fed2015-11-13 10:59:47 -0800130/**
131 * struct hdd_ipa_tx_hdr - header type which IPA should handle to TX packet
132 * @eth: ether II header
133 * @llc_snap: LLC snap header
134 *
135 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800136struct hdd_ipa_tx_hdr {
137 struct ethhdr eth;
138 struct llc_snap_hdr llc_snap;
139} __packed;
140
Leo Chang3bc8fed2015-11-13 10:59:47 -0800141/**
142 * struct frag_header - fragment header type registered to IPA hardware
143 * @length: fragment length
144 * @reserved1: Reserved not used
145 * @reserved2: Reserved not used
146 *
147 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800148struct frag_header {
Leo Chang3bc8fed2015-11-13 10:59:47 -0800149 uint16_t length;
150 uint32_t reserved1;
151 uint32_t reserved2;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800152} __packed;
153
Leo Chang3bc8fed2015-11-13 10:59:47 -0800154/**
155 * struct ipa_header - ipa header type registered to IPA hardware
156 * @vdev_id: vdev id
157 * @reserved: Reserved not used
158 *
159 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800160struct ipa_header {
161 uint32_t
162 vdev_id:8, /* vdev_id field is LSB of IPA DESC */
163 reserved:24;
164} __packed;
165
Leo Chang3bc8fed2015-11-13 10:59:47 -0800166/**
167 * struct hdd_ipa_uc_tx_hdr - full tx header registered to IPA hardware
168 * @frag_hd: fragment header
169 * @ipa_hd: ipa header
170 * @eth: ether II header
171 *
172 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800173struct hdd_ipa_uc_tx_hdr {
174 struct frag_header frag_hd;
175 struct ipa_header ipa_hd;
176 struct ethhdr eth;
177} __packed;
178
#define HDD_IPA_WLAN_FRAG_HEADER sizeof(struct frag_header)
/* Fix: previously defined as sizeof(struct frag_header) (copy/paste error);
 * this macro describes the ipa_header portion of struct hdd_ipa_uc_tx_hdr.
 */
#define HDD_IPA_WLAN_IPA_HEADER sizeof(struct ipa_header)
181
182/**
183 * struct hdd_ipa_cld_hdr - IPA CLD Header
184 * @reserved: reserved fields
185 * @iface_id: interface ID
186 * @sta_id: Station ID
187 *
188 * Packed 32-bit structure
189 * +----------+----------+--------------+--------+
190 * | Reserved | QCMAP ID | interface id | STA ID |
191 * +----------+----------+--------------+--------+
192 */
193struct hdd_ipa_cld_hdr {
194 uint8_t reserved[2];
195 uint8_t iface_id;
196 uint8_t sta_id;
197} __packed;
198
199struct hdd_ipa_rx_hdr {
200 struct hdd_ipa_cld_hdr cld_hdr;
201 struct ethhdr eth;
202} __packed;
203
204struct hdd_ipa_pm_tx_cb {
205 struct hdd_ipa_iface_context *iface_context;
206 struct ipa_rx_data *ipa_tx_desc;
207};
208
209struct hdd_ipa_uc_rx_hdr {
210 struct ethhdr eth;
211} __packed;
212
213struct hdd_ipa_sys_pipe {
214 uint32_t conn_hdl;
215 uint8_t conn_hdl_valid;
216 struct ipa_sys_connect_params ipa_sys_params;
217};
218
219struct hdd_ipa_iface_stats {
220 uint64_t num_tx;
221 uint64_t num_tx_drop;
222 uint64_t num_tx_err;
223 uint64_t num_tx_cac_drop;
224 uint64_t num_rx_prefilter;
225 uint64_t num_rx_ipa_excep;
226 uint64_t num_rx_recv;
227 uint64_t num_rx_recv_mul;
228 uint64_t num_rx_send_desc_err;
229 uint64_t max_rx_mul;
230};
231
232struct hdd_ipa_priv;
233
234struct hdd_ipa_iface_context {
235 struct hdd_ipa_priv *hdd_ipa;
236 hdd_adapter_t *adapter;
237 void *tl_context;
238
239 enum ipa_client_type cons_client;
240 enum ipa_client_type prod_client;
241
242 uint8_t iface_id; /* This iface ID */
243 uint8_t sta_id; /* This iface station ID */
244 cdf_spinlock_t interface_lock;
245 uint32_t ifa_address;
246 struct hdd_ipa_iface_stats stats;
247};
248
249struct hdd_ipa_stats {
250 uint32_t event[IPA_WLAN_EVENT_MAX];
251 uint64_t num_send_msg;
252 uint64_t num_free_msg;
253
254 uint64_t num_rm_grant;
255 uint64_t num_rm_release;
256 uint64_t num_rm_grant_imm;
257 uint64_t num_cons_perf_req;
258 uint64_t num_prod_perf_req;
259
260 uint64_t num_rx_drop;
261 uint64_t num_rx_ipa_tx_dp;
262 uint64_t num_rx_ipa_splice;
263 uint64_t num_rx_ipa_loop;
264 uint64_t num_rx_ipa_tx_dp_err;
265 uint64_t num_rx_ipa_write_done;
266 uint64_t num_max_ipa_tx_mul;
267 uint64_t num_rx_ipa_hw_maxed_out;
268 uint64_t max_pend_q_cnt;
269
270 uint64_t num_tx_comp_cnt;
271 uint64_t num_tx_queued;
272 uint64_t num_tx_dequeued;
273 uint64_t num_max_pm_queue;
274
275 uint64_t num_freeq_empty;
276 uint64_t num_pri_freeq_empty;
277 uint64_t num_rx_excep;
278 uint64_t num_tx_bcmc;
279 uint64_t num_tx_bcmc_err;
280};
281
282struct ipa_uc_stas_map {
283 bool is_reserved;
284 uint8_t sta_id;
285};
286struct op_msg_type {
287 uint8_t msg_t;
288 uint8_t rsvd;
289 uint16_t op_code;
290 uint16_t len;
291 uint16_t rsvd_snd;
292};
293
294struct ipa_uc_fw_stats {
295 uint32_t tx_comp_ring_base;
296 uint32_t tx_comp_ring_size;
297 uint32_t tx_comp_ring_dbell_addr;
298 uint32_t tx_comp_ring_dbell_ind_val;
299 uint32_t tx_comp_ring_dbell_cached_val;
300 uint32_t tx_pkts_enqueued;
301 uint32_t tx_pkts_completed;
302 uint32_t tx_is_suspend;
303 uint32_t tx_reserved;
304 uint32_t rx_ind_ring_base;
305 uint32_t rx_ind_ring_size;
306 uint32_t rx_ind_ring_dbell_addr;
307 uint32_t rx_ind_ring_dbell_ind_val;
308 uint32_t rx_ind_ring_dbell_ind_cached_val;
309 uint32_t rx_ind_ring_rdidx_addr;
310 uint32_t rx_ind_ring_rd_idx_cached_val;
311 uint32_t rx_refill_idx;
312 uint32_t rx_num_pkts_indicated;
313 uint32_t rx_buf_refilled;
314 uint32_t rx_num_ind_drop_no_space;
315 uint32_t rx_num_ind_drop_no_buf;
316 uint32_t rx_is_suspend;
317 uint32_t rx_reserved;
318};
319
320struct ipa_uc_pending_event {
321 cdf_list_node_t node;
322 hdd_adapter_t *adapter;
323 enum ipa_wlan_event type;
324 uint8_t sta_id;
325 uint8_t mac_addr[CDF_MAC_ADDR_SIZE];
326};
327
328/**
329 * struct uc_rm_work_struct
330 * @work: uC RM work
331 * @event: IPA RM event
332 */
333struct uc_rm_work_struct {
334 struct work_struct work;
335 enum ipa_rm_event event;
336};
337
338/**
339 * struct uc_op_work_struct
340 * @work: uC OP work
341 * @msg: OP message
342 */
343struct uc_op_work_struct {
344 struct work_struct work;
345 struct op_msg_type *msg;
346};
347static uint8_t vdev_to_iface[CSR_ROAM_SESSION_MAX];
348
349/**
350 * struct uc_rt_debug_info
351 * @time: system time
352 * @ipa_excep_count: IPA exception packet count
353 * @rx_drop_count: IPA Rx drop packet count
354 * @net_sent_count: IPA Rx packet sent to network stack count
355 * @rx_discard_count: IPA Rx discard packet count
356 * @rx_mcbc_count: IPA Rx BCMC packet count
357 * @tx_mcbc_count: IPA Tx BCMC packet countt
358 * @tx_fwd_count: IPA Tx forward packet count
359 * @rx_destructor_call: IPA Rx packet destructor count
360 */
361struct uc_rt_debug_info {
362 v_TIME_t time;
363 uint64_t ipa_excep_count;
364 uint64_t rx_drop_count;
365 uint64_t net_sent_count;
366 uint64_t rx_discard_count;
367 uint64_t rx_mcbc_count;
368 uint64_t tx_mcbc_count;
369 uint64_t tx_fwd_count;
370 uint64_t rx_destructor_call;
371};
372
373struct hdd_ipa_priv {
374 struct hdd_ipa_sys_pipe sys_pipe[HDD_IPA_MAX_SYSBAM_PIPE];
375 struct hdd_ipa_iface_context iface_context[HDD_IPA_MAX_IFACE];
376 uint8_t num_iface;
377 enum hdd_ipa_rm_state rm_state;
378 /*
379 * IPA driver can send RM notifications with IRQ disabled so using cdf
380 * APIs as it is taken care gracefully. Without this, kernel would throw
381 * an warning if spin_lock_bh is used while IRQ is disabled
382 */
383 cdf_spinlock_t rm_lock;
384 struct uc_rm_work_struct uc_rm_work;
385 struct uc_op_work_struct uc_op_work[HDD_IPA_UC_OPCODE_MAX];
386 cdf_wake_lock_t wake_lock;
387 struct delayed_work wake_lock_work;
388 bool wake_lock_released;
389
390 enum ipa_client_type prod_client;
391
392 atomic_t tx_ref_cnt;
393 cdf_nbuf_queue_t pm_queue_head;
394 struct work_struct pm_work;
395 cdf_spinlock_t pm_lock;
396 bool suspended;
397
398 uint32_t pending_hw_desc_cnt;
399 uint32_t hw_desc_cnt;
400 spinlock_t q_lock;
401 uint32_t freeq_cnt;
402 struct list_head free_desc_head;
403
404 uint32_t pend_q_cnt;
405 struct list_head pend_desc_head;
406
407 hdd_context_t *hdd_ctx;
408
409 struct dentry *debugfs_dir;
410 struct hdd_ipa_stats stats;
411
412 struct notifier_block ipv4_notifier;
413 uint32_t curr_prod_bw;
414 uint32_t curr_cons_bw;
415
416 uint8_t activated_fw_pipe;
417 uint8_t sap_num_connected_sta;
418 uint8_t sta_connected;
419 uint32_t tx_pipe_handle;
420 uint32_t rx_pipe_handle;
421 bool resource_loading;
422 bool resource_unloading;
423 bool pending_cons_req;
424 struct ipa_uc_stas_map assoc_stas_map[WLAN_MAX_STA_COUNT];
425 cdf_list_t pending_event;
426 cdf_mutex_t event_lock;
Leo Change3e49442015-10-26 20:07:13 -0700427 bool ipa_pipes_down;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800428 uint32_t ipa_tx_packets_diff;
429 uint32_t ipa_rx_packets_diff;
430 uint32_t ipa_p_tx_packets;
431 uint32_t ipa_p_rx_packets;
432 uint32_t stat_req_reason;
433 uint64_t ipa_tx_forward;
434 uint64_t ipa_rx_discard;
435 uint64_t ipa_rx_net_send_count;
436 uint64_t ipa_rx_internel_drop_count;
437 uint64_t ipa_rx_destructor_count;
438 cdf_mc_timer_t rt_debug_timer;
439 struct uc_rt_debug_info rt_bug_buffer[HDD_IPA_UC_RT_DEBUG_BUF_COUNT];
440 unsigned int rt_buf_fill_index;
441 cdf_mc_timer_t rt_debug_fill_timer;
442 cdf_mutex_t rt_debug_lock;
Yun Parke59b3912015-11-09 13:19:06 -0800443 cdf_mutex_t ipa_lock;
Leo Chang3bc8fed2015-11-13 10:59:47 -0800444
445 /* CE resources */
446 cdf_dma_addr_t ce_sr_base_paddr;
447 uint32_t ce_sr_ring_size;
448 cdf_dma_addr_t ce_reg_paddr;
449
450 /* WLAN TX:IPA->WLAN */
451 cdf_dma_addr_t tx_comp_ring_base_paddr;
452 uint32_t tx_comp_ring_size;
453 uint32_t tx_num_alloc_buffer;
454
455 /* WLAN RX:WLAN->IPA */
456 cdf_dma_addr_t rx_rdy_ring_base_paddr;
457 uint32_t rx_rdy_ring_size;
458 cdf_dma_addr_t rx_proc_done_idx_paddr;
459 void *rx_proc_done_idx_vaddr;
460
461 /* WLAN RX2:WLAN->IPA */
462 cdf_dma_addr_t rx2_rdy_ring_base_paddr;
463 uint32_t rx2_rdy_ring_size;
464 cdf_dma_addr_t rx2_proc_done_idx_paddr;
465 void *rx2_proc_done_idx_vaddr;
466
467 /* IPA UC doorbell registers paddr */
468 cdf_dma_addr_t tx_comp_doorbell_paddr;
469 cdf_dma_addr_t rx_ready_doorbell_paddr;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800470};
471
472#define HDD_IPA_WLAN_CLD_HDR_LEN sizeof(struct hdd_ipa_cld_hdr)
473#define HDD_IPA_UC_WLAN_CLD_HDR_LEN 0
474#define HDD_IPA_WLAN_TX_HDR_LEN sizeof(struct hdd_ipa_tx_hdr)
475#define HDD_IPA_UC_WLAN_TX_HDR_LEN sizeof(struct hdd_ipa_uc_tx_hdr)
476#define HDD_IPA_WLAN_RX_HDR_LEN sizeof(struct hdd_ipa_rx_hdr)
477#define HDD_IPA_UC_WLAN_RX_HDR_LEN sizeof(struct hdd_ipa_uc_rx_hdr)
478
Leo Chang3bc8fed2015-11-13 10:59:47 -0800479#define HDD_IPA_FW_RX_DESC_DISCARD_M 0x1
480#define HDD_IPA_FW_RX_DESC_FORWARD_M 0x2
481
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800482#define HDD_IPA_GET_IFACE_ID(_data) \
483 (((struct hdd_ipa_cld_hdr *) (_data))->iface_id)
484
485#define HDD_IPA_LOG(LVL, fmt, args ...) \
486 CDF_TRACE(CDF_MODULE_ID_HDD, LVL, \
487 "%s:%d: "fmt, __func__, __LINE__, ## args)
488
489#define HDD_IPA_DBG_DUMP(_lvl, _prefix, _buf, _len) \
490 do { \
491 CDF_TRACE(CDF_MODULE_ID_HDD, _lvl, "%s:", _prefix); \
492 CDF_TRACE_HEX_DUMP(CDF_MODULE_ID_HDD, _lvl, _buf, _len); \
493 } while (0)
494
495#define HDD_IPA_IS_CONFIG_ENABLED(_hdd_ctx, _mask) \
496 (((_hdd_ctx)->config->IpaConfig & (_mask)) == (_mask))
497
498#define HDD_IPA_INCREASE_INTERNAL_DROP_COUNT(hdd_ipa) \
499 do { \
500 hdd_ipa->ipa_rx_internel_drop_count++; \
501 } while (0)
502#define HDD_IPA_INCREASE_NET_SEND_COUNT(hdd_ipa) \
503 do { \
504 hdd_ipa->ipa_rx_net_send_count++; \
505 } while (0)
506#define HDD_BW_GET_DIFF(_x, _y) (unsigned long)((ULONG_MAX - (_y)) + (_x) + 1)
507
Leo Chang3bc8fed2015-11-13 10:59:47 -0800508/* Temporary macro to make a build without IPA V2 */
509#ifdef IPA_V2
510#define HDD_IPA_WDI2_SET(pipe_in, ipa_ctxt) \
511do { \
512 pipe_in.u.ul.rdy_ring_rp_va = ipa_ctxt->rx_proc_done_idx_vaddr; \
513 pipe_in.u.ul.rdy_comp_ring_base_pa = ipa_ctxt->rx2_rdy_ring_base_paddr;\
514 pipe_in.u.ul.rdy_comp_ring_size = ipa_ctxt->rx2_rdy_ring_size; \
515 pipe_in.u.ul.rdy_comp_ring_wp_pa = ipa_ctxt->rx2_proc_done_idx_paddr; \
516 pipe_in.u.ul.rdy_comp_ring_wp_va = ipa_ctxt->rx2_proc_done_idx_vaddr; \
517} while (0)
518#else
519/* Do nothing */
520#define HDD_IPA_WDI2_SET(pipe_in, ipa_ctxt)
521#endif /* IPA_V2 */
522
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800523static struct hdd_ipa_adapter_2_client {
524 enum ipa_client_type cons_client;
525 enum ipa_client_type prod_client;
526} hdd_ipa_adapter_2_client[HDD_IPA_MAX_IFACE] = {
527 {
528 IPA_CLIENT_WLAN2_CONS, IPA_CLIENT_WLAN1_PROD
529 }, {
530 IPA_CLIENT_WLAN3_CONS, IPA_CLIENT_WLAN1_PROD
531 }, {
532 IPA_CLIENT_WLAN4_CONS, IPA_CLIENT_WLAN1_PROD
533 },
534};
535
536/* For Tx pipes, use Ethernet-II Header format */
537struct hdd_ipa_uc_tx_hdr ipa_uc_tx_hdr = {
538 {
Leo Chang3bc8fed2015-11-13 10:59:47 -0800539 0x0000,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800540 0x00000000,
541 0x00000000
542 },
543 {
544 0x00000000
545 },
546 {
547 {0x00, 0x03, 0x7f, 0xaa, 0xbb, 0xcc},
548 {0x00, 0x03, 0x7f, 0xdd, 0xee, 0xff},
549 0x0008
550 }
551};
552
553/* For Tx pipes, use 802.3 Header format */
554static struct hdd_ipa_tx_hdr ipa_tx_hdr = {
555 {
556 {0xDE, 0xAD, 0xBE, 0xEF, 0xFF, 0xFF},
557 {0xDE, 0xAD, 0xBE, 0xEF, 0xFF, 0xFF},
558 0x00 /* length can be zero */
559 },
560 {
561 /* LLC SNAP header 8 bytes */
562 0xaa, 0xaa,
563 {0x03, 0x00, 0x00, 0x00},
564 0x0008 /* type value(2 bytes) ,filled by wlan */
565 /* 0x0800 - IPV4, 0x86dd - IPV6 */
566 }
567};
568
569static const char *op_string[] = {
570 "TX_SUSPEND",
571 "TX_RESUME",
572 "RX_SUSPEND",
573 "RX_RESUME",
574 "STATS",
575};
576
577static struct hdd_ipa_priv *ghdd_ipa;
578
579/* Local Function Prototypes */
580static void hdd_ipa_i2w_cb(void *priv, enum ipa_dp_evt_type evt,
581 unsigned long data);
582static void hdd_ipa_w2i_cb(void *priv, enum ipa_dp_evt_type evt,
583 unsigned long data);
584
585static void hdd_ipa_cleanup_iface(struct hdd_ipa_iface_context *iface_context);
586
587/**
588 * hdd_ipa_is_enabled() - Is IPA enabled?
589 * @hdd_ctx: Global HDD context
590 *
591 * Return: true if IPA is enabled, false otherwise
592 */
593bool hdd_ipa_is_enabled(hdd_context_t *hdd_ctx)
594{
595 return HDD_IPA_IS_CONFIG_ENABLED(hdd_ctx, HDD_IPA_ENABLE_MASK);
596}
597
598/**
599 * hdd_ipa_uc_is_enabled() - Is IPA uC offload enabled?
600 * @hdd_ctx: Global HDD context
601 *
602 * Return: true if IPA uC offload is enabled, false otherwise
603 */
604bool hdd_ipa_uc_is_enabled(hdd_context_t *hdd_ctx)
605{
606 return HDD_IPA_IS_CONFIG_ENABLED(hdd_ctx, HDD_IPA_UC_ENABLE_MASK);
607}
608
609/**
610 * hdd_ipa_uc_sta_is_enabled() - Is STA mode IPA uC offload enabled?
611 * @hdd_ctx: Global HDD context
612 *
613 * Return: true if STA mode IPA uC offload is enabled, false otherwise
614 */
615static inline bool hdd_ipa_uc_sta_is_enabled(hdd_context_t *hdd_ctx)
616{
617 return HDD_IPA_IS_CONFIG_ENABLED(hdd_ctx, HDD_IPA_UC_STA_ENABLE_MASK);
618}
619
620/**
621 * hdd_ipa_is_pre_filter_enabled() - Is IPA pre-filter enabled?
622 * @hdd_ipa: Global HDD IPA context
623 *
624 * Return: true if pre-filter is enabled, otherwise false
625 */
626static inline bool hdd_ipa_is_pre_filter_enabled(hdd_context_t *hdd_ctx)
627{
628 return HDD_IPA_IS_CONFIG_ENABLED(hdd_ctx,
629 HDD_IPA_PRE_FILTER_ENABLE_MASK);
630}
631
632/**
633 * hdd_ipa_is_ipv6_enabled() - Is IPA IPv6 enabled?
634 * @hdd_ipa: Global HDD IPA context
635 *
636 * Return: true if IPv6 is enabled, otherwise false
637 */
638static inline bool hdd_ipa_is_ipv6_enabled(hdd_context_t *hdd_ctx)
639{
640 return HDD_IPA_IS_CONFIG_ENABLED(hdd_ctx, HDD_IPA_IPV6_ENABLE_MASK);
641}
642
643/**
644 * hdd_ipa_is_rm_enabled() - Is IPA resource manager enabled?
645 * @hdd_ipa: Global HDD IPA context
646 *
647 * Return: true if resource manager is enabled, otherwise false
648 */
649static inline bool hdd_ipa_is_rm_enabled(hdd_context_t *hdd_ctx)
650{
651 return HDD_IPA_IS_CONFIG_ENABLED(hdd_ctx, HDD_IPA_RM_ENABLE_MASK);
652}
653
654/**
655 * hdd_ipa_is_rt_debugging_enabled() - Is IPA real-time debug enabled?
656 * @hdd_ipa: Global HDD IPA context
657 *
658 * Return: true if resource manager is enabled, otherwise false
659 */
660static inline bool hdd_ipa_is_rt_debugging_enabled(hdd_context_t *hdd_ctx)
661{
662 return HDD_IPA_IS_CONFIG_ENABLED(hdd_ctx, HDD_IPA_REAL_TIME_DEBUGGING);
663}
664
665/**
666 * hdd_ipa_is_clk_scaling_enabled() - Is IPA clock scaling enabled?
667 * @hdd_ipa: Global HDD IPA context
668 *
669 * Return: true if clock scaling is enabled, otherwise false
670 */
671static inline bool hdd_ipa_is_clk_scaling_enabled(hdd_context_t *hdd_ctx)
672{
673 return HDD_IPA_IS_CONFIG_ENABLED(hdd_ctx,
674 HDD_IPA_CLK_SCALING_ENABLE_MASK |
675 HDD_IPA_RM_ENABLE_MASK);
676}
677
678/**
679 * hdd_ipa_uc_rt_debug_host_fill - fill rt debug buffer
680 * @ctext: pointer to hdd context.
681 *
682 * If rt debug enabled, periodically called, and fill debug buffer
683 *
684 * Return: none
685 */
686static void hdd_ipa_uc_rt_debug_host_fill(void *ctext)
687{
688 hdd_context_t *hdd_ctx = (hdd_context_t *)ctext;
689 struct hdd_ipa_priv *hdd_ipa;
690 struct uc_rt_debug_info *dump_info = NULL;
691
692 if (wlan_hdd_validate_context(hdd_ctx))
693 return;
694
695 if (!hdd_ctx->hdd_ipa || !hdd_ipa_uc_is_enabled(hdd_ctx)) {
696 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
697 "%s: IPA UC is not enabled", __func__);
698 return;
699 }
700
701 hdd_ipa = (struct hdd_ipa_priv *)hdd_ctx->hdd_ipa;
702
703 cdf_mutex_acquire(&hdd_ipa->rt_debug_lock);
704 dump_info = &hdd_ipa->rt_bug_buffer[
705 hdd_ipa->rt_buf_fill_index % HDD_IPA_UC_RT_DEBUG_BUF_COUNT];
706
707 dump_info->time = cdf_mc_timer_get_system_time();
708 dump_info->ipa_excep_count = hdd_ipa->stats.num_rx_excep;
709 dump_info->rx_drop_count = hdd_ipa->ipa_rx_internel_drop_count;
710 dump_info->net_sent_count = hdd_ipa->ipa_rx_net_send_count;
711 dump_info->rx_discard_count = hdd_ipa->ipa_rx_discard;
712 dump_info->tx_mcbc_count = hdd_ipa->stats.num_tx_bcmc;
713 dump_info->tx_fwd_count = hdd_ipa->ipa_tx_forward;
714 dump_info->rx_destructor_call = hdd_ipa->ipa_rx_destructor_count;
715 hdd_ipa->rt_buf_fill_index++;
716 cdf_mutex_release(&hdd_ipa->rt_debug_lock);
717
718 cdf_mc_timer_start(&hdd_ipa->rt_debug_fill_timer,
719 HDD_IPA_UC_RT_DEBUG_FILL_INTERVAL);
720}
721
722/**
723 * hdd_ipa_uc_rt_debug_host_dump - dump rt debug buffer
724 * @hdd_ctx: pointer to hdd context.
725 *
726 * If rt debug enabled, dump debug buffer contents based on requirement
727 *
728 * Return: none
729 */
730void hdd_ipa_uc_rt_debug_host_dump(hdd_context_t *hdd_ctx)
731{
732 struct hdd_ipa_priv *hdd_ipa;
733 unsigned int dump_count;
734 unsigned int dump_index;
735 struct uc_rt_debug_info *dump_info = NULL;
736
737 if (wlan_hdd_validate_context(hdd_ctx))
738 return;
739
740 hdd_ipa = hdd_ctx->hdd_ipa;
741 if (!hdd_ipa || !hdd_ipa_uc_is_enabled(hdd_ctx)) {
742 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
743 "%s: IPA UC is not enabled", __func__);
744 return;
745 }
746
747 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
748 "========= WLAN-IPA DEBUG BUF DUMP ==========\n");
749 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
750 " TM : EXEP : DROP : NETS : MCBC : TXFD : DSTR : DSCD\n");
751
752 cdf_mutex_acquire(&hdd_ipa->rt_debug_lock);
753 for (dump_count = 0;
754 dump_count < HDD_IPA_UC_RT_DEBUG_BUF_COUNT;
755 dump_count++) {
756 dump_index = (hdd_ipa->rt_buf_fill_index + dump_count) %
757 HDD_IPA_UC_RT_DEBUG_BUF_COUNT;
758 dump_info = &hdd_ipa->rt_bug_buffer[dump_index];
759 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
760 "%12lu:%10llu:%10llu:%10llu:%10llu:%10llu:%10llu:%10llu\n",
761 dump_info->time, dump_info->ipa_excep_count,
762 dump_info->rx_drop_count, dump_info->net_sent_count,
763 dump_info->tx_mcbc_count, dump_info->tx_fwd_count,
764 dump_info->rx_destructor_call,
765 dump_info->rx_discard_count);
766 }
767 cdf_mutex_release(&hdd_ipa->rt_debug_lock);
768 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
769 "======= WLAN-IPA DEBUG BUF DUMP END ========\n");
770}
771
772/**
773 * hdd_ipa_uc_rt_debug_handler - periodic memory health monitor handler
774 * @ctext: pointer to hdd context.
775 *
776 * periodically called by timer expire
777 * will try to alloc dummy memory and detect out of memory condition
778 * if out of memory detected, dump wlan-ipa stats
779 *
780 * Return: none
781 */
782static void hdd_ipa_uc_rt_debug_handler(void *ctext)
783{
784 hdd_context_t *hdd_ctx = (hdd_context_t *)ctext;
785 struct hdd_ipa_priv *hdd_ipa = (struct hdd_ipa_priv *)hdd_ctx->hdd_ipa;
786 void *dummy_ptr = NULL;
787
788 if (wlan_hdd_validate_context(hdd_ctx))
789 return;
790
791 if (!hdd_ipa_is_rt_debugging_enabled(hdd_ctx)) {
792 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
793 "%s: IPA RT debug is not enabled", __func__);
794 return;
795 }
796
797 /* Allocate dummy buffer periodically and free immediately. this will
798 * proactively detect OOM and if allocation fails dump ipa stats
799 */
800 dummy_ptr = kmalloc(HDD_IPA_UC_DEBUG_DUMMY_MEM_SIZE,
801 GFP_KERNEL | GFP_ATOMIC);
802 if (!dummy_ptr) {
803 HDD_IPA_LOG(CDF_TRACE_LEVEL_FATAL,
804 "%s: Dummy alloc fail", __func__);
805 hdd_ipa_uc_rt_debug_host_dump(hdd_ctx);
806 hdd_ipa_uc_stat_request(
807 hdd_get_adapter(hdd_ctx, WLAN_HDD_SOFTAP), 1);
808 } else {
809 kfree(dummy_ptr);
810 }
811
812 cdf_mc_timer_start(&hdd_ipa->rt_debug_timer,
813 HDD_IPA_UC_RT_DEBUG_PERIOD);
814}
815
816/**
817 * hdd_ipa_uc_rt_debug_destructor - called by data packet free
818 * @skb: packet pinter
819 *
820 * when free data packet, will be invoked by wlan client and will increase
821 * free counter
822 *
823 * Return: none
824 */
825void hdd_ipa_uc_rt_debug_destructor(struct sk_buff *skb)
826{
827 if (!ghdd_ipa) {
828 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
829 "%s: invalid hdd context", __func__);
830 return;
831 }
832
833 ghdd_ipa->ipa_rx_destructor_count++;
834}
835
836/**
837 * hdd_ipa_uc_rt_debug_deinit - remove resources to handle rt debugging
838 * @hdd_ctx: hdd main context
839 *
840 * free all rt debugging resources
841 *
842 * Return: none
843 */
844static void hdd_ipa_uc_rt_debug_deinit(hdd_context_t *hdd_ctx)
845{
846 struct hdd_ipa_priv *hdd_ipa = (struct hdd_ipa_priv *)hdd_ctx->hdd_ipa;
847
848 if (CDF_TIMER_STATE_STOPPED !=
849 cdf_mc_timer_get_current_state(&hdd_ipa->rt_debug_fill_timer)) {
850 cdf_mc_timer_stop(&hdd_ipa->rt_debug_fill_timer);
851 }
852 cdf_mc_timer_destroy(&hdd_ipa->rt_debug_fill_timer);
853 cdf_mutex_destroy(&hdd_ipa->rt_debug_lock);
854
855 if (!hdd_ipa_is_rt_debugging_enabled(hdd_ctx)) {
856 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
857 "%s: IPA RT debug is not enabled", __func__);
858 return;
859 }
860
861 if (CDF_TIMER_STATE_STOPPED !=
862 cdf_mc_timer_get_current_state(&hdd_ipa->rt_debug_timer)) {
863 cdf_mc_timer_stop(&hdd_ipa->rt_debug_timer);
864 }
865 cdf_mc_timer_destroy(&hdd_ipa->rt_debug_timer);
866}
867
868/**
869 * hdd_ipa_uc_rt_debug_init - intialize resources to handle rt debugging
870 * @hdd_ctx: hdd main context
871 *
872 * alloc and initialize all rt debugging resources
873 *
874 * Return: none
875 */
876static void hdd_ipa_uc_rt_debug_init(hdd_context_t *hdd_ctx)
877{
878 struct hdd_ipa_priv *hdd_ipa = (struct hdd_ipa_priv *)hdd_ctx->hdd_ipa;
879
880 cdf_mutex_init(&hdd_ipa->rt_debug_lock);
881 cdf_mc_timer_init(&hdd_ipa->rt_debug_fill_timer, CDF_TIMER_TYPE_SW,
882 hdd_ipa_uc_rt_debug_host_fill, (void *)hdd_ctx);
883 hdd_ipa->rt_buf_fill_index = 0;
884 cdf_mem_zero(hdd_ipa->rt_bug_buffer,
885 sizeof(struct uc_rt_debug_info) *
886 HDD_IPA_UC_RT_DEBUG_BUF_COUNT);
887 hdd_ipa->ipa_tx_forward = 0;
888 hdd_ipa->ipa_rx_discard = 0;
889 hdd_ipa->ipa_rx_net_send_count = 0;
890 hdd_ipa->ipa_rx_internel_drop_count = 0;
891 hdd_ipa->ipa_rx_destructor_count = 0;
892
893 cdf_mc_timer_start(&hdd_ipa->rt_debug_fill_timer,
894 HDD_IPA_UC_RT_DEBUG_FILL_INTERVAL);
895
896 /* Reatime debug enable on feature enable */
897 if (!hdd_ipa_is_rt_debugging_enabled(hdd_ctx)) {
898 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
899 "%s: IPA RT debug is not enabled", __func__);
900 return;
901 }
902 cdf_mc_timer_init(&hdd_ipa->rt_debug_timer, CDF_TIMER_TYPE_SW,
903 hdd_ipa_uc_rt_debug_handler, (void *)hdd_ctx);
904 cdf_mc_timer_start(&hdd_ipa->rt_debug_timer,
905 HDD_IPA_UC_RT_DEBUG_PERIOD);
906
907}
908
909/**
910 * hdd_ipa_uc_stat_query() - Query the IPA stats
911 * @hdd_ctx: Global HDD context
912 * @ipa_tx_diff: tx packet count diff from previous
913 * tx packet count
914 * @ipa_rx_diff: rx packet count diff from previous
915 * rx packet count
916 *
917 * Return: true if IPA is enabled, false otherwise
918 */
919void hdd_ipa_uc_stat_query(hdd_context_t *pHddCtx,
920 uint32_t *ipa_tx_diff, uint32_t *ipa_rx_diff)
921{
922 struct hdd_ipa_priv *hdd_ipa;
923
924 hdd_ipa = (struct hdd_ipa_priv *)pHddCtx->hdd_ipa;
925 *ipa_tx_diff = 0;
926 *ipa_rx_diff = 0;
927
928 if (!hdd_ipa_is_enabled(pHddCtx) ||
929 !(hdd_ipa_uc_is_enabled(pHddCtx))) {
930 return;
931 }
932
Yun Parke59b3912015-11-09 13:19:06 -0800933 cdf_mutex_acquire(&hdd_ipa->ipa_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800934 if ((HDD_IPA_UC_NUM_WDI_PIPE == hdd_ipa->activated_fw_pipe) &&
935 (false == hdd_ipa->resource_loading)) {
936 *ipa_tx_diff = hdd_ipa->ipa_tx_packets_diff;
937 *ipa_rx_diff = hdd_ipa->ipa_rx_packets_diff;
938 HDD_IPA_LOG(LOG1, "STAT Query TX DIFF %d, RX DIFF %d",
939 *ipa_tx_diff, *ipa_rx_diff);
940 }
Yun Parke59b3912015-11-09 13:19:06 -0800941 cdf_mutex_release(&hdd_ipa->ipa_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800942 return;
943}
944
945/**
946 * hdd_ipa_uc_stat_request() - Get IPA stats from IPA.
947 * @adapter: network adapter
948 * @reason: STAT REQ Reason
949 *
950 * Return: None
951 */
952void hdd_ipa_uc_stat_request(hdd_adapter_t *adapter, uint8_t reason)
953{
954 hdd_context_t *pHddCtx;
955 struct hdd_ipa_priv *hdd_ipa;
956
957 if (!adapter) {
958 return;
959 }
960
961 pHddCtx = (hdd_context_t *)adapter->pHddCtx;
962 hdd_ipa = (struct hdd_ipa_priv *)pHddCtx->hdd_ipa;
963 if (!hdd_ipa_is_enabled(pHddCtx) ||
964 !(hdd_ipa_uc_is_enabled(pHddCtx))) {
965 return;
966 }
967
968 HDD_IPA_LOG(LOG1, "STAT REQ Reason %d", reason);
Yun Parke59b3912015-11-09 13:19:06 -0800969 cdf_mutex_acquire(&hdd_ipa->ipa_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800970 if ((HDD_IPA_UC_NUM_WDI_PIPE == hdd_ipa->activated_fw_pipe) &&
971 (false == hdd_ipa->resource_loading)) {
972 hdd_ipa->stat_req_reason = reason;
973 wma_cli_set_command(
974 (int)adapter->sessionId,
975 (int)WMA_VDEV_TXRX_GET_IPA_UC_FW_STATS_CMDID,
976 0, VDEV_CMD);
977 }
Yun Parke59b3912015-11-09 13:19:06 -0800978 cdf_mutex_release(&hdd_ipa->ipa_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800979}
980
981/**
982 * hdd_ipa_uc_find_add_assoc_sta() - Find associated station
983 * @hdd_ipa: Global HDD IPA context
984 * @sta_add: Should station be added
985 * @sta_id: ID of the station being queried
986 *
987 * Return: true if the station was found
988 */
989static bool hdd_ipa_uc_find_add_assoc_sta(struct hdd_ipa_priv *hdd_ipa,
990 bool sta_add, uint8_t sta_id)
991{
992 bool sta_found = false;
993 uint8_t idx;
994 for (idx = 0; idx < WLAN_MAX_STA_COUNT; idx++) {
995 if ((hdd_ipa->assoc_stas_map[idx].is_reserved) &&
996 (hdd_ipa->assoc_stas_map[idx].sta_id == sta_id)) {
997 sta_found = true;
998 break;
999 }
1000 }
1001 if (sta_add && sta_found) {
1002 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
1003 "%s: STA ID %d already exist, cannot add",
1004 __func__, sta_id);
1005 return sta_found;
1006 }
1007 if (sta_add) {
1008 for (idx = 0; idx < WLAN_MAX_STA_COUNT; idx++) {
1009 if (!hdd_ipa->assoc_stas_map[idx].is_reserved) {
1010 hdd_ipa->assoc_stas_map[idx].is_reserved = true;
1011 hdd_ipa->assoc_stas_map[idx].sta_id = sta_id;
1012 return sta_found;
1013 }
1014 }
1015 }
1016 if (!sta_add && !sta_found) {
1017 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
1018 "%s: STA ID %d does not exist, cannot delete",
1019 __func__, sta_id);
1020 return sta_found;
1021 }
1022 if (!sta_add) {
1023 for (idx = 0; idx < WLAN_MAX_STA_COUNT; idx++) {
1024 if ((hdd_ipa->assoc_stas_map[idx].is_reserved) &&
1025 (hdd_ipa->assoc_stas_map[idx].sta_id == sta_id)) {
1026 hdd_ipa->assoc_stas_map[idx].is_reserved =
1027 false;
1028 hdd_ipa->assoc_stas_map[idx].sta_id = 0xFF;
1029 return sta_found;
1030 }
1031 }
1032 }
1033 return sta_found;
1034}
1035
/**
 * hdd_ipa_uc_enable_pipes() - Enable IPA uC pipes
 * @hdd_ipa: Global HDD IPA context
 *
 * Enables and resumes the WDI TX pipe, tells the txrx layer to activate
 * the FW-side TX pipe, then repeats the sequence for the RX pipe, and
 * finally clears ipa_pipes_down.
 *
 * NOTE(review): on a mid-sequence failure the already-enabled pipes are
 * not rolled back and ipa_pipes_down stays true - confirm callers
 * tolerate this partially-enabled state.
 *
 * Return: 0 on success, negative errno if error
 */
static int hdd_ipa_uc_enable_pipes(struct hdd_ipa_priv *hdd_ipa)
{
	int result;
	p_cds_contextType cds_ctx = hdd_ipa->hdd_ctx->pcds_context;

	/* ACTIVATE TX PIPE */
	HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
		    "%s: Enable TX PIPE(tx_pipe_handle=%d)",
		    __func__, hdd_ipa->tx_pipe_handle);
	result = ipa_enable_wdi_pipe(hdd_ipa->tx_pipe_handle);
	if (result) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
			    "%s: Enable TX PIPE fail, code %d",
			    __func__, result);
		return result;
	}
	result = ipa_resume_wdi_pipe(hdd_ipa->tx_pipe_handle);
	if (result) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
			    "%s: Resume TX PIPE fail, code %d",
			    __func__, result);
		return result;
	}
	/* Host-side TX pipe is up; let the FW side start using it */
	ol_txrx_ipa_uc_set_active(cds_ctx->pdev_txrx_ctx, true, true);

	/* ACTIVATE RX PIPE */
	HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
		    "%s: Enable RX PIPE(rx_pipe_handle=%d)",
		    __func__, hdd_ipa->rx_pipe_handle);
	result = ipa_enable_wdi_pipe(hdd_ipa->rx_pipe_handle);
	if (result) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
			    "%s: Enable RX PIPE fail, code %d",
			    __func__, result);
		return result;
	}
	result = ipa_resume_wdi_pipe(hdd_ipa->rx_pipe_handle);
	if (result) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
			    "%s: Resume RX PIPE fail, code %d",
			    __func__, result);
		return result;
	}
	/* Host-side RX pipe is up; let the FW side start using it */
	ol_txrx_ipa_uc_set_active(cds_ctx->pdev_txrx_ctx, true, false);
	hdd_ipa->ipa_pipes_down = false;
	return 0;
}
1089
/**
 * hdd_ipa_uc_disable_pipes() - Disable IPA uC pipes
 * @hdd_ipa: Global HDD IPA context
 *
 * Marks the pipes as down first, then suspends and disables the WDI RX
 * pipe followed by the TX pipe (the reverse of the enable order in
 * hdd_ipa_uc_enable_pipes()).
 *
 * NOTE(review): ipa_pipes_down is set even if a suspend/disable step
 * fails and the function returns early - confirm that is the intended
 * bookkeeping on the error paths.
 *
 * Return: 0 on success, negative errno if error
 */
static int hdd_ipa_uc_disable_pipes(struct hdd_ipa_priv *hdd_ipa)
{
	int result;

	hdd_ipa->ipa_pipes_down = true;

	HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "%s: Disable RX PIPE", __func__);
	result = ipa_suspend_wdi_pipe(hdd_ipa->rx_pipe_handle);
	if (result) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
			    "%s: Suspend RX PIPE fail, code %d",
			    __func__, result);
		return result;
	}
	result = ipa_disable_wdi_pipe(hdd_ipa->rx_pipe_handle);
	if (result) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
			    "%s: Disable RX PIPE fail, code %d",
			    __func__, result);
		return result;
	}

	HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "%s: Disable TX PIPE", __func__);
	result = ipa_suspend_wdi_pipe(hdd_ipa->tx_pipe_handle);
	if (result) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
			    "%s: Suspend TX PIPE fail, code %d",
			    __func__, result);
		return result;
	}
	result = ipa_disable_wdi_pipe(hdd_ipa->tx_pipe_handle);
	if (result) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
			    "%s: Disable TX PIPE fail, code %d",
			    __func__, result);
		return result;
	}

	return 0;
}
1136
1137/**
1138 * hdd_ipa_uc_handle_first_con() - Handle first uC IPA connection
1139 * @hdd_ipa: Global HDD IPA context
1140 *
1141 * Return: 0 on success, negative errno if error
1142 */
1143static int hdd_ipa_uc_handle_first_con(struct hdd_ipa_priv *hdd_ipa)
1144{
1145 hdd_ipa->activated_fw_pipe = 0;
1146 hdd_ipa->resource_loading = true;
Yun Park4cab6ee2015-10-27 11:43:40 -07001147
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001148 /* If RM feature enabled
1149 * Request PROD Resource first
1150 * PROD resource may return sync or async manners */
Yun Park4cab6ee2015-10-27 11:43:40 -07001151 if (hdd_ipa_is_rm_enabled(hdd_ipa->hdd_ctx)) {
1152 if (!ipa_rm_request_resource(IPA_RM_RESOURCE_WLAN_PROD)) {
1153 /* RM PROD request sync return
1154 * enable pipe immediately
1155 */
1156 if (hdd_ipa_uc_enable_pipes(hdd_ipa)) {
1157 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
1158 "%s: IPA WDI Pipe activation failed",
1159 __func__);
1160 hdd_ipa->resource_loading = false;
1161 return -EBUSY;
1162 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001163 }
1164 } else {
1165 /* RM Disabled
Yun Park4cab6ee2015-10-27 11:43:40 -07001166 * Just enabled all the PIPEs
1167 */
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001168 if (hdd_ipa_uc_enable_pipes(hdd_ipa)) {
1169 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
Yun Park4cab6ee2015-10-27 11:43:40 -07001170 "%s: IPA WDI Pipe activation failed",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001171 __func__);
1172 hdd_ipa->resource_loading = false;
1173 return -EBUSY;
1174 }
1175 hdd_ipa->resource_loading = false;
1176 }
Yun Park4cab6ee2015-10-27 11:43:40 -07001177
1178 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
1179 "%s: IPA WDI Pipes activated successfully", __func__);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001180 return 0;
1181}
1182
/**
 * hdd_ipa_uc_handle_last_discon() - Handle last uC IPA disconnection
 * @hdd_ipa: Global HDD IPA context
 *
 * Marks the resource as unloading and deactivates the FW-side RX and TX
 * pipes through the txrx layer.  The host-side pipes are torn down later,
 * when the firmware's suspend acknowledgements arrive in
 * hdd_ipa_uc_op_cb().
 *
 * Return: None
 */
static void hdd_ipa_uc_handle_last_discon(struct hdd_ipa_priv *hdd_ipa)
{
	p_cds_contextType cds_ctx = hdd_ipa->hdd_ctx->pcds_context;

	/* Set before deactivating: hdd_ipa_uc_rm_notify_handler() skips
	 * re-enabling pipes while resource_unloading is true.
	 */
	hdd_ipa->resource_unloading = true;
	HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "%s: Disable FW RX PIPE", __func__);
	ol_txrx_ipa_uc_set_active(cds_ctx->pdev_txrx_ctx, false, false);
	HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "%s: Disable FW TX PIPE", __func__);
	ol_txrx_ipa_uc_set_active(cds_ctx->pdev_txrx_ctx, false, true);
}
1199
1200/**
1201 * hdd_ipa_uc_rm_notify_handler() - IPA uC resource notification handler
1202 * @context: User context registered with TL (the IPA Global context is
1203 * registered
1204 * @rxpkt: Packet containing the notification
1205 * @staid: ID of the station associated with the packet
1206 *
1207 * Return: None
1208 */
1209static void
1210hdd_ipa_uc_rm_notify_handler(void *context, enum ipa_rm_event event)
1211{
1212 struct hdd_ipa_priv *hdd_ipa = context;
1213 CDF_STATUS status = CDF_STATUS_SUCCESS;
1214
1215 /*
1216 * When SSR is going on or driver is unloading, just return.
1217 */
1218 status = wlan_hdd_validate_context(hdd_ipa->hdd_ctx);
1219 if (0 != status) {
1220 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "HDD context is not valid");
1221 return;
1222 }
1223
1224 if (!hdd_ipa_is_rm_enabled(hdd_ipa->hdd_ctx))
1225 return;
1226
1227 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "%s, event code %d",
1228 __func__, event);
1229
1230 switch (event) {
1231 case IPA_RM_RESOURCE_GRANTED:
1232 /* Differed RM Granted */
1233 hdd_ipa_uc_enable_pipes(hdd_ipa);
Yun Parke59b3912015-11-09 13:19:06 -08001234 cdf_mutex_acquire(&hdd_ipa->ipa_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001235 if ((false == hdd_ipa->resource_unloading) &&
1236 (!hdd_ipa->activated_fw_pipe)) {
1237 hdd_ipa_uc_enable_pipes(hdd_ipa);
1238 }
Yun Parke59b3912015-11-09 13:19:06 -08001239 cdf_mutex_release(&hdd_ipa->ipa_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001240 if (hdd_ipa->pending_cons_req) {
1241 ipa_rm_notify_completion(IPA_RM_RESOURCE_GRANTED,
1242 IPA_RM_RESOURCE_WLAN_CONS);
1243 }
1244 hdd_ipa->pending_cons_req = false;
1245 break;
1246
1247 case IPA_RM_RESOURCE_RELEASED:
1248 /* Differed RM Released */
1249 hdd_ipa->resource_unloading = false;
1250 if (hdd_ipa->pending_cons_req) {
1251 ipa_rm_notify_completion(IPA_RM_RESOURCE_RELEASED,
1252 IPA_RM_RESOURCE_WLAN_CONS);
1253 }
1254 hdd_ipa->pending_cons_req = false;
1255 break;
1256
1257 default:
1258 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
1259 "%s, invalid event code %d", __func__, event);
1260 break;
1261 }
1262}
1263
1264/**
1265 * hdd_ipa_uc_rm_notify_defer() - Defer IPA uC notification
1266 * @hdd_ipa: Global HDD IPA context
1267 * @event: IPA resource manager event to be deferred
1268 *
1269 * This function is called when a resource manager event is received
1270 * from firmware in interrupt context. This function will defer the
1271 * handling to the OL RX thread
1272 *
1273 * Return: None
1274 */
1275static void hdd_ipa_uc_rm_notify_defer(struct work_struct *work)
1276{
1277 enum ipa_rm_event event;
1278 struct uc_rm_work_struct *uc_rm_work = container_of(work,
1279 struct uc_rm_work_struct, work);
1280 struct hdd_ipa_priv *hdd_ipa = container_of(uc_rm_work,
1281 struct hdd_ipa_priv, uc_rm_work);
1282
1283 cds_ssr_protect(__func__);
1284 event = uc_rm_work->event;
1285 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO_HIGH,
1286 "%s, posted event %d", __func__, event);
1287
1288 hdd_ipa_uc_rm_notify_handler(hdd_ipa, event);
1289 cds_ssr_unprotect(__func__);
1290
1291 return;
1292}
1293
1294/**
1295 * hdd_ipa_uc_proc_pending_event() - Process IPA uC pending events
1296 * @hdd_ipa: Global HDD IPA context
1297 *
1298 * Return: None
1299 */
1300static void hdd_ipa_uc_proc_pending_event(struct hdd_ipa_priv *hdd_ipa)
1301{
1302 unsigned int pending_event_count;
1303 struct ipa_uc_pending_event *pending_event = NULL;
1304
1305 cdf_list_size(&hdd_ipa->pending_event, &pending_event_count);
1306 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
1307 "%s, Pending Event Count %d", __func__, pending_event_count);
1308 if (!pending_event_count) {
1309 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
1310 "%s, No Pending Event", __func__);
1311 return;
1312 }
1313
1314 cdf_list_remove_front(&hdd_ipa->pending_event,
1315 (cdf_list_node_t **)&pending_event);
1316 while (pending_event != NULL) {
1317 hdd_ipa_wlan_evt(pending_event->adapter,
1318 pending_event->type,
1319 pending_event->sta_id,
1320 pending_event->mac_addr);
1321 cdf_mem_free(pending_event);
1322 pending_event = NULL;
1323 cdf_list_remove_front(&hdd_ipa->pending_event,
1324 (cdf_list_node_t **)&pending_event);
1325 }
1326}
1327
1328/**
1329 * hdd_ipa_uc_op_cb() - IPA uC operation callback
1330 * @op_msg: operation message received from firmware
1331 * @usr_ctxt: user context registered with TL (we register the HDD Global
1332 * context)
1333 *
1334 * Return: None
1335 */
1336static void hdd_ipa_uc_op_cb(struct op_msg_type *op_msg, void *usr_ctxt)
1337{
1338 struct op_msg_type *msg = op_msg;
1339 struct ipa_uc_fw_stats *uc_fw_stat;
1340 struct IpaHwStatsWDIInfoData_t ipa_stat;
1341 struct hdd_ipa_priv *hdd_ipa;
1342 hdd_context_t *hdd_ctx;
1343 CDF_STATUS status = CDF_STATUS_SUCCESS;
1344
1345 if (!op_msg || !usr_ctxt) {
1346 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "%s, INVALID ARG", __func__);
1347 return;
1348 }
1349
1350 if (HDD_IPA_UC_OPCODE_MAX <= msg->op_code) {
1351 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
1352 "%s, INVALID OPCODE %d", __func__, msg->op_code);
1353 return;
1354 }
1355
1356 hdd_ctx = (hdd_context_t *) usr_ctxt;
1357
1358 /*
1359 * When SSR is going on or driver is unloading, just return.
1360 */
1361 status = wlan_hdd_validate_context(hdd_ctx);
1362 if (0 != status) {
1363 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "HDD context is not valid");
1364 cdf_mem_free(op_msg);
1365 return;
1366 }
1367
1368 hdd_ipa = (struct hdd_ipa_priv *)hdd_ctx->hdd_ipa;
1369
1370 HDD_IPA_LOG(CDF_TRACE_LEVEL_DEBUG,
1371 "%s, OPCODE %s", __func__, op_string[msg->op_code]);
1372
1373 if ((HDD_IPA_UC_OPCODE_TX_RESUME == msg->op_code) ||
1374 (HDD_IPA_UC_OPCODE_RX_RESUME == msg->op_code)) {
Yun Parke59b3912015-11-09 13:19:06 -08001375 cdf_mutex_acquire(&hdd_ipa->ipa_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001376 hdd_ipa->activated_fw_pipe++;
1377 if (HDD_IPA_UC_NUM_WDI_PIPE == hdd_ipa->activated_fw_pipe) {
1378 hdd_ipa->resource_loading = false;
1379 hdd_ipa_uc_proc_pending_event(hdd_ipa);
Yun Parkccc6d7a2015-12-02 14:50:13 -08001380 if (hdd_ipa->pending_cons_req)
1381 ipa_rm_notify_completion(
1382 IPA_RM_RESOURCE_GRANTED,
1383 IPA_RM_RESOURCE_WLAN_CONS);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001384 }
Yun Parke59b3912015-11-09 13:19:06 -08001385 cdf_mutex_release(&hdd_ipa->ipa_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001386 }
1387
1388 if ((HDD_IPA_UC_OPCODE_TX_SUSPEND == msg->op_code) ||
1389 (HDD_IPA_UC_OPCODE_RX_SUSPEND == msg->op_code)) {
Yun Parke59b3912015-11-09 13:19:06 -08001390 cdf_mutex_acquire(&hdd_ipa->ipa_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001391 hdd_ipa->activated_fw_pipe--;
1392 if (!hdd_ipa->activated_fw_pipe) {
1393 hdd_ipa_uc_disable_pipes(hdd_ipa);
1394 if ((hdd_ipa_is_rm_enabled(hdd_ipa->hdd_ctx)) &&
1395 (!ipa_rm_release_resource(IPA_RM_RESOURCE_WLAN_PROD))) {
1396 /* Sync return success from IPA
1397 * Enable/resume all the PIPEs */
1398 hdd_ipa->resource_unloading = false;
1399 hdd_ipa_uc_proc_pending_event(hdd_ipa);
1400 } else {
1401 hdd_ipa->resource_unloading = false;
1402 hdd_ipa_uc_proc_pending_event(hdd_ipa);
1403 }
1404 }
Yun Parke59b3912015-11-09 13:19:06 -08001405 cdf_mutex_release(&hdd_ipa->ipa_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001406 }
1407
1408 if ((HDD_IPA_UC_OPCODE_STATS == msg->op_code) &&
1409 (HDD_IPA_UC_STAT_REASON_DEBUG == hdd_ipa->stat_req_reason)) {
1410
1411 /* STATs from host */
1412 CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
1413 "==== IPA_UC WLAN_HOST CE ====\n"
Leo Chang3bc8fed2015-11-13 10:59:47 -08001414 "CE RING BASE: 0x%llx\n"
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001415 "CE RING SIZE: %d\n"
1416 "CE REG ADDR : 0x%llx",
Leo Chang3bc8fed2015-11-13 10:59:47 -08001417 hdd_ipa->ce_sr_base_paddr,
1418 hdd_ipa->ce_sr_ring_size,
1419 hdd_ipa->ce_reg_paddr);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001420 CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
1421 "==== IPA_UC WLAN_HOST TX ====\n"
Leo Chang3bc8fed2015-11-13 10:59:47 -08001422 "COMP RING BASE: 0x%llx\n"
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001423 "COMP RING SIZE: %d\n"
1424 "NUM ALLOC BUF: %d\n"
Leo Chang3bc8fed2015-11-13 10:59:47 -08001425 "COMP RING DBELL : 0x%llx",
1426 hdd_ipa->tx_comp_ring_base_paddr,
1427 hdd_ipa->tx_comp_ring_size,
1428 hdd_ipa->tx_num_alloc_buffer,
1429 hdd_ipa->tx_comp_doorbell_paddr);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001430 CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
1431 "==== IPA_UC WLAN_HOST RX ====\n"
Leo Chang3bc8fed2015-11-13 10:59:47 -08001432 "IND RING BASE: 0x%llx\n"
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001433 "IND RING SIZE: %d\n"
Leo Chang3bc8fed2015-11-13 10:59:47 -08001434 "IND RING DBELL : 0x%llx\n"
1435 "PROC DONE IND ADDR : 0x%llx\n"
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001436 "NUM EXCP PKT : %llu\n"
1437 "NUM TX BCMC : %llu\n"
1438 "NUM TX BCMC ERR : %llu",
Leo Chang3bc8fed2015-11-13 10:59:47 -08001439 hdd_ipa->rx_rdy_ring_base_paddr,
1440 hdd_ipa->rx_rdy_ring_size,
1441 hdd_ipa->rx_ready_doorbell_paddr,
1442 hdd_ipa->rx_proc_done_idx_paddr,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001443 hdd_ipa->stats.num_rx_excep,
1444 hdd_ipa->stats.num_tx_bcmc,
1445 hdd_ipa->stats.num_tx_bcmc_err);
1446 CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
1447 "==== IPA_UC WLAN_HOST CONTROL ====\n"
1448 "SAP NUM STAs: %d\n"
1449 "STA CONNECTED: %d\n"
1450 "TX PIPE HDL: %d\n"
1451 "RX PIPE HDL : %d\n"
1452 "RSC LOADING : %d\n"
1453 "RSC UNLOADING : %d\n"
1454 "PNDNG CNS RQT : %d",
1455 hdd_ipa->sap_num_connected_sta,
1456 hdd_ipa->sta_connected,
1457 hdd_ipa->tx_pipe_handle,
1458 hdd_ipa->rx_pipe_handle,
1459 (unsigned int)hdd_ipa->resource_loading,
1460 (unsigned int)hdd_ipa->resource_unloading,
1461 (unsigned int)hdd_ipa->pending_cons_req);
1462
1463 /* STATs from FW */
1464 uc_fw_stat = (struct ipa_uc_fw_stats *)
1465 ((uint8_t *)op_msg + sizeof(struct op_msg_type));
1466 CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
1467 "==== IPA_UC WLAN_FW TX ====\n"
1468 "COMP RING BASE: 0x%x\n"
1469 "COMP RING SIZE: %d\n"
1470 "COMP RING DBELL : 0x%x\n"
1471 "COMP RING DBELL IND VAL : %d\n"
1472 "COMP RING DBELL CACHED VAL : %d\n"
1473 "COMP RING DBELL CACHED VAL : %d\n"
1474 "PKTS ENQ : %d\n"
1475 "PKTS COMP : %d\n"
1476 "IS SUSPEND : %d\n"
1477 "RSVD : 0x%x",
1478 uc_fw_stat->tx_comp_ring_base,
1479 uc_fw_stat->tx_comp_ring_size,
1480 uc_fw_stat->tx_comp_ring_dbell_addr,
1481 uc_fw_stat->tx_comp_ring_dbell_ind_val,
1482 uc_fw_stat->tx_comp_ring_dbell_cached_val,
1483 uc_fw_stat->tx_comp_ring_dbell_cached_val,
1484 uc_fw_stat->tx_pkts_enqueued,
1485 uc_fw_stat->tx_pkts_completed,
1486 uc_fw_stat->tx_is_suspend, uc_fw_stat->tx_reserved);
1487 CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
1488 "==== IPA_UC WLAN_FW RX ====\n"
1489 "IND RING BASE: 0x%x\n"
1490 "IND RING SIZE: %d\n"
1491 "IND RING DBELL : 0x%x\n"
1492 "IND RING DBELL IND VAL : %d\n"
1493 "IND RING DBELL CACHED VAL : %d\n"
1494 "RDY IND ADDR : 0x%x\n"
1495 "RDY IND CACHE VAL : %d\n"
1496 "RFIL IND : %d\n"
1497 "NUM PKT INDICAT : %d\n"
1498 "BUF REFIL : %d\n"
1499 "NUM DROP NO SPC : %d\n"
1500 "NUM DROP NO BUF : %d\n"
1501 "IS SUSPND : %d\n"
1502 "RSVD : 0x%x\n",
1503 uc_fw_stat->rx_ind_ring_base,
1504 uc_fw_stat->rx_ind_ring_size,
1505 uc_fw_stat->rx_ind_ring_dbell_addr,
1506 uc_fw_stat->rx_ind_ring_dbell_ind_val,
1507 uc_fw_stat->rx_ind_ring_dbell_ind_cached_val,
1508 uc_fw_stat->rx_ind_ring_rdidx_addr,
1509 uc_fw_stat->rx_ind_ring_rd_idx_cached_val,
1510 uc_fw_stat->rx_refill_idx,
1511 uc_fw_stat->rx_num_pkts_indicated,
1512 uc_fw_stat->rx_buf_refilled,
1513 uc_fw_stat->rx_num_ind_drop_no_space,
1514 uc_fw_stat->rx_num_ind_drop_no_buf,
1515 uc_fw_stat->rx_is_suspend, uc_fw_stat->rx_reserved);
1516 /* STATs from IPA */
1517 ipa_get_wdi_stats(&ipa_stat);
1518 CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
1519 "==== IPA_UC IPA TX ====\n"
1520 "NUM PROCD : %d\n"
1521 "CE DBELL : 0x%x\n"
1522 "NUM DBELL FIRED : %d\n"
1523 "COMP RNG FULL : %d\n"
1524 "COMP RNG EMPT : %d\n"
1525 "COMP RNG USE HGH : %d\n"
1526 "COMP RNG USE LOW : %d\n"
1527 "BAM FIFO FULL : %d\n"
1528 "BAM FIFO EMPT : %d\n"
1529 "BAM FIFO USE HGH : %d\n"
1530 "BAM FIFO USE LOW : %d\n"
1531 "NUM DBELL : %d\n"
1532 "NUM UNEXP DBELL : %d\n"
1533 "NUM BAM INT HDL : 0x%x\n"
1534 "NUM BAM INT NON-RUN : 0x%x\n"
1535 "NUM QMB INT HDL : 0x%x",
1536 ipa_stat.tx_ch_stats.num_pkts_processed,
1537 ipa_stat.tx_ch_stats.copy_engine_doorbell_value,
1538 ipa_stat.tx_ch_stats.num_db_fired,
1539 ipa_stat.tx_ch_stats.tx_comp_ring_stats.ringFull,
1540 ipa_stat.tx_ch_stats.tx_comp_ring_stats.ringEmpty,
1541 ipa_stat.tx_ch_stats.tx_comp_ring_stats.ringUsageHigh,
1542 ipa_stat.tx_ch_stats.tx_comp_ring_stats.ringUsageLow,
1543 ipa_stat.tx_ch_stats.bam_stats.bamFifoFull,
1544 ipa_stat.tx_ch_stats.bam_stats.bamFifoEmpty,
1545 ipa_stat.tx_ch_stats.bam_stats.bamFifoUsageHigh,
1546 ipa_stat.tx_ch_stats.bam_stats.bamFifoUsageLow,
1547 ipa_stat.tx_ch_stats.num_db,
1548 ipa_stat.tx_ch_stats.num_unexpected_db,
1549 ipa_stat.tx_ch_stats.num_bam_int_handled,
1550 ipa_stat.tx_ch_stats.
1551 num_bam_int_in_non_runnning_state,
1552 ipa_stat.tx_ch_stats.num_qmb_int_handled);
1553
1554 CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
1555 "==== IPA_UC IPA RX ====\n"
1556 "MAX OST PKT : %d\n"
1557 "NUM PKT PRCSD : %d\n"
1558 "RNG RP : 0x%x\n"
1559 "COMP RNG FULL : %d\n"
1560 "COMP RNG EMPT : %d\n"
1561 "COMP RNG USE HGH : %d\n"
1562 "COMP RNG USE LOW : %d\n"
1563 "BAM FIFO FULL : %d\n"
1564 "BAM FIFO EMPT : %d\n"
1565 "BAM FIFO USE HGH : %d\n"
1566 "BAM FIFO USE LOW : %d\n"
1567 "NUM DB : %d\n"
1568 "NUM UNEXP DB : %d\n"
1569 "NUM BAM INT HNDL : 0x%x\n",
1570 ipa_stat.rx_ch_stats.max_outstanding_pkts,
1571 ipa_stat.rx_ch_stats.num_pkts_processed,
1572 ipa_stat.rx_ch_stats.rx_ring_rp_value,
1573 ipa_stat.rx_ch_stats.rx_ind_ring_stats.ringFull,
1574 ipa_stat.rx_ch_stats.rx_ind_ring_stats.ringEmpty,
1575 ipa_stat.rx_ch_stats.rx_ind_ring_stats.ringUsageHigh,
1576 ipa_stat.rx_ch_stats.rx_ind_ring_stats.ringUsageLow,
1577 ipa_stat.rx_ch_stats.bam_stats.bamFifoFull,
1578 ipa_stat.rx_ch_stats.bam_stats.bamFifoEmpty,
1579 ipa_stat.rx_ch_stats.bam_stats.bamFifoUsageHigh,
1580 ipa_stat.rx_ch_stats.bam_stats.bamFifoUsageLow,
1581 ipa_stat.rx_ch_stats.num_db,
1582 ipa_stat.rx_ch_stats.num_unexpected_db,
1583 ipa_stat.rx_ch_stats.num_bam_int_handled);
1584 } else if ((HDD_IPA_UC_OPCODE_STATS == msg->op_code) &&
1585 (HDD_IPA_UC_STAT_REASON_BW_CAL == hdd_ipa->stat_req_reason)) {
1586 /* STATs from FW */
1587 uc_fw_stat = (struct ipa_uc_fw_stats *)
1588 ((uint8_t *)op_msg + sizeof(struct op_msg_type));
Yun Parke59b3912015-11-09 13:19:06 -08001589 cdf_mutex_acquire(&hdd_ipa->ipa_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001590 hdd_ipa->ipa_tx_packets_diff = HDD_BW_GET_DIFF(
1591 uc_fw_stat->tx_pkts_completed,
1592 hdd_ipa->ipa_p_tx_packets);
1593 hdd_ipa->ipa_rx_packets_diff = HDD_BW_GET_DIFF(
1594 (uc_fw_stat->rx_num_ind_drop_no_space +
1595 uc_fw_stat->rx_num_ind_drop_no_buf +
1596 uc_fw_stat->rx_num_pkts_indicated),
1597 hdd_ipa->ipa_p_rx_packets);
1598
1599 hdd_ipa->ipa_p_tx_packets = uc_fw_stat->tx_pkts_completed;
1600 hdd_ipa->ipa_p_rx_packets =
1601 (uc_fw_stat->rx_num_ind_drop_no_space +
1602 uc_fw_stat->rx_num_ind_drop_no_buf +
1603 uc_fw_stat->rx_num_pkts_indicated);
Yun Parke59b3912015-11-09 13:19:06 -08001604 cdf_mutex_release(&hdd_ipa->ipa_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001605 } else {
1606 HDD_IPA_LOG(LOGE, "INVALID REASON %d",
1607 hdd_ipa->stat_req_reason);
1608 }
1609 cdf_mem_free(op_msg);
1610}
1611
1612
1613/**
1614 * hdd_ipa_uc_offload_enable_disable() - wdi enable/disable notify to fw
1615 * @adapter: device adapter instance
1616 * @offload_type: MCC or SCC
1617 * @enable: TX offload enable or disable
1618 *
1619 * Return: none
1620 */
1621static void hdd_ipa_uc_offload_enable_disable(hdd_adapter_t *adapter,
1622 uint32_t offload_type, uint32_t enable)
1623{
1624 struct sir_ipa_offload_enable_disable ipa_offload_enable_disable;
1625
1626 /* Lower layer may send multiple START_BSS_EVENT in DFS mode or during
1627 * channel change indication. Since these indications are sent by lower
1628 * layer as SAP updates and IPA doesn't have to do anything for these
1629 * updates so ignoring!
1630 */
1631 if (WLAN_HDD_SOFTAP == adapter->device_mode && adapter->ipa_context)
1632 return;
1633
1634 /* Lower layer may send multiple START_BSS_EVENT in DFS mode or during
1635 * channel change indication. Since these indications are sent by lower
1636 * layer as SAP updates and IPA doesn't have to do anything for these
1637 * updates so ignoring!
1638 */
1639 if (adapter->ipa_context)
1640 return;
1641
1642 cdf_mem_zero(&ipa_offload_enable_disable,
1643 sizeof(ipa_offload_enable_disable));
1644 ipa_offload_enable_disable.offload_type = offload_type;
1645 ipa_offload_enable_disable.vdev_id = adapter->sessionId;
1646 ipa_offload_enable_disable.enable = enable;
1647
1648 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
1649 "%s: offload_type=%d, vdev_id=%d, enable=%d", __func__,
1650 ipa_offload_enable_disable.offload_type,
1651 ipa_offload_enable_disable.vdev_id,
1652 ipa_offload_enable_disable.enable);
1653
1654 if (CDF_STATUS_SUCCESS !=
1655 sme_ipa_offload_enable_disable(WLAN_HDD_GET_HAL_CTX(adapter),
1656 adapter->sessionId, &ipa_offload_enable_disable)) {
1657 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
1658 "%s: Failure to enable IPA offload \
1659 (offload_type=%d, vdev_id=%d, enable=%d)", __func__,
1660 ipa_offload_enable_disable.offload_type,
1661 ipa_offload_enable_disable.vdev_id,
1662 ipa_offload_enable_disable.enable);
1663 }
1664}
1665
1666/**
1667 * hdd_ipa_uc_fw_op_event_handler - IPA uC FW OPvent handler
1668 * @work: uC OP work
1669 *
1670 * Return: None
1671 */
1672static void hdd_ipa_uc_fw_op_event_handler(struct work_struct *work)
1673{
1674 struct op_msg_type *msg;
1675 struct uc_op_work_struct *uc_op_work = container_of(work,
1676 struct uc_op_work_struct, work);
1677 struct hdd_ipa_priv *hdd_ipa = ghdd_ipa;
1678
1679 cds_ssr_protect(__func__);
1680
1681 msg = uc_op_work->msg;
1682 uc_op_work->msg = NULL;
1683 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO_HIGH,
1684 "%s, posted msg %d", __func__, msg->op_code);
1685
1686 hdd_ipa_uc_op_cb(msg, hdd_ipa->hdd_ctx);
1687
1688 cds_ssr_unprotect(__func__);
1689
1690 return;
1691}
1692
/**
 * hdd_ipa_uc_op_event_handler() - Queue a uC firmware OP message for handling
 * @op_msg: operation message received from firmware
 * @hdd_ctx: Global HDD context
 *
 * Validates the context and opcode, then hands the message to the
 * per-opcode work item so the actual processing runs in process context
 * (hdd_ipa_uc_fw_op_event_handler).  If a message with the same opcode
 * is already pending, or on any validation failure, the message is freed
 * and dropped.
 *
 * Return: None
 */
static void hdd_ipa_uc_op_event_handler(uint8_t *op_msg, void *hdd_ctx)
{
	struct hdd_ipa_priv *hdd_ipa;
	struct op_msg_type *msg;
	struct uc_op_work_struct *uc_op_work;
	CDF_STATUS status = CDF_STATUS_SUCCESS;

	status = wlan_hdd_validate_context(hdd_ctx);
	if (0 != status) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "HDD context is not valid");
		goto end;
	}

	msg = (struct op_msg_type *)op_msg;
	hdd_ipa = ((hdd_context_t *)hdd_ctx)->hdd_ipa;

	if (unlikely(!hdd_ipa))
		goto end;

	if (HDD_IPA_UC_OPCODE_MAX <= msg->op_code) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "%s: Invalid OP Code (%d)",
			    __func__, msg->op_code);
		goto end;
	}

	uc_op_work = &hdd_ipa->uc_op_work[msg->op_code];
	if (uc_op_work->msg)
		/* When the same uC OPCODE is already pended, just return */
		goto end;

	/* Ownership of msg passes to the work item; freed after processing */
	uc_op_work->msg = msg;
	schedule_work(&uc_op_work->work);
	return;

end:
	/* Every bail-out path frees the firmware message */
	cdf_mem_free(op_msg);
}
1738
/**
 * hdd_ipa_uc_ol_init() - Initialize IPA uC offload
 * @hdd_ctx: Global HDD context
 *
 * Configures and connects the WDI TX and RX pipes using the shared-ring
 * physical addresses stored in the HDD IPA context, records the uC
 * doorbell addresses returned by IPA, registers them with the txrx layer
 * along with the uC OP-message callback, and prepares the per-opcode
 * work items.
 *
 * NOTE(review): both ipa_connect_wdi_pipe() calls ignore the return
 * value, so a failed connect still reports CDF_STATUS_SUCCESS - confirm
 * whether error propagation is handled elsewhere.
 *
 * Return: CDF_STATUS
 */
static CDF_STATUS hdd_ipa_uc_ol_init(hdd_context_t *hdd_ctx)
{
	struct ipa_wdi_in_params pipe_in;
	struct ipa_wdi_out_params pipe_out;
	struct hdd_ipa_priv *ipa_ctxt = (struct hdd_ipa_priv *)hdd_ctx->hdd_ipa;
	p_cds_contextType cds_ctx = hdd_ctx->pcds_context;
	uint8_t i;

	cdf_mem_zero(&pipe_in, sizeof(struct ipa_wdi_in_params));
	cdf_mem_zero(&pipe_out, sizeof(struct ipa_wdi_out_params));

	cdf_list_init(&ipa_ctxt->pending_event, 1000);
	cdf_mutex_init(&ipa_ctxt->event_lock);
	cdf_mutex_init(&ipa_ctxt->ipa_lock);

	/* TX PIPE */
	pipe_in.sys.ipa_ep_cfg.nat.nat_en = IPA_BYPASS_NAT;
	pipe_in.sys.ipa_ep_cfg.hdr.hdr_len = HDD_IPA_UC_WLAN_TX_HDR_LEN;
	pipe_in.sys.ipa_ep_cfg.hdr.hdr_ofst_pkt_size_valid = 1;
	pipe_in.sys.ipa_ep_cfg.hdr.hdr_ofst_pkt_size = 0;
	pipe_in.sys.ipa_ep_cfg.hdr.hdr_additional_const_len =
		HDD_IPA_UC_WLAN_8023_HDR_SIZE;
	pipe_in.sys.ipa_ep_cfg.mode.mode = IPA_BASIC;
	pipe_in.sys.client = IPA_CLIENT_WLAN1_CONS;
	pipe_in.sys.desc_fifo_sz = hdd_ctx->config->IpaDescSize;
	pipe_in.sys.priv = hdd_ctx->hdd_ipa;
	pipe_in.sys.ipa_ep_cfg.hdr_ext.hdr_little_endian = true;
	pipe_in.sys.notify = hdd_ipa_i2w_cb;
	if (!hdd_ipa_is_rm_enabled(hdd_ctx)) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
			    "%s: IPA RM DISABLED, IPA AWAKE", __func__);
		pipe_in.sys.keep_ipa_awake = true;
	}

	/* Shared ring/doorbell physical addresses collected at uC setup */
	pipe_in.u.dl.comp_ring_base_pa = ipa_ctxt->tx_comp_ring_base_paddr;
	pipe_in.u.dl.comp_ring_size =
		ipa_ctxt->tx_comp_ring_size * sizeof(cdf_dma_addr_t);
	pipe_in.u.dl.ce_ring_base_pa = ipa_ctxt->ce_sr_base_paddr;
	pipe_in.u.dl.ce_door_bell_pa = ipa_ctxt->ce_reg_paddr;
	pipe_in.u.dl.ce_ring_size = ipa_ctxt->ce_sr_ring_size;
	pipe_in.u.dl.num_tx_buffers = ipa_ctxt->tx_num_alloc_buffer;

	/* Connect WDI IPA PIPE */
	ipa_connect_wdi_pipe(&pipe_in, &pipe_out);
	/* Micro Controller Doorbell register */
	ipa_ctxt->tx_comp_doorbell_paddr = pipe_out.uc_door_bell_pa;
	/* WLAN TX PIPE Handle */
	ipa_ctxt->tx_pipe_handle = pipe_out.clnt_hdl;
	HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO_HIGH,
		    "TX : CRBPA 0x%x, CRS %d, CERBPA 0x%x, CEDPA 0x%x,"
		    " CERZ %d, NB %d, CDBPAD 0x%x",
		    (unsigned int)pipe_in.u.dl.comp_ring_base_pa,
		    pipe_in.u.dl.comp_ring_size,
		    (unsigned int)pipe_in.u.dl.ce_ring_base_pa,
		    (unsigned int)pipe_in.u.dl.ce_door_bell_pa,
		    pipe_in.u.dl.ce_ring_size,
		    pipe_in.u.dl.num_tx_buffers,
		    (unsigned int)ipa_ctxt->tx_comp_doorbell_paddr);

	/* RX PIPE
	 * NOTE(review): pipe_in is not re-zeroed here, so TX-side hdr
	 * fields (e.g. hdr_ofst_pkt_size_valid, hdr_additional_const_len)
	 * carry over into the RX endpoint config - confirm intentional.
	 */
	pipe_in.sys.ipa_ep_cfg.nat.nat_en = IPA_BYPASS_NAT;
	pipe_in.sys.ipa_ep_cfg.hdr.hdr_len = HDD_IPA_UC_WLAN_RX_HDR_LEN;
	pipe_in.sys.ipa_ep_cfg.hdr.hdr_ofst_metadata_valid = 0;
	pipe_in.sys.ipa_ep_cfg.hdr.hdr_metadata_reg_valid = 1;
	pipe_in.sys.ipa_ep_cfg.mode.mode = IPA_BASIC;
	pipe_in.sys.client = IPA_CLIENT_WLAN1_PROD;
	pipe_in.sys.desc_fifo_sz = hdd_ctx->config->IpaDescSize +
				   sizeof(struct sps_iovec);
	pipe_in.sys.notify = hdd_ipa_w2i_cb;
	if (!hdd_ipa_is_rm_enabled(hdd_ctx)) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
			    "%s: IPA RM DISABLED, IPA AWAKE", __func__);
		pipe_in.sys.keep_ipa_awake = true;
	}

	pipe_in.u.ul.rdy_ring_base_pa = ipa_ctxt->rx_rdy_ring_base_paddr;
	pipe_in.u.ul.rdy_ring_size = ipa_ctxt->rx_rdy_ring_size;
	pipe_in.u.ul.rdy_ring_rp_pa = ipa_ctxt->rx_proc_done_idx_paddr;
	HDD_IPA_WDI2_SET(pipe_in, ipa_ctxt);
	ipa_connect_wdi_pipe(&pipe_in, &pipe_out);
	ipa_ctxt->rx_ready_doorbell_paddr = pipe_out.uc_door_bell_pa;
	ipa_ctxt->rx_pipe_handle = pipe_out.clnt_hdl;
	HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO_HIGH,
		    "RX : RRBPA 0x%x, RRS %d, PDIPA 0x%x, RDY_DB_PAD 0x%x",
		    (unsigned int)pipe_in.u.ul.rdy_ring_base_pa,
		    pipe_in.u.ul.rdy_ring_size,
		    (unsigned int)pipe_in.u.ul.rdy_ring_rp_pa,
		    (unsigned int)ipa_ctxt->rx_ready_doorbell_paddr);

	/* Hand the uC doorbell addresses to the txrx layer */
	ol_txrx_ipa_uc_set_doorbell_paddr(cds_ctx->pdev_txrx_ctx,
					  ipa_ctxt->tx_comp_doorbell_paddr,
					  ipa_ctxt->rx_ready_doorbell_paddr);

	ol_txrx_ipa_uc_register_op_cb(cds_ctx->pdev_txrx_ctx,
				      hdd_ipa_uc_op_event_handler, (void *)hdd_ctx);

	/* One deferred work item per firmware opcode */
	for (i = 0; i < HDD_IPA_UC_OPCODE_MAX; i++) {
		cnss_init_work(&ipa_ctxt->uc_op_work[i].work,
			       hdd_ipa_uc_fw_op_event_handler);
		ipa_ctxt->uc_op_work[i].msg = NULL;
	}

	return CDF_STATUS_SUCCESS;
}
1849
Leo Change3e49442015-10-26 20:07:13 -07001850/**
1851 * hdd_ipa_uc_force_pipe_shutdown() - Force shutdown IPA pipe
1852 * @hdd_ctx: hdd main context
1853 *
1854 * Force shutdown IPA pipe
1855 * Independent of FW pipe status, IPA pipe shutdonw progress
1856 * in case, any STA does not leave properly, IPA HW pipe should cleaned up
1857 * independent from FW pipe status
1858 *
1859 * Return: NONE
1860 */
1861void hdd_ipa_uc_force_pipe_shutdown(hdd_context_t *hdd_ctx)
1862{
1863 struct hdd_ipa_priv *hdd_ipa;
1864
1865 if (!hdd_ipa_is_enabled(hdd_ctx) || !hdd_ctx->hdd_ipa)
1866 return;
1867
1868 hdd_ipa = (struct hdd_ipa_priv *)hdd_ctx->hdd_ipa;
1869 if (false == hdd_ipa->ipa_pipes_down) {
1870 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
1871 "IPA pipes are not down yet, force shutdown");
1872 hdd_ipa_uc_disable_pipes(hdd_ipa);
1873 } else {
1874 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
1875 "IPA pipes are down, do nothing");
1876 }
1877
1878 return;
1879}
1880
/**
 * hdd_ipa_uc_ssr_deinit() - handle ipa deinit for SSR
 *
 * Deinit basic IPA UC host side to be in sync reloaded FW during
 * SSR
 *
 * Return: 0 - Success
 */
int hdd_ipa_uc_ssr_deinit(void)
{
	struct hdd_ipa_priv *hdd_ipa = ghdd_ipa;
	int idx;
	struct hdd_ipa_iface_context *iface_context;

	/* Nothing to do if IPA uC offload was never brought up */
	if ((!hdd_ipa) || (!hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)))
		return 0;

	/* Clean up HDD IPA interfaces.
	 * NOTE(review): the loop also stops once num_iface hits 0 -
	 * presumably hdd_ipa_cleanup_iface() decrements num_iface as it
	 * releases each interface; verify.
	 */
	for (idx = 0; (hdd_ipa->num_iface > 0) &&
		(idx < HDD_IPA_MAX_IFACE); idx++) {
		iface_context = &hdd_ipa->iface_context[idx];
		if (iface_context && iface_context->adapter)
			hdd_ipa_cleanup_iface(iface_context);
	}

	/* After SSR, wlan driver reloads FW again. But we need to protect
	 * IPA submodule during SSR transient state. So deinit basic IPA
	 * UC host side to be in sync with reloaded FW during SSR
	 */
	if (!hdd_ipa->ipa_pipes_down)
		hdd_ipa_uc_disable_pipes(hdd_ipa);

	/* Invalidate the whole STA association map under the IPA lock */
	cdf_mutex_acquire(&hdd_ipa->ipa_lock);
	for (idx = 0; idx < WLAN_MAX_STA_COUNT; idx++) {
		hdd_ipa->assoc_stas_map[idx].is_reserved = false;
		hdd_ipa->assoc_stas_map[idx].sta_id = 0xFF;
	}
	cdf_mutex_release(&hdd_ipa->ipa_lock);

	/* Full IPA driver cleanup not required since wlan driver is now
	 * unloaded and reloaded after SSR.
	 */
	return 0;
}
1925
/**
 * hdd_ipa_uc_ssr_reinit() - handle IPA reinit after SSR
 *
 * Placeholder for resuming IPA UC operation after SSR. In the current
 * design the wlan driver is unloaded and reloaded after SSR, which
 * already performs IPA cleanup and initialization; this hook exists
 * for a future flow where IPA resumes without a driver reload.
 *
 * Return: 0 - Success
 */
int hdd_ipa_uc_ssr_reinit(void)
{
	/* Nothing to do today; driver reload covers reinitialization */
	return 0;
}
Leo Chang3bc8fed2015-11-13 10:59:47 -08001944
/**
 * hdd_ipa_tx_packet_ipa() - send packet to IPA
 * @hdd_ctx: Global HDD context
 * @skb: skb sent to IPA
 * @session_id: send packet instance session id
 *
 * Send a TX packet generated by the system to IPA by prepending the
 * frag and IPA headers that the IPA CONS pipe expects. This routine
 * is only used for function verification.
 *
 * NOTE(review): @session_id is accepted but never written into the
 * pushed ipa_header (vdev_id is hard-coded to 0) — confirm intended.
 *
 * Return: NULL packet sent to IPA properly
 *         NULL invalid packet drop
 *         skb  packet not sent to IPA; legacy data path should handle
 */
struct sk_buff *hdd_ipa_tx_packet_ipa(hdd_context_t *hdd_ctx,
	struct sk_buff *skb, uint8_t session_id)
{
	struct ipa_header *ipa_header;
	struct frag_header *frag_header;

	/* Only the uC (hardware-accelerated) path uses this injection */
	if (!hdd_ipa_uc_is_enabled(hdd_ctx))
		return skb;

	/* NOTE(review): in the Linux kernel skb_push() never returns
	 * NULL — it BUG()s when headroom is exhausted — so the two NULL
	 * checks below appear to be dead code. Flagged, not changed.
	 */
	ipa_header = (struct ipa_header *) skb_push(skb,
		sizeof(struct ipa_header));
	if (!ipa_header) {
		/* No headroom, legacy */
		return skb;
	}
	memset(ipa_header, 0, sizeof(*ipa_header));
	ipa_header->vdev_id = 0;

	frag_header = (struct frag_header *) skb_push(skb,
		sizeof(struct frag_header));
	if (!frag_header) {
		/* No headroom, drop */
		kfree_skb(skb);
		return NULL;
	}
	memset(frag_header, 0, sizeof(*frag_header));
	/* Payload length excludes both headers just pushed */
	frag_header->length = skb->len - sizeof(struct frag_header)
		- sizeof(struct ipa_header);

	/* Hand the frame to the IPA data path; IPA owns the skb now */
	ipa_tx_dp(IPA_CLIENT_WLAN1_CONS, skb, NULL);
	return NULL;
}
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001990
1991/**
1992 * hdd_ipa_wake_lock_timer_func() - Wake lock work handler
1993 * @work: scheduled work
1994 *
1995 * When IPA resources are released in hdd_ipa_rm_try_release() we do
1996 * not want to immediately release the wake lock since the system
1997 * would then potentially try to suspend when there is a healthy data
1998 * rate. Deferred work is scheduled and this function handles the
1999 * work. When this function is called, if the IPA resource is still
2000 * released then we release the wake lock.
2001 *
2002 * Return: None
2003 */
2004static void hdd_ipa_wake_lock_timer_func(struct work_struct *work)
2005{
2006 struct hdd_ipa_priv *hdd_ipa = container_of(to_delayed_work(work),
2007 struct hdd_ipa_priv,
2008 wake_lock_work);
2009
2010 cdf_spin_lock_bh(&hdd_ipa->rm_lock);
2011
2012 if (hdd_ipa->rm_state != HDD_IPA_RM_RELEASED)
2013 goto end;
2014
2015 hdd_ipa->wake_lock_released = true;
2016 cdf_wake_lock_release(&hdd_ipa->wake_lock,
2017 WIFI_POWER_EVENT_WAKELOCK_IPA);
2018
2019end:
2020 cdf_spin_unlock_bh(&hdd_ipa->rm_lock);
2021}
2022
/**
 * hdd_ipa_rm_request() - Request the WLAN_PROD resource from IPA RM
 * @hdd_ipa: Global HDD IPA context
 *
 * Drives the rm_state machine: an already-granted resource returns
 * immediately, a pending grant returns -EINPROGRESS, and a released
 * resource transitions to GRANT_PENDING before the (possibly async)
 * request is issued to the IPA inactivity timer. Also re-acquires the
 * wake lock if it had been released, and cancels any pending deferred
 * wake-lock release.
 *
 * Return: 0 on success, -EINPROGRESS if a grant is pending, or the
 *         error from ipa_rm_inactivity_timer_request_resource()
 */
static int hdd_ipa_rm_request(struct hdd_ipa_priv *hdd_ipa)
{
	int ret = 0;

	if (!hdd_ipa_is_rm_enabled(hdd_ipa->hdd_ctx))
		return 0;

	cdf_spin_lock_bh(&hdd_ipa->rm_lock);

	switch (hdd_ipa->rm_state) {
	case HDD_IPA_RM_GRANTED:
		cdf_spin_unlock_bh(&hdd_ipa->rm_lock);
		return 0;
	case HDD_IPA_RM_GRANT_PENDING:
		cdf_spin_unlock_bh(&hdd_ipa->rm_lock);
		return -EINPROGRESS;
	case HDD_IPA_RM_RELEASED:
		/* Claim the transition before dropping the lock so a
		 * concurrent request sees GRANT_PENDING.
		 */
		hdd_ipa->rm_state = HDD_IPA_RM_GRANT_PENDING;
		break;
	}

	/* Lock must be dropped across the IPA call, which may sleep */
	cdf_spin_unlock_bh(&hdd_ipa->rm_lock);

	ret = ipa_rm_inactivity_timer_request_resource(
			IPA_RM_RESOURCE_WLAN_PROD);

	cdf_spin_lock_bh(&hdd_ipa->rm_lock);
	if (ret == 0) {
		/* Immediate grant; async grants arrive via the RM
		 * notify callback instead.
		 */
		hdd_ipa->rm_state = HDD_IPA_RM_GRANTED;
		hdd_ipa->stats.num_rm_grant_imm++;
	}

	/* Resource is in use again: stop any deferred wake-lock release
	 * and take the wake lock back if it was dropped.
	 */
	cancel_delayed_work(&hdd_ipa->wake_lock_work);
	if (hdd_ipa->wake_lock_released) {
		cdf_wake_lock_acquire(&hdd_ipa->wake_lock,
				      WIFI_POWER_EVENT_WAKELOCK_IPA);
		hdd_ipa->wake_lock_released = false;
	}
	cdf_spin_unlock_bh(&hdd_ipa->rm_lock);

	return ret;
}
2071
/**
 * hdd_ipa_rm_try_release() - Attempt to release the WLAN_PROD resource
 * @hdd_ipa: Global HDD IPA context
 *
 * Releases the IPA resource only when no TX completions are
 * outstanding, no descriptors/packets are queued, and the PM queue is
 * drained. The actual wake-lock release is deferred via
 * wake_lock_work to avoid suspend/resume ping-pong under load.
 *
 * Return: 0 if resources released, negative errno otherwise
 */
static int hdd_ipa_rm_try_release(struct hdd_ipa_priv *hdd_ipa)
{
	int ret = 0;

	if (!hdd_ipa_is_rm_enabled(hdd_ipa->hdd_ctx))
		return 0;

	/* Outstanding TX completions still reference the resource */
	if (atomic_read(&hdd_ipa->tx_ref_cnt))
		return -EAGAIN;

	/* In non-STA mode, pending HW descriptors or queued packets
	 * also block the release.
	 */
	spin_lock_bh(&hdd_ipa->q_lock);
	if (!hdd_ipa_uc_sta_is_enabled(hdd_ipa->hdd_ctx) &&
	    (hdd_ipa->pending_hw_desc_cnt || hdd_ipa->pend_q_cnt)) {
		spin_unlock_bh(&hdd_ipa->q_lock);
		return -EAGAIN;
	}
	spin_unlock_bh(&hdd_ipa->q_lock);

	cdf_spin_lock_bh(&hdd_ipa->pm_lock);

	/* PM queue must be drained before the resource can go idle */
	if (!cdf_nbuf_is_queue_empty(&hdd_ipa->pm_queue_head)) {
		cdf_spin_unlock_bh(&hdd_ipa->pm_lock);
		return -EAGAIN;
	}
	cdf_spin_unlock_bh(&hdd_ipa->pm_lock);

	cdf_spin_lock_bh(&hdd_ipa->rm_lock);
	switch (hdd_ipa->rm_state) {
	case HDD_IPA_RM_GRANTED:
		break;
	case HDD_IPA_RM_GRANT_PENDING:
		cdf_spin_unlock_bh(&hdd_ipa->rm_lock);
		return -EINPROGRESS;
	case HDD_IPA_RM_RELEASED:
		cdf_spin_unlock_bh(&hdd_ipa->rm_lock);
		return 0;
	}

	/* IPA driver returns immediately so set the state here to avoid any
	 * race condition.
	 */
	hdd_ipa->rm_state = HDD_IPA_RM_RELEASED;
	hdd_ipa->stats.num_rm_release++;
	cdf_spin_unlock_bh(&hdd_ipa->rm_lock);

	ret =
		ipa_rm_inactivity_timer_release_resource(IPA_RM_RESOURCE_WLAN_PROD);

	cdf_spin_lock_bh(&hdd_ipa->rm_lock);
	if (unlikely(ret != 0)) {
		/* Release failed; roll the state back and flag it */
		hdd_ipa->rm_state = HDD_IPA_RM_GRANTED;
		WARN_ON(1);
	}

	/*
	 * If wake_lock is released immediately, kernel would try to suspend
	 * immediately as well, Just avoid ping-pong between suspend-resume
	 * while there is healthy amount of data transfer going on by
	 * releasing the wake_lock after some delay.
	 */
	schedule_delayed_work(&hdd_ipa->wake_lock_work,
			      msecs_to_jiffies
				      (HDD_IPA_RX_INACTIVITY_MSEC_DELAY));

	cdf_spin_unlock_bh(&hdd_ipa->rm_lock);

	return ret;
}
2146
/**
 * hdd_ipa_rm_notify() - IPA resource manager notifier callback
 * @user_data: user data registered with IPA (the hdd_ipa context)
 * @event: the IPA resource manager event that occurred
 * @data: the data associated with the event
 *
 * Handles GRANTED and RELEASED notifications from the IPA RM. In uC
 * mode a grant is deferred to a work queue because this callback can
 * run in ISR context where sleeping is not allowed.
 *
 * Return: None
 */
static void hdd_ipa_rm_notify(void *user_data, enum ipa_rm_event event,
			      unsigned long data)
{
	struct hdd_ipa_priv *hdd_ipa = user_data;

	if (unlikely(!hdd_ipa))
		return;

	if (!hdd_ipa_is_rm_enabled(hdd_ipa->hdd_ctx))
		return;

	HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "Evt: %d", event);

	switch (event) {
	case IPA_RM_RESOURCE_GRANTED:
		if (hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
			/* RM Notification comes with ISR context
			 * it should be serialized into work queue to avoid
			 * ISR sleep problem
			 */
			hdd_ipa->uc_rm_work.event = event;
			schedule_work(&hdd_ipa->uc_rm_work.work);
			break;
		}
		/* Non-uC mode: safe to update state inline */
		cdf_spin_lock_bh(&hdd_ipa->rm_lock);
		hdd_ipa->rm_state = HDD_IPA_RM_GRANTED;
		cdf_spin_unlock_bh(&hdd_ipa->rm_lock);
		hdd_ipa->stats.num_rm_grant++;
		break;

	case IPA_RM_RESOURCE_RELEASED:
		HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "RM Release");
		hdd_ipa->resource_unloading = false;
		break;

	default:
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "Unknown RM Evt: %d", event);
		break;
	}
}
2195
/**
 * hdd_ipa_rm_cons_release() - WLAN consumer resource release handler
 *
 * Callback registered with IPA, invoked when IPA wants to release the
 * WLAN consumer resource. WLAN never blocks the release.
 *
 * Return: 0 (release always granted)
 */
static int hdd_ipa_rm_cons_release(void)
{
	/* Always allow IPA to release the WLAN CONS resource */
	return 0;
}
2208
2209/**
2210 * hdd_ipa_rm_cons_request() - WLAN consumer resource request handler
2211 *
2212 * Callback function registered with IPA that is called when IPA wants
2213 * to access the WLAN consumer resource
2214 *
2215 * Return: 0 if the request is granted, negative errno otherwise
2216 */
2217static int hdd_ipa_rm_cons_request(void)
2218{
Yun Park4d8b60a2015-10-22 13:59:32 -07002219 int ret = 0;
2220
2221 if (ghdd_ipa->resource_loading) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002222 HDD_IPA_LOG(CDF_TRACE_LEVEL_FATAL,
Yun Park4d8b60a2015-10-22 13:59:32 -07002223 "%s: IPA resource loading in progress",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002224 __func__);
2225 ghdd_ipa->pending_cons_req = true;
Yun Park4d8b60a2015-10-22 13:59:32 -07002226 ret = -EINPROGRESS;
2227 } else if (ghdd_ipa->resource_unloading) {
2228 HDD_IPA_LOG(CDF_TRACE_LEVEL_FATAL,
2229 "%s: IPA resource unloading in progress",
2230 __func__);
2231 ghdd_ipa->pending_cons_req = true;
2232 ret = -EPERM;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002233 }
Yun Park4d8b60a2015-10-22 13:59:32 -07002234
2235 return ret;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002236}
2237
2238/**
2239 * hdd_ipa_set_perf_level() - Set IPA performance level
2240 * @hdd_ctx: Global HDD context
2241 * @tx_packets: Number of packets transmitted in the last sample period
2242 * @rx_packets: Number of packets received in the last sample period
2243 *
2244 * Return: 0 on success, negative errno on error
2245 */
2246int hdd_ipa_set_perf_level(hdd_context_t *hdd_ctx, uint64_t tx_packets,
2247 uint64_t rx_packets)
2248{
2249 uint32_t next_cons_bw, next_prod_bw;
2250 struct hdd_ipa_priv *hdd_ipa = hdd_ctx->hdd_ipa;
2251 struct ipa_rm_perf_profile profile;
2252 int ret;
2253
2254 if ((!hdd_ipa_is_enabled(hdd_ctx)) ||
2255 (!hdd_ipa_is_clk_scaling_enabled(hdd_ctx)))
2256 return 0;
2257
2258 memset(&profile, 0, sizeof(profile));
2259
2260 if (tx_packets > (hdd_ctx->config->busBandwidthHighThreshold / 2))
2261 next_cons_bw = hdd_ctx->config->IpaHighBandwidthMbps;
2262 else if (tx_packets >
2263 (hdd_ctx->config->busBandwidthMediumThreshold / 2))
2264 next_cons_bw = hdd_ctx->config->IpaMediumBandwidthMbps;
2265 else
2266 next_cons_bw = hdd_ctx->config->IpaLowBandwidthMbps;
2267
2268 if (rx_packets > (hdd_ctx->config->busBandwidthHighThreshold / 2))
2269 next_prod_bw = hdd_ctx->config->IpaHighBandwidthMbps;
2270 else if (rx_packets >
2271 (hdd_ctx->config->busBandwidthMediumThreshold / 2))
2272 next_prod_bw = hdd_ctx->config->IpaMediumBandwidthMbps;
2273 else
2274 next_prod_bw = hdd_ctx->config->IpaLowBandwidthMbps;
2275
2276 HDD_IPA_LOG(LOG1,
2277 "CONS perf curr: %d, next: %d",
2278 hdd_ipa->curr_cons_bw, next_cons_bw);
2279 HDD_IPA_LOG(LOG1,
2280 "PROD perf curr: %d, next: %d",
2281 hdd_ipa->curr_prod_bw, next_prod_bw);
2282
2283 if (hdd_ipa->curr_cons_bw != next_cons_bw) {
2284 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
2285 "Requesting CONS perf curr: %d, next: %d",
2286 hdd_ipa->curr_cons_bw, next_cons_bw);
2287 profile.max_supported_bandwidth_mbps = next_cons_bw;
2288 ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_WLAN_CONS,
2289 &profile);
2290 if (ret) {
2291 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
2292 "RM CONS set perf profile failed: %d", ret);
2293
2294 return ret;
2295 }
2296 hdd_ipa->curr_cons_bw = next_cons_bw;
2297 hdd_ipa->stats.num_cons_perf_req++;
2298 }
2299
2300 if (hdd_ipa->curr_prod_bw != next_prod_bw) {
2301 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
2302 "Requesting PROD perf curr: %d, next: %d",
2303 hdd_ipa->curr_prod_bw, next_prod_bw);
2304 profile.max_supported_bandwidth_mbps = next_prod_bw;
2305 ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_WLAN_PROD,
2306 &profile);
2307 if (ret) {
2308 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
2309 "RM PROD set perf profile failed: %d", ret);
2310 return ret;
2311 }
2312 hdd_ipa->curr_prod_bw = next_prod_bw;
2313 hdd_ipa->stats.num_prod_perf_req++;
2314 }
2315
2316 return 0;
2317}
2318
/**
 * hdd_ipa_setup_rm() - Setup IPA resource management
 * @hdd_ipa: Global HDD IPA context
 *
 * Creates the WLAN PROD and CONS RM resources, wires the PROD→APPS
 * CONS dependency, starts the inactivity timer, programs the lowest
 * perf level, and initializes the wake lock plus RM bookkeeping.
 * Failures unwind in reverse order via the goto chain.
 *
 * Return: 0 on success, negative errno on error
 */
static int hdd_ipa_setup_rm(struct hdd_ipa_priv *hdd_ipa)
{
	struct ipa_rm_create_params create_params = { 0 };
	int ret;

	if (!hdd_ipa_is_rm_enabled(hdd_ipa->hdd_ctx))
		return 0;

	/* Deferred handler for RM grants arriving in ISR context */
	cnss_init_work(&hdd_ipa->uc_rm_work.work, hdd_ipa_uc_rm_notify_defer);
	memset(&create_params, 0, sizeof(create_params));
	create_params.name = IPA_RM_RESOURCE_WLAN_PROD;
	create_params.reg_params.user_data = hdd_ipa;
	create_params.reg_params.notify_cb = hdd_ipa_rm_notify;
	create_params.floor_voltage = IPA_VOLTAGE_SVS;

	ret = ipa_rm_create_resource(&create_params);
	if (ret) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
			    "Create RM resource failed: %d", ret);
		goto setup_rm_fail;
	}

	/* Reuse create_params for the CONS resource; CONS uses the
	 * request/release callbacks rather than a notify callback.
	 */
	memset(&create_params, 0, sizeof(create_params));
	create_params.name = IPA_RM_RESOURCE_WLAN_CONS;
	create_params.request_resource = hdd_ipa_rm_cons_request;
	create_params.release_resource = hdd_ipa_rm_cons_release;
	create_params.floor_voltage = IPA_VOLTAGE_SVS;

	ret = ipa_rm_create_resource(&create_params);
	if (ret) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
			    "Create RM CONS resource failed: %d", ret);
		goto delete_prod;
	}

	/* NOTE(review): return value of ipa_rm_add_dependency() is not
	 * checked here — confirm whether a failure should unwind.
	 */
	ipa_rm_add_dependency(IPA_RM_RESOURCE_WLAN_PROD,
			      IPA_RM_RESOURCE_APPS_CONS);

	ret = ipa_rm_inactivity_timer_init(IPA_RM_RESOURCE_WLAN_PROD,
					   HDD_IPA_RX_INACTIVITY_MSEC_DELAY);
	if (ret) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "Timer init failed: %d",
			    ret);
		goto timer_init_failed;
	}

	/* Set the lowest bandwidth to start with */
	ret = hdd_ipa_set_perf_level(hdd_ipa->hdd_ctx, 0, 0);

	if (ret) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
			    "Set perf level failed: %d", ret);
		goto set_perf_failed;
	}

	cdf_wake_lock_init(&hdd_ipa->wake_lock, "wlan_ipa");
#ifdef CONFIG_CNSS
	cnss_init_delayed_work(&hdd_ipa->wake_lock_work,
			       hdd_ipa_wake_lock_timer_func);
#else
	INIT_DELAYED_WORK(&hdd_ipa->wake_lock_work,
			  hdd_ipa_wake_lock_timer_func);
#endif
	cdf_spinlock_init(&hdd_ipa->rm_lock);
	hdd_ipa->rm_state = HDD_IPA_RM_RELEASED;
	hdd_ipa->wake_lock_released = true;
	atomic_set(&hdd_ipa->tx_ref_cnt, 0);

	return ret;

set_perf_failed:
	ipa_rm_inactivity_timer_destroy(IPA_RM_RESOURCE_WLAN_PROD);

timer_init_failed:
	ipa_rm_delete_resource(IPA_RM_RESOURCE_WLAN_CONS);

delete_prod:
	ipa_rm_delete_resource(IPA_RM_RESOURCE_WLAN_PROD);

setup_rm_fail:
	return ret;
}
2407
2408/**
2409 * hdd_ipa_destroy_rm_resource() - Destroy IPA resources
2410 * @hdd_ipa: Global HDD IPA context
2411 *
2412 * Destroys all resources associated with the IPA resource manager
2413 *
2414 * Return: None
2415 */
2416static void hdd_ipa_destroy_rm_resource(struct hdd_ipa_priv *hdd_ipa)
2417{
2418 int ret;
2419
2420 if (!hdd_ipa_is_rm_enabled(hdd_ipa->hdd_ctx))
2421 return;
2422
2423 cancel_delayed_work_sync(&hdd_ipa->wake_lock_work);
2424 cdf_wake_lock_destroy(&hdd_ipa->wake_lock);
2425
2426#ifdef WLAN_OPEN_SOURCE
2427 cancel_work_sync(&hdd_ipa->uc_rm_work.work);
2428#endif
2429 cdf_spinlock_destroy(&hdd_ipa->rm_lock);
2430
2431 ipa_rm_inactivity_timer_destroy(IPA_RM_RESOURCE_WLAN_PROD);
2432
2433 ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_WLAN_PROD);
2434 if (ret)
2435 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
2436 "RM PROD resource delete failed %d", ret);
2437
2438 ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_WLAN_CONS);
2439 if (ret)
2440 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
2441 "RM CONS resource delete failed %d", ret);
2442}
2443
2444/**
2445 * hdd_ipa_send_skb_to_network() - Send skb to kernel
2446 * @skb: network buffer
2447 * @adapter: network adapter
2448 *
2449 * Called when a network buffer is received which should not be routed
2450 * to the IPA module.
2451 *
2452 * Return: None
2453 */
2454static void hdd_ipa_send_skb_to_network(cdf_nbuf_t skb,
2455 hdd_adapter_t *adapter)
2456{
2457 struct hdd_ipa_priv *hdd_ipa = ghdd_ipa;
2458 unsigned int cpu_index;
2459
2460 if (!adapter || adapter->magic != WLAN_HDD_ADAPTER_MAGIC) {
2461 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO_LOW, "Invalid adapter: 0x%p",
2462 adapter);
2463 HDD_IPA_INCREASE_INTERNAL_DROP_COUNT(hdd_ipa);
2464 cdf_nbuf_free(skb);
2465 return;
2466 }
2467
2468 if (hdd_ipa->hdd_ctx->isUnloadInProgress) {
2469 HDD_IPA_INCREASE_INTERNAL_DROP_COUNT(hdd_ipa);
2470 cdf_nbuf_free(skb);
2471 return;
2472 }
2473
2474 skb->destructor = hdd_ipa_uc_rt_debug_destructor;
2475 skb->dev = adapter->dev;
2476 skb->protocol = eth_type_trans(skb, skb->dev);
2477 skb->ip_summed = CHECKSUM_NONE;
2478
2479 cpu_index = wlan_hdd_get_cpu();
2480
2481 ++adapter->hdd_stats.hddTxRxStats.rxPackets[cpu_index];
2482 if (netif_rx_ni(skb) == NET_RX_SUCCESS)
2483 ++adapter->hdd_stats.hddTxRxStats.rxDelivered[cpu_index];
2484 else
2485 ++adapter->hdd_stats.hddTxRxStats.rxRefused[cpu_index];
2486
2487 HDD_IPA_INCREASE_NET_SEND_COUNT(hdd_ipa);
2488 adapter->dev->last_rx = jiffies;
2489}
2490
/**
 * hdd_ipa_w2i_cb() - WLAN to IPA callback handler
 * @priv: pointer to private data registered with IPA (we register a
 *	  pointer to the global IPA context)
 * @evt: the IPA event which triggered the callback
 * @data: data associated with the event (an skb for IPA_RECEIVE)
 *
 * Handles exception-path RX from IPA: resolves the destination HDD
 * interface, strips the CLD header, optionally forwards intra-BSS
 * traffic back to TX based on the FW descriptor bits, and finally
 * delivers the frame to the kernel network stack.
 *
 * Return: None
 */
static void hdd_ipa_w2i_cb(void *priv, enum ipa_dp_evt_type evt,
			   unsigned long data)
{
	struct hdd_ipa_priv *hdd_ipa = NULL;
	hdd_adapter_t *adapter = NULL;
	cdf_nbuf_t skb;
	uint8_t iface_id;
	uint8_t session_id;
	struct hdd_ipa_iface_context *iface_context;
	cdf_nbuf_t copy;
	uint8_t fw_desc;
	int ret;

	hdd_ipa = (struct hdd_ipa_priv *)priv;

	switch (evt) {
	case IPA_RECEIVE:
		skb = (cdf_nbuf_t) data;
		if (hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
			/* uC mode: FW stashes the vdev session in cb[0].
			 * NOTE(review): session_id is used to index
			 * vdev_to_iface without a bounds check — the
			 * iface_id range check below catches bad maps,
			 * but confirm session_id < array size upstream.
			 */
			session_id = (uint8_t)skb->cb[0];
			iface_id = vdev_to_iface[session_id];
			HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO_HIGH,
				    "IPA_RECEIVE: session_id=%u, iface_id=%u",
				    session_id, iface_id);
		} else {
			iface_id = HDD_IPA_GET_IFACE_ID(skb->data);
		}

		if (iface_id >= HDD_IPA_MAX_IFACE) {
			HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
				    "IPA_RECEIVE: Invalid iface_id: %u",
				    iface_id);
			HDD_IPA_DBG_DUMP(CDF_TRACE_LEVEL_INFO_HIGH,
					 "w2i -- skb", skb->data, 8);
			HDD_IPA_INCREASE_INTERNAL_DROP_COUNT(hdd_ipa);
			cdf_nbuf_free(skb);
			return;
		}

		iface_context = &hdd_ipa->iface_context[iface_id];
		adapter = iface_context->adapter;

		HDD_IPA_DBG_DUMP(CDF_TRACE_LEVEL_DEBUG,
				 "w2i -- skb", skb->data, 8);
		/* Strip the CLD header; its length differs between the
		 * uC and legacy paths.
		 */
		if (hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
			hdd_ipa->stats.num_rx_excep++;
			skb_pull(skb, HDD_IPA_UC_WLAN_CLD_HDR_LEN);
		} else {
			skb_pull(skb, HDD_IPA_WLAN_CLD_HDR_LEN);
		}

		iface_context->stats.num_rx_ipa_excep++;

		/* Disable to forward Intra-BSS Rx packets when
		 * ap_isolate=1 in hostapd.conf
		 */
		if (adapter->sessionCtx.ap.apDisableIntraBssFwd) {
			/*
			 * When INTRA_BSS_FWD_OFFLOAD is enabled, FW will send
			 * all Rx packets to IPA uC, which need to be forwarded
			 * to other interface.
			 * And, IPA driver will send back to WLAN host driver
			 * through exception pipe with fw_desc field set by FW.
			 * Here we are checking fw_desc field for FORWARD bit
			 * set, and forward to Tx. Then copy to kernel stack
			 * only when DISCARD bit is not set.
			 */
			fw_desc = (uint8_t)skb->cb[1];

			if (fw_desc & HDD_IPA_FW_RX_DESC_FORWARD_M) {
				HDD_IPA_LOG(
					CDF_TRACE_LEVEL_DEBUG,
					"Forward packet to Tx (fw_desc=%d)",
					fw_desc);
				/* TX the copy; original continues to RX */
				copy = cdf_nbuf_copy(skb);
				if (copy) {
					hdd_ipa->ipa_tx_forward++;
					ret = hdd_softap_hard_start_xmit(
						(struct sk_buff *)copy,
						adapter->dev);
					if (ret) {
						HDD_IPA_LOG(
							CDF_TRACE_LEVEL_DEBUG,
							"Forward packet tx fail");
						hdd_ipa->stats.
							num_tx_bcmc_err++;
					} else {
						hdd_ipa->stats.num_tx_bcmc++;
					}
				}
			}

			/* DISCARD set: frame was TX-forwarded only; do
			 * not deliver it to the local stack.
			 */
			if (fw_desc & HDD_IPA_FW_RX_DESC_DISCARD_M) {
				HDD_IPA_INCREASE_INTERNAL_DROP_COUNT(hdd_ipa);
				hdd_ipa->ipa_rx_discard++;
				cdf_nbuf_free(skb);
				break;
			}

		} else {
			HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO_HIGH,
				    "Intra-BSS FWD is disabled-skip forward to Tx");
		}

		hdd_ipa_send_skb_to_network(skb, adapter);
		break;

	default:
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
			    "w2i cb wrong event: 0x%x", evt);
		return;
	}
}
2613
2614/**
2615 * hdd_ipa_nbuf_cb() - IPA TX complete callback
2616 * @skb: packet buffer which was transmitted
2617 *
2618 * Return: None
2619 */
2620static void hdd_ipa_nbuf_cb(cdf_nbuf_t skb)
2621{
2622 struct hdd_ipa_priv *hdd_ipa = ghdd_ipa;
2623
2624 HDD_IPA_LOG(CDF_TRACE_LEVEL_DEBUG, "%lx", NBUF_OWNER_PRIV_DATA(skb));
2625 ipa_free_skb((struct ipa_rx_data *)NBUF_OWNER_PRIV_DATA(skb));
2626
2627 hdd_ipa->stats.num_tx_comp_cnt++;
2628
2629 atomic_dec(&hdd_ipa->tx_ref_cnt);
2630
2631 hdd_ipa_rm_try_release(hdd_ipa);
2632}
2633
/**
 * hdd_ipa_send_pkt_to_tl() - Send an IPA packet to TL
 * @iface_context: interface-specific IPA context
 * @ipa_tx_desc: packet data descriptor owned by IPA
 *
 * Validates the destination adapter under the interface lock, tags
 * the skb with the IPA descriptor and completion callback, and hands
 * it to TL. On any drop path the descriptor is returned to IPA via
 * ipa_free_skb() and the RM is given a chance to release.
 *
 * Return: None
 */
static void hdd_ipa_send_pkt_to_tl(
		struct hdd_ipa_iface_context *iface_context,
		struct ipa_rx_data *ipa_tx_desc)
{
	struct hdd_ipa_priv *hdd_ipa = iface_context->hdd_ipa;
	uint8_t interface_id;
	hdd_adapter_t *adapter = NULL;
	cdf_nbuf_t skb;

	/* interface_lock guards adapter against concurrent teardown */
	cdf_spin_lock_bh(&iface_context->interface_lock);
	adapter = iface_context->adapter;
	if (!adapter) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_WARN, "Interface Down");
		ipa_free_skb(ipa_tx_desc);
		iface_context->stats.num_tx_drop++;
		cdf_spin_unlock_bh(&iface_context->interface_lock);
		hdd_ipa_rm_try_release(hdd_ipa);
		return;
	}

	/*
	 * During CAC period, data packets shouldn't be sent over the air so
	 * drop all the packets here
	 */
	if (WLAN_HDD_GET_AP_CTX_PTR(adapter)->dfs_cac_block_tx) {
		ipa_free_skb(ipa_tx_desc);
		cdf_spin_unlock_bh(&iface_context->interface_lock);
		iface_context->stats.num_tx_cac_drop++;
		hdd_ipa_rm_try_release(hdd_ipa);
		return;
	}

	interface_id = adapter->sessionId;
	++adapter->stats.tx_packets;

	cdf_spin_unlock_bh(&iface_context->interface_lock);

	skb = ipa_tx_desc->skb;

	/* Stash the IPA descriptor and completion hook in the nbuf so
	 * hdd_ipa_nbuf_cb() can return it to IPA on TX complete.
	 */
	cdf_mem_set(skb->cb, sizeof(skb->cb), 0);
	NBUF_OWNER_ID(skb) = IPA_NBUF_OWNER_ID;
	NBUF_CALLBACK_FN(skb) = hdd_ipa_nbuf_cb;
	if (hdd_ipa_uc_sta_is_enabled(hdd_ipa->hdd_ctx)) {
		/* STA mode: skip the frag+IPA headers prepended to the
		 * DMA buffer and shrink the length to match.
		 */
		NBUF_MAPPED_PADDR_LO(skb) = ipa_tx_desc->dma_addr
			+ HDD_IPA_WLAN_FRAG_HEADER
			+ HDD_IPA_WLAN_IPA_HEADER;
		ipa_tx_desc->skb->len -=
			HDD_IPA_WLAN_FRAG_HEADER + HDD_IPA_WLAN_IPA_HEADER;
	} else
		NBUF_MAPPED_PADDR_LO(skb) = ipa_tx_desc->dma_addr;

	NBUF_OWNER_PRIV_DATA(skb) = (unsigned long)ipa_tx_desc;

	adapter->stats.tx_bytes += ipa_tx_desc->skb->len;

	skb = ol_tx_send_ipa_data_frame(iface_context->tl_context,
					ipa_tx_desc->skb);
	if (skb) {
		/* TL refused the frame; return it to IPA */
		HDD_IPA_LOG(CDF_TRACE_LEVEL_DEBUG, "TLSHIM tx fail");
		ipa_free_skb(ipa_tx_desc);
		iface_context->stats.num_tx_err++;
		hdd_ipa_rm_try_release(hdd_ipa);
		return;
	}

	/* Dropped by hdd_ipa_nbuf_cb() on TX completion */
	atomic_inc(&hdd_ipa->tx_ref_cnt);

	iface_context->stats.num_tx++;

}
2711
/**
 * hdd_ipa_pm_send_pkt_to_tl() - Send queued packets to TL
 * @work: pointer to the scheduled work
 *
 * Called during PM resume to send packets to TL which were queued
 * while host was in the process of suspending. The pm_lock is dropped
 * around each hdd_ipa_send_pkt_to_tl() call so the (potentially slow)
 * TL hand-off never runs with the queue lock held.
 *
 * Return: None
 */
static void hdd_ipa_pm_send_pkt_to_tl(struct work_struct *work)
{
	struct hdd_ipa_priv *hdd_ipa = container_of(work,
						    struct hdd_ipa_priv,
						    pm_work);
	struct hdd_ipa_pm_tx_cb *pm_tx_cb = NULL;
	cdf_nbuf_t skb;
	uint32_t dequeued = 0;

	cdf_spin_lock_bh(&hdd_ipa->pm_lock);

	/* Drain the queue one packet at a time, cycling the lock */
	while (((skb = cdf_nbuf_queue_remove(&hdd_ipa->pm_queue_head)) != NULL)) {
		cdf_spin_unlock_bh(&hdd_ipa->pm_lock);

		/* Context was stashed in skb->cb when queued in i2w_cb */
		pm_tx_cb = (struct hdd_ipa_pm_tx_cb *)skb->cb;

		dequeued++;

		hdd_ipa_send_pkt_to_tl(pm_tx_cb->iface_context,
				       pm_tx_cb->ipa_tx_desc);

		cdf_spin_lock_bh(&hdd_ipa->pm_lock);
	}

	cdf_spin_unlock_bh(&hdd_ipa->pm_lock);

	/* Track totals and the high-water mark for diagnostics */
	hdd_ipa->stats.num_tx_dequeued += dequeued;
	if (dequeued > hdd_ipa->stats.num_max_pm_queue)
		hdd_ipa->stats.num_max_pm_queue = dequeued;
}
2751
/**
 * hdd_ipa_i2w_cb() - IPA to WLAN callback
 * @priv: pointer to private data registered with IPA (we register a
 *	  pointer to the interface-specific IPA context)
 * @evt: the IPA event which triggered the callback
 * @data: data associated with the event (an ipa_rx_data descriptor
 *	  for IPA_RECEIVE, otherwise an skb to discard)
 *
 * TX entry point for frames coming from IPA. Drops traffic during
 * SSR/unload, requests the PROD RM resource as a clock-gating
 * workaround, queues the packet if the host is suspended (drained by
 * hdd_ipa_pm_send_pkt_to_tl() on resume), and otherwise forwards it
 * to TL.
 *
 * Return: None
 */
static void hdd_ipa_i2w_cb(void *priv, enum ipa_dp_evt_type evt,
			   unsigned long data)
{
	struct hdd_ipa_priv *hdd_ipa = NULL;
	struct ipa_rx_data *ipa_tx_desc;
	struct hdd_ipa_iface_context *iface_context;
	cdf_nbuf_t skb;
	struct hdd_ipa_pm_tx_cb *pm_tx_cb = NULL;
	CDF_STATUS status = CDF_STATUS_SUCCESS;

	iface_context = (struct hdd_ipa_iface_context *)priv;
	if (evt != IPA_RECEIVE) {
		/* Non-receive events carry a plain skb; just drop it */
		skb = (cdf_nbuf_t) data;
		dev_kfree_skb_any(skb);
		iface_context->stats.num_tx_drop++;
		return;
	}

	ipa_tx_desc = (struct ipa_rx_data *)data;

	hdd_ipa = iface_context->hdd_ipa;

	/*
	 * When SSR is going on or driver is unloading, just drop the packets.
	 * During SSR, there is no use in queueing the packets as STA has to
	 * connect back any way
	 */
	status = wlan_hdd_validate_context(hdd_ipa->hdd_ctx);
	if (0 != status) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "HDD context is not valid");
		ipa_free_skb(ipa_tx_desc);
		iface_context->stats.num_tx_drop++;
		return;
	}

	skb = ipa_tx_desc->skb;

	HDD_IPA_DBG_DUMP(CDF_TRACE_LEVEL_DEBUG, "i2w", skb->data, 8);

	/*
	 * If PROD resource is not requested here then there may be cases where
	 * IPA hardware may be clocked down because of not having proper
	 * dependency graph between WLAN CONS and modem PROD pipes. Adding the
	 * workaround to request PROD resource while data is going over CONS
	 * pipe to prevent the IPA hardware clockdown.
	 */
	hdd_ipa_rm_request(hdd_ipa);

	cdf_spin_lock_bh(&hdd_ipa->pm_lock);
	/*
	 * If host is still suspended then queue the packets and these will be
	 * drained later when resume completes. When packet is arrived here and
	 * host is suspended, this means that there is already resume is in
	 * progress.
	 */
	if (hdd_ipa->suspended) {
		/* Stash the TL hand-off context in skb->cb for the
		 * resume-time drain in hdd_ipa_pm_send_pkt_to_tl().
		 */
		cdf_mem_set(skb->cb, sizeof(skb->cb), 0);
		pm_tx_cb = (struct hdd_ipa_pm_tx_cb *)skb->cb;
		pm_tx_cb->iface_context = iface_context;
		pm_tx_cb->ipa_tx_desc = ipa_tx_desc;
		cdf_nbuf_queue_add(&hdd_ipa->pm_queue_head, skb);
		hdd_ipa->stats.num_tx_queued++;

		cdf_spin_unlock_bh(&hdd_ipa->pm_lock);
		return;
	}

	cdf_spin_unlock_bh(&hdd_ipa->pm_lock);

	/*
	 * If we are here means, host is not suspended, wait for the work queue
	 * to finish.
	 */
#ifdef WLAN_OPEN_SOURCE
	flush_work(&hdd_ipa->pm_work);
#endif

	return hdd_ipa_send_pkt_to_tl(iface_context, ipa_tx_desc);
}
2840
2841/**
2842 * hdd_ipa_suspend() - Suspend IPA
2843 * @hdd_ctx: Global HDD context
2844 *
2845 * Return: 0 on success, negativer errno on error
2846 */
2847int hdd_ipa_suspend(hdd_context_t *hdd_ctx)
2848{
2849 struct hdd_ipa_priv *hdd_ipa = hdd_ctx->hdd_ipa;
2850
2851 if (!hdd_ipa_is_enabled(hdd_ctx))
2852 return 0;
2853
2854 /*
2855 * Check if IPA is ready for suspend, If we are here means, there is
2856 * high chance that suspend would go through but just to avoid any race
2857 * condition after suspend started, these checks are conducted before
2858 * allowing to suspend.
2859 */
2860 if (atomic_read(&hdd_ipa->tx_ref_cnt))
2861 return -EAGAIN;
2862
2863 cdf_spin_lock_bh(&hdd_ipa->rm_lock);
2864
2865 if (hdd_ipa->rm_state != HDD_IPA_RM_RELEASED) {
2866 cdf_spin_unlock_bh(&hdd_ipa->rm_lock);
2867 return -EAGAIN;
2868 }
2869 cdf_spin_unlock_bh(&hdd_ipa->rm_lock);
2870
2871 cdf_spin_lock_bh(&hdd_ipa->pm_lock);
2872 hdd_ipa->suspended = true;
2873 cdf_spin_unlock_bh(&hdd_ipa->pm_lock);
2874
2875 return 0;
2876}
2877
2878/**
2879 * hdd_ipa_resume() - Resume IPA following suspend
2880 * hdd_ctx: Global HDD context
2881 *
2882 * Return: 0 on success, negative errno on error
2883 */
2884int hdd_ipa_resume(hdd_context_t *hdd_ctx)
2885{
2886 struct hdd_ipa_priv *hdd_ipa = hdd_ctx->hdd_ipa;
2887
2888 if (!hdd_ipa_is_enabled(hdd_ctx))
2889 return 0;
2890
2891 schedule_work(&hdd_ipa->pm_work);
2892
2893 cdf_spin_lock_bh(&hdd_ipa->pm_lock);
2894 hdd_ipa->suspended = false;
2895 cdf_spin_unlock_bh(&hdd_ipa->pm_lock);
2896
2897 return 0;
2898}
2899
2900/**
2901 * hdd_ipa_setup_sys_pipe() - Setup all IPA Sys pipes
2902 * @hdd_ipa: Global HDD IPA context
2903 *
2904 * Return: 0 on success, negative errno on error
2905 */
2906static int hdd_ipa_setup_sys_pipe(struct hdd_ipa_priv *hdd_ipa)
2907{
2908 int i, ret = 0;
2909 struct ipa_sys_connect_params *ipa;
2910 uint32_t desc_fifo_sz;
2911
2912 /* The maximum number of descriptors that can be provided to a BAM at
2913 * once is one less than the total number of descriptors that the buffer
2914 * can contain.
2915 * If max_num_of_descriptors = (BAM_PIPE_DESCRIPTOR_FIFO_SIZE / sizeof
2916 * (SPS_DESCRIPTOR)), then (max_num_of_descriptors - 1) descriptors can
2917 * be provided at once.
2918 * Because of above requirement, one extra descriptor will be added to
2919 * make sure hardware always has one descriptor.
2920 */
2921 desc_fifo_sz = hdd_ipa->hdd_ctx->config->IpaDescSize
2922 + sizeof(struct sps_iovec);
2923
2924 /*setup TX pipes */
2925 for (i = 0; i < HDD_IPA_MAX_IFACE; i++) {
2926 ipa = &hdd_ipa->sys_pipe[i].ipa_sys_params;
2927
2928 ipa->client = hdd_ipa_adapter_2_client[i].cons_client;
2929 ipa->desc_fifo_sz = desc_fifo_sz;
2930 ipa->priv = &hdd_ipa->iface_context[i];
2931 ipa->notify = hdd_ipa_i2w_cb;
2932
2933 if (hdd_ipa_uc_sta_is_enabled(hdd_ipa->hdd_ctx)) {
2934 ipa->ipa_ep_cfg.hdr.hdr_len =
2935 HDD_IPA_UC_WLAN_TX_HDR_LEN;
2936 ipa->ipa_ep_cfg.nat.nat_en = IPA_BYPASS_NAT;
2937 ipa->ipa_ep_cfg.hdr.hdr_ofst_pkt_size_valid = 1;
2938 ipa->ipa_ep_cfg.hdr.hdr_ofst_pkt_size = 0;
2939 ipa->ipa_ep_cfg.hdr.hdr_additional_const_len =
2940 HDD_IPA_UC_WLAN_8023_HDR_SIZE;
2941 ipa->ipa_ep_cfg.hdr_ext.hdr_little_endian = true;
2942 } else {
2943 ipa->ipa_ep_cfg.hdr.hdr_len = HDD_IPA_WLAN_TX_HDR_LEN;
2944 }
2945 ipa->ipa_ep_cfg.mode.mode = IPA_BASIC;
2946
2947 if (!hdd_ipa_is_rm_enabled(hdd_ipa->hdd_ctx))
2948 ipa->keep_ipa_awake = 1;
2949
2950 ret = ipa_setup_sys_pipe(ipa, &(hdd_ipa->sys_pipe[i].conn_hdl));
2951 if (ret) {
2952 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "Failed for pipe %d"
2953 " ret: %d", i, ret);
2954 goto setup_sys_pipe_fail;
2955 }
2956 hdd_ipa->sys_pipe[i].conn_hdl_valid = 1;
2957 }
2958
2959 if (!hdd_ipa_uc_sta_is_enabled(hdd_ipa->hdd_ctx)) {
2960 /*
2961 * Hard code it here, this can be extended if in case
2962 * PROD pipe is also per interface.
2963 * Right now there is no advantage of doing this.
2964 */
2965 hdd_ipa->prod_client = IPA_CLIENT_WLAN1_PROD;
2966
2967 ipa = &hdd_ipa->sys_pipe[HDD_IPA_RX_PIPE].ipa_sys_params;
2968
2969 ipa->client = hdd_ipa->prod_client;
2970
2971 ipa->desc_fifo_sz = desc_fifo_sz;
2972 ipa->priv = hdd_ipa;
2973 ipa->notify = hdd_ipa_w2i_cb;
2974
2975 ipa->ipa_ep_cfg.nat.nat_en = IPA_BYPASS_NAT;
2976 ipa->ipa_ep_cfg.hdr.hdr_len = HDD_IPA_WLAN_RX_HDR_LEN;
2977 ipa->ipa_ep_cfg.hdr.hdr_ofst_metadata_valid = 1;
2978 ipa->ipa_ep_cfg.mode.mode = IPA_BASIC;
2979
2980 if (!hdd_ipa_is_rm_enabled(hdd_ipa->hdd_ctx))
2981 ipa->keep_ipa_awake = 1;
2982
2983 ret = ipa_setup_sys_pipe(ipa, &(hdd_ipa->sys_pipe[i].conn_hdl));
2984 if (ret) {
2985 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
2986 "Failed for RX pipe: %d", ret);
2987 goto setup_sys_pipe_fail;
2988 }
2989 hdd_ipa->sys_pipe[HDD_IPA_RX_PIPE].conn_hdl_valid = 1;
2990 }
2991
2992 return ret;
2993
2994setup_sys_pipe_fail:
2995
2996 while (--i >= 0) {
2997 ipa_teardown_sys_pipe(hdd_ipa->sys_pipe[i].conn_hdl);
2998 cdf_mem_zero(&hdd_ipa->sys_pipe[i],
2999 sizeof(struct hdd_ipa_sys_pipe));
3000 }
3001
3002 return ret;
3003}
3004
3005/**
3006 * hdd_ipa_teardown_sys_pipe() - Tear down all IPA Sys pipes
3007 * @hdd_ipa: Global HDD IPA context
3008 *
3009 * Return: None
3010 */
3011static void hdd_ipa_teardown_sys_pipe(struct hdd_ipa_priv *hdd_ipa)
3012{
3013 int ret = 0, i;
3014 for (i = 0; i < HDD_IPA_MAX_SYSBAM_PIPE; i++) {
3015 if (hdd_ipa->sys_pipe[i].conn_hdl_valid) {
3016 ret =
3017 ipa_teardown_sys_pipe(hdd_ipa->sys_pipe[i].
3018 conn_hdl);
3019 if (ret)
3020 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "Failed: %d",
3021 ret);
3022
3023 hdd_ipa->sys_pipe[i].conn_hdl_valid = 0;
3024 }
3025 }
3026}
3027
3028/**
3029 * hdd_ipa_register_interface() - register IPA interface
3030 * @hdd_ipa: Global IPA context
3031 * @iface_context: Per-interface IPA context
3032 *
3033 * Return: 0 on success, negative errno on error
3034 */
3035static int hdd_ipa_register_interface(struct hdd_ipa_priv *hdd_ipa,
3036 struct hdd_ipa_iface_context
3037 *iface_context)
3038{
3039 struct ipa_tx_intf tx_intf;
3040 struct ipa_rx_intf rx_intf;
3041 struct ipa_ioc_tx_intf_prop *tx_prop = NULL;
3042 struct ipa_ioc_rx_intf_prop *rx_prop = NULL;
3043 char *ifname = iface_context->adapter->dev->name;
3044
3045 char ipv4_hdr_name[IPA_RESOURCE_NAME_MAX];
3046 char ipv6_hdr_name[IPA_RESOURCE_NAME_MAX];
3047
3048 int num_prop = 1;
3049 int ret = 0;
3050
3051 if (hdd_ipa_is_ipv6_enabled(hdd_ipa->hdd_ctx))
3052 num_prop++;
3053
3054 /* Allocate TX properties for TOS categories, 1 each for IPv4 & IPv6 */
3055 tx_prop =
3056 cdf_mem_malloc(sizeof(struct ipa_ioc_tx_intf_prop) * num_prop);
3057 if (!tx_prop) {
3058 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "tx_prop allocation failed");
3059 goto register_interface_fail;
3060 }
3061
3062 /* Allocate RX properties, 1 each for IPv4 & IPv6 */
3063 rx_prop =
3064 cdf_mem_malloc(sizeof(struct ipa_ioc_rx_intf_prop) * num_prop);
3065 if (!rx_prop) {
3066 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "rx_prop allocation failed");
3067 goto register_interface_fail;
3068 }
3069
3070 cdf_mem_zero(&tx_intf, sizeof(tx_intf));
3071 cdf_mem_zero(&rx_intf, sizeof(rx_intf));
3072
3073 snprintf(ipv4_hdr_name, IPA_RESOURCE_NAME_MAX, "%s%s",
3074 ifname, HDD_IPA_IPV4_NAME_EXT);
3075 snprintf(ipv6_hdr_name, IPA_RESOURCE_NAME_MAX, "%s%s",
3076 ifname, HDD_IPA_IPV6_NAME_EXT);
3077
3078 rx_prop[IPA_IP_v4].ip = IPA_IP_v4;
3079 rx_prop[IPA_IP_v4].src_pipe = iface_context->prod_client;
3080 rx_prop[IPA_IP_v4].hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
3081 rx_prop[IPA_IP_v4].attrib.attrib_mask = IPA_FLT_META_DATA;
3082
3083 /*
3084 * Interface ID is 3rd byte in the CLD header. Add the meta data and
3085 * mask to identify the interface in IPA hardware
3086 */
3087 rx_prop[IPA_IP_v4].attrib.meta_data =
3088 htonl(iface_context->adapter->sessionId << 16);
3089 rx_prop[IPA_IP_v4].attrib.meta_data_mask = htonl(0x00FF0000);
3090
3091 rx_intf.num_props++;
3092 if (hdd_ipa_is_ipv6_enabled(hdd_ipa->hdd_ctx)) {
3093 rx_prop[IPA_IP_v6].ip = IPA_IP_v6;
3094 rx_prop[IPA_IP_v6].src_pipe = iface_context->prod_client;
3095 rx_prop[IPA_IP_v6].hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
3096 rx_prop[IPA_IP_v4].attrib.attrib_mask = IPA_FLT_META_DATA;
3097 rx_prop[IPA_IP_v4].attrib.meta_data =
3098 htonl(iface_context->adapter->sessionId << 16);
3099 rx_prop[IPA_IP_v4].attrib.meta_data_mask = htonl(0x00FF0000);
3100
3101 rx_intf.num_props++;
3102 }
3103
3104 tx_prop[IPA_IP_v4].ip = IPA_IP_v4;
3105 tx_prop[IPA_IP_v4].hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
3106 tx_prop[IPA_IP_v4].dst_pipe = IPA_CLIENT_WLAN1_CONS;
3107 tx_prop[IPA_IP_v4].alt_dst_pipe = iface_context->cons_client;
3108 strlcpy(tx_prop[IPA_IP_v4].hdr_name, ipv4_hdr_name,
3109 IPA_RESOURCE_NAME_MAX);
3110 tx_intf.num_props++;
3111
3112 if (hdd_ipa_is_ipv6_enabled(hdd_ipa->hdd_ctx)) {
3113 tx_prop[IPA_IP_v6].ip = IPA_IP_v6;
3114 tx_prop[IPA_IP_v6].hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
3115 tx_prop[IPA_IP_v6].dst_pipe = IPA_CLIENT_WLAN1_CONS;
3116 tx_prop[IPA_IP_v6].alt_dst_pipe = iface_context->cons_client;
3117 strlcpy(tx_prop[IPA_IP_v6].hdr_name, ipv6_hdr_name,
3118 IPA_RESOURCE_NAME_MAX);
3119 tx_intf.num_props++;
3120 }
3121
3122 tx_intf.prop = tx_prop;
3123 rx_intf.prop = rx_prop;
3124
3125 /* Call the ipa api to register interface */
3126 ret = ipa_register_intf(ifname, &tx_intf, &rx_intf);
3127
3128register_interface_fail:
3129 cdf_mem_free(tx_prop);
3130 cdf_mem_free(rx_prop);
3131 return ret;
3132}
3133
3134/**
 * hdd_ipa_remove_header() - Remove a specific header from IPA
3136 * @name: Name of the header to be removed
3137 *
3138 * Return: None
3139 */
3140static void hdd_ipa_remove_header(char *name)
3141{
3142 struct ipa_ioc_get_hdr hdrlookup;
3143 int ret = 0, len;
3144 struct ipa_ioc_del_hdr *ipa_hdr;
3145
3146 cdf_mem_zero(&hdrlookup, sizeof(hdrlookup));
3147 strlcpy(hdrlookup.name, name, sizeof(hdrlookup.name));
3148 ret = ipa_get_hdr(&hdrlookup);
3149 if (ret) {
3150 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "Hdr deleted already %s, %d",
3151 name, ret);
3152 return;
3153 }
3154
3155 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "hdl: 0x%x", hdrlookup.hdl);
3156 len = sizeof(struct ipa_ioc_del_hdr) + sizeof(struct ipa_hdr_del) * 1;
3157 ipa_hdr = (struct ipa_ioc_del_hdr *)cdf_mem_malloc(len);
3158 if (ipa_hdr == NULL) {
3159 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "ipa_hdr allocation failed");
3160 return;
3161 }
3162 ipa_hdr->num_hdls = 1;
3163 ipa_hdr->commit = 0;
3164 ipa_hdr->hdl[0].hdl = hdrlookup.hdl;
3165 ipa_hdr->hdl[0].status = -1;
3166 ret = ipa_del_hdr(ipa_hdr);
3167 if (ret != 0)
3168 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "Delete header failed: %d",
3169 ret);
3170
3171 cdf_mem_free(ipa_hdr);
3172}
3173
3174/**
3175 * hdd_ipa_add_header_info() - Add IPA header for a given interface
3176 * @hdd_ipa: Global HDD IPA context
3177 * @iface_context: Interface-specific HDD IPA context
3178 * @mac_addr: Interface MAC address
3179 *
 * Return: 0 on success, negative errno value on error
3181 */
static int hdd_ipa_add_header_info(struct hdd_ipa_priv *hdd_ipa,
				   struct hdd_ipa_iface_context *iface_context,
				   uint8_t *mac_addr)
{
	hdd_adapter_t *adapter = iface_context->adapter;
	char *ifname;
	struct ipa_ioc_add_hdr *ipa_hdr = NULL;
	int ret = -EINVAL;
	struct hdd_ipa_tx_hdr *tx_hdr = NULL;
	struct hdd_ipa_uc_tx_hdr *uc_tx_hdr = NULL;

	ifname = adapter->dev->name;

	HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "Add Partial hdr: %s, %pM",
		    ifname, mac_addr);

	/* dynamically allocate the memory to add the hdrs */
	ipa_hdr = cdf_mem_malloc(sizeof(struct ipa_ioc_add_hdr)
				 + sizeof(struct ipa_hdr_add));
	if (!ipa_hdr) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
			    "%s: ipa_hdr allocation failed", ifname);
		ret = -ENOMEM;
		goto end;
	}

	/* One partial header per ioctl; commit deferred (commit = 0) */
	ipa_hdr->commit = 0;
	ipa_hdr->num_hdrs = 1;

	if (hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
		/* uC offload path: template copied from ipa_uc_tx_hdr; the
		 * source MAC and the session (vdev) id are patched in so IPA
		 * HW can route per interface.
		 */
		uc_tx_hdr = (struct hdd_ipa_uc_tx_hdr *)ipa_hdr->hdr[0].hdr;
		memcpy(uc_tx_hdr, &ipa_uc_tx_hdr, HDD_IPA_UC_WLAN_TX_HDR_LEN);
		memcpy(uc_tx_hdr->eth.h_source, mac_addr, ETH_ALEN);
		uc_tx_hdr->ipa_hd.vdev_id = iface_context->adapter->sessionId;
		HDD_IPA_LOG(CDF_TRACE_LEVEL_DEBUG,
			    "ifname=%s, vdev_id=%d",
			    ifname, uc_tx_hdr->ipa_hd.vdev_id);
		/* Header registered under "<ifname>_ipv4" */
		snprintf(ipa_hdr->hdr[0].name, IPA_RESOURCE_NAME_MAX, "%s%s",
			 ifname, HDD_IPA_IPV4_NAME_EXT);
		ipa_hdr->hdr[0].hdr_len = HDD_IPA_UC_WLAN_TX_HDR_LEN;
		ipa_hdr->hdr[0].type = IPA_HDR_L2_ETHERNET_II;
		ipa_hdr->hdr[0].is_partial = 1;
		ipa_hdr->hdr[0].hdr_hdl = 0;
		ipa_hdr->hdr[0].is_eth2_ofst_valid = 1;
		ipa_hdr->hdr[0].eth2_ofst = HDD_IPA_UC_WLAN_HDR_DES_MAC_OFFSET;

		ret = ipa_add_hdr(ipa_hdr);
	} else {
		/* Non-uC path: 802.3 + LLC/SNAP template from ipa_tx_hdr */
		tx_hdr = (struct hdd_ipa_tx_hdr *)ipa_hdr->hdr[0].hdr;

		/* Set the Source MAC */
		memcpy(tx_hdr, &ipa_tx_hdr, HDD_IPA_WLAN_TX_HDR_LEN);
		memcpy(tx_hdr->eth.h_source, mac_addr, ETH_ALEN);

		snprintf(ipa_hdr->hdr[0].name, IPA_RESOURCE_NAME_MAX, "%s%s",
			 ifname, HDD_IPA_IPV4_NAME_EXT);
		ipa_hdr->hdr[0].hdr_len = HDD_IPA_WLAN_TX_HDR_LEN;
		ipa_hdr->hdr[0].is_partial = 1;
		ipa_hdr->hdr[0].hdr_hdl = 0;
		ipa_hdr->hdr[0].is_eth2_ofst_valid = 1;
		ipa_hdr->hdr[0].eth2_ofst = HDD_IPA_WLAN_HDR_DES_MAC_OFFSET;

		/* Set the type to IPV4 in the header */
		tx_hdr->llc_snap.eth_type = cpu_to_be16(ETH_P_IP);

		ret = ipa_add_hdr(ipa_hdr);
	}
	if (ret) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "%s IPv4 add hdr failed: %d",
			    ifname, ret);
		goto end;
	}

	HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "%s: IPv4 hdr_hdl: 0x%x",
		    ipa_hdr->hdr[0].name, ipa_hdr->hdr[0].hdr_hdl);

	if (hdd_ipa_is_ipv6_enabled(hdd_ipa->hdd_ctx)) {
		/* Reuse the same request buffer for the IPv6 variant; only
		 * the name and the EtherType/protocol field change.
		 */
		snprintf(ipa_hdr->hdr[0].name, IPA_RESOURCE_NAME_MAX, "%s%s",
			 ifname, HDD_IPA_IPV6_NAME_EXT);

		if (hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
			uc_tx_hdr =
				(struct hdd_ipa_uc_tx_hdr *)ipa_hdr->hdr[0].hdr;
			uc_tx_hdr->eth.h_proto = cpu_to_be16(ETH_P_IPV6);
		} else {
			/* Set the type to IPV6 in the header */
			tx_hdr = (struct hdd_ipa_tx_hdr *)ipa_hdr->hdr[0].hdr;
			tx_hdr->llc_snap.eth_type = cpu_to_be16(ETH_P_IPV6);
		}

		ret = ipa_add_hdr(ipa_hdr);
		if (ret) {
			HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
				    "%s: IPv6 add hdr failed: %d", ifname, ret);
			/* Roll back the IPv4 header added above */
			goto clean_ipv4_hdr;
		}

		HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "%s: IPv6 hdr_hdl: 0x%x",
			    ipa_hdr->hdr[0].name, ipa_hdr->hdr[0].hdr_hdl);
	}

	cdf_mem_free(ipa_hdr);

	return ret;

clean_ipv4_hdr:
	snprintf(ipa_hdr->hdr[0].name, IPA_RESOURCE_NAME_MAX, "%s%s",
		 ifname, HDD_IPA_IPV4_NAME_EXT);
	hdd_ipa_remove_header(ipa_hdr->hdr[0].name);
end:
	if (ipa_hdr)
		cdf_mem_free(ipa_hdr);

	return ret;
}
3297
3298/**
3299 * hdd_ipa_clean_hdr() - Cleanup IPA on a given adapter
3300 * @adapter: Adapter upon which IPA was previously configured
3301 *
3302 * Return: None
3303 */
3304static void hdd_ipa_clean_hdr(hdd_adapter_t *adapter)
3305{
3306 struct hdd_ipa_priv *hdd_ipa = ghdd_ipa;
3307 int ret;
3308 char name_ipa[IPA_RESOURCE_NAME_MAX];
3309
3310 /* Remove the headers */
3311 snprintf(name_ipa, IPA_RESOURCE_NAME_MAX, "%s%s",
3312 adapter->dev->name, HDD_IPA_IPV4_NAME_EXT);
3313 hdd_ipa_remove_header(name_ipa);
3314
3315 if (hdd_ipa_is_ipv6_enabled(hdd_ipa->hdd_ctx)) {
3316 snprintf(name_ipa, IPA_RESOURCE_NAME_MAX, "%s%s",
3317 adapter->dev->name, HDD_IPA_IPV6_NAME_EXT);
3318 hdd_ipa_remove_header(name_ipa);
3319 }
3320 /* unregister the interface with IPA */
3321 ret = ipa_deregister_intf(adapter->dev->name);
3322 if (ret)
3323 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
3324 "%s: ipa_deregister_intf fail: %d",
3325 adapter->dev->name, ret);
3326}
3327
3328/**
3329 * hdd_ipa_cleanup_iface() - Cleanup IPA on a given interface
3330 * @iface_context: interface-specific IPA context
3331 *
3332 * Return: None
3333 */
3334static void hdd_ipa_cleanup_iface(struct hdd_ipa_iface_context *iface_context)
3335{
3336 if (iface_context == NULL)
3337 return;
3338
3339 hdd_ipa_clean_hdr(iface_context->adapter);
3340
3341 cdf_spin_lock_bh(&iface_context->interface_lock);
3342 iface_context->adapter->ipa_context = NULL;
3343 iface_context->adapter = NULL;
3344 iface_context->tl_context = NULL;
3345 cdf_spin_unlock_bh(&iface_context->interface_lock);
3346 iface_context->ifa_address = 0;
3347 if (!iface_context->hdd_ipa->num_iface) {
3348 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
3349 "NUM INTF 0, Invalid");
3350 CDF_ASSERT(0);
3351 }
3352 iface_context->hdd_ipa->num_iface--;
3353}
3354
3355/**
3356 * hdd_ipa_setup_iface() - Setup IPA on a given interface
3357 * @hdd_ipa: HDD IPA global context
3358 * @adapter: Interface upon which IPA is being setup
3359 * @sta_id: Station ID of the API instance
3360 *
3361 * Return: 0 on success, negative errno value on error
3362 */
static int hdd_ipa_setup_iface(struct hdd_ipa_priv *hdd_ipa,
			       hdd_adapter_t *adapter, uint8_t sta_id)
{
	struct hdd_ipa_iface_context *iface_context = NULL;
	void *tl_context = NULL;
	int i, ret = 0;

	/* Lower layer may send multiple START_BSS_EVENT in DFS mode or during
	 * channel change indication. Since these indications are sent by lower
	 * layer as SAP updates and IPA doesn't have to do anything for these
	 * updates so ignoring!
	 */
	if (WLAN_HDD_SOFTAP == adapter->device_mode && adapter->ipa_context)
		return 0;

	/* Claim the first free per-interface IPA context slot */
	for (i = 0; i < HDD_IPA_MAX_IFACE; i++) {
		if (hdd_ipa->iface_context[i].adapter == NULL) {
			iface_context = &(hdd_ipa->iface_context[i]);
			break;
		}
	}

	if (iface_context == NULL) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
			    "All the IPA interfaces are in use");
		ret = -ENOMEM;
		goto end;
	}

	/* Bind the adapter and its TL (data path) context to the slot */
	adapter->ipa_context = iface_context;
	iface_context->adapter = adapter;
	iface_context->sta_id = sta_id;
	tl_context = ol_txrx_get_vdev_by_sta_id(sta_id);

	if (tl_context == NULL) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
			    "Not able to get TL context sta_id: %d", sta_id);
		ret = -EINVAL;
		goto end;
	}

	iface_context->tl_context = tl_context;

	/* Install the partial TX headers (IPv4, optionally IPv6) in IPA */
	ret = hdd_ipa_add_header_info(hdd_ipa, iface_context,
				      adapter->dev->dev_addr);

	if (ret)
		goto end;

	/* Configure the TX and RX pipes filter rules */
	ret = hdd_ipa_register_interface(hdd_ipa, iface_context);
	if (ret)
		goto cleanup_header;

	hdd_ipa->num_iface++;
	return ret;

cleanup_header:

	hdd_ipa_clean_hdr(adapter);
end:
	/* NOTE(review): hdd_ipa_cleanup_iface() decrements num_iface even
	 * though this failed setup never incremented it, which looks like it
	 * can unbalance the count when another interface is active — confirm
	 * against the callers' expectations.
	 */
	if (iface_context)
		hdd_ipa_cleanup_iface(iface_context);
	return ret;
}
3428
3429/**
3430 * hdd_ipa_msg_free_fn() - Free an IPA message
3431 * @buff: pointer to the IPA message
3432 * @len: length of the IPA message
3433 * @type: type of IPA message
3434 *
3435 * Return: None
3436 */
3437static void hdd_ipa_msg_free_fn(void *buff, uint32_t len, uint32_t type)
3438{
3439 hddLog(LOG1, "msg type:%d, len:%d", type, len);
3440 ghdd_ipa->stats.num_free_msg++;
3441 cdf_mem_free(buff);
3442}
3443
3444/**
3445 * hdd_ipa_send_mcc_scc_msg() - send IPA WLAN_SWITCH_TO_MCC/SCC message
3446 * @mcc_mode: 0=MCC/1=SCC
3447 *
3448 * Return: 0 on success, negative errno value on error
3449 */
int hdd_ipa_send_mcc_scc_msg(hdd_context_t *pHddCtx, bool mcc_mode)
{
	hdd_adapter_list_node_t *adapter_node = NULL, *next = NULL;
	CDF_STATUS status;
	hdd_adapter_t *pAdapter;
	struct ipa_msg_meta meta;
	struct ipa_wlan_msg *msg;
	int ret;

	/* Only meaningful when IPA uC STA offload is active */
	if (!hdd_ipa_uc_sta_is_enabled(pHddCtx))
		return -EINVAL;

	if (!pHddCtx->mcc_mode) {
		/* Flush TxRx queue for each adapter before switch to SCC */
		status = hdd_get_front_adapter(pHddCtx, &adapter_node);
		while (NULL != adapter_node && CDF_STATUS_SUCCESS == status) {
			pAdapter = adapter_node->pAdapter;
			if (pAdapter->device_mode == WLAN_HDD_INFRA_STATION ||
			    pAdapter->device_mode == WLAN_HDD_SOFTAP) {
				hddLog(CDF_TRACE_LEVEL_INFO,
				       "MCC->SCC: Flush TxRx queue(d_mode=%d)",
				       pAdapter->device_mode);
				hdd_deinit_tx_rx(pAdapter);
			}
			status = hdd_get_next_adapter(
					pHddCtx, adapter_node, &next);
			adapter_node = next;
		}
	}

	/* Send SCC/MCC Switching event to IPA */
	meta.msg_len = sizeof(*msg);
	/* NOTE(review): the message body is never populated; presumably only
	 * meta.msg_type matters for this event — confirm cdf_mem_malloc()
	 * zero-fills, otherwise IPA receives indeterminate payload fields.
	 */
	msg = cdf_mem_malloc(meta.msg_len);
	if (msg == NULL) {
		hddLog(LOGE, "msg allocation failed");
		return -ENOMEM;
	}

	meta.msg_type = mcc_mode ?
			WLAN_SWITCH_TO_MCC : WLAN_SWITCH_TO_SCC;
	hddLog(LOG1, "ipa_send_msg(Evt:%d)", meta.msg_type);

	/* On success IPA owns msg and frees it via hdd_ipa_msg_free_fn */
	ret = ipa_send_msg(&meta, msg, hdd_ipa_msg_free_fn);

	if (ret) {
		hddLog(LOGE, "ipa_send_msg(Evt:%d) - fail=%d",
		       meta.msg_type, ret);
		cdf_mem_free(msg);
	}

	return ret;
}
3502
3503/**
3504 * hdd_ipa_wlan_event_to_str() - convert IPA WLAN event to string
3505 * @event: IPA WLAN event to be converted to a string
3506 *
3507 * Return: ASCII string representing the IPA WLAN event
3508 */
3509static inline char *hdd_ipa_wlan_event_to_str(enum ipa_wlan_event event)
3510{
3511 switch (event) {
3512 case WLAN_CLIENT_CONNECT:
3513 return "WLAN_CLIENT_CONNECT";
3514 case WLAN_CLIENT_DISCONNECT:
3515 return "WLAN_CLIENT_DISCONNECT";
3516 case WLAN_CLIENT_POWER_SAVE_MODE:
3517 return "WLAN_CLIENT_POWER_SAVE_MODE";
3518 case WLAN_CLIENT_NORMAL_MODE:
3519 return "WLAN_CLIENT_NORMAL_MODE";
3520 case SW_ROUTING_ENABLE:
3521 return "SW_ROUTING_ENABLE";
3522 case SW_ROUTING_DISABLE:
3523 return "SW_ROUTING_DISABLE";
3524 case WLAN_AP_CONNECT:
3525 return "WLAN_AP_CONNECT";
3526 case WLAN_AP_DISCONNECT:
3527 return "WLAN_AP_DISCONNECT";
3528 case WLAN_STA_CONNECT:
3529 return "WLAN_STA_CONNECT";
3530 case WLAN_STA_DISCONNECT:
3531 return "WLAN_STA_DISCONNECT";
3532 case WLAN_CLIENT_CONNECT_EX:
3533 return "WLAN_CLIENT_CONNECT_EX";
3534
3535 case IPA_WLAN_EVENT_MAX:
3536 default:
3537 return "UNKNOWN";
3538 }
3539}
3540
3541/**
3542 * hdd_ipa_wlan_evt() - IPA event handler
3543 * @adapter: adapter upon which the event was received
3544 * @sta_id: station id for the event
3545 * @type: the event type
3546 * @mac_address: MAC address associated with the event
3547 *
3548 * Return: 0 on success, negative errno value on error
3549 */
3550int hdd_ipa_wlan_evt(hdd_adapter_t *adapter, uint8_t sta_id,
3551 enum ipa_wlan_event type, uint8_t *mac_addr)
3552{
3553 struct hdd_ipa_priv *hdd_ipa = ghdd_ipa;
3554 struct ipa_msg_meta meta;
3555 struct ipa_wlan_msg *msg;
3556 struct ipa_wlan_msg_ex *msg_ex = NULL;
3557 int ret;
3558
3559 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "%s: %s evt, MAC: %pM sta_id: %d",
3560 adapter->dev->name, hdd_ipa_wlan_event_to_str(type),
3561 mac_addr, sta_id);
3562
3563 if (type >= IPA_WLAN_EVENT_MAX)
3564 return -EINVAL;
3565
3566 if (WARN_ON(is_zero_ether_addr(mac_addr)))
3567 return -EINVAL;
3568
3569 if (!hdd_ipa || !hdd_ipa_is_enabled(hdd_ipa->hdd_ctx)) {
3570 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "IPA OFFLOAD NOT ENABLED");
3571 return -EINVAL;
3572 }
3573
3574 if (hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx) &&
3575 !hdd_ipa_uc_sta_is_enabled(hdd_ipa->hdd_ctx) &&
3576 (WLAN_HDD_SOFTAP != adapter->device_mode)) {
3577 return 0;
3578 }
3579
3580 /*
3581 * During IPA UC resource loading/unloading new events can be issued.
3582 * Store the events separately and handle them later.
3583 */
3584 if (hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx) &&
3585 ((hdd_ipa->resource_loading) ||
3586 (hdd_ipa->resource_unloading))) {
Yun Parkf19e07d2015-11-20 11:34:27 -08003587 unsigned int pending_event_count;
3588 struct ipa_uc_pending_event *pending_event = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003589
Yun Parkf19e07d2015-11-20 11:34:27 -08003590 hdd_err("IPA resource %s inprogress",
3591 hdd_ipa->resource_loading ? "load":"unload");
3592
3593 cdf_mutex_acquire(&hdd_ipa->event_lock);
3594
3595 cdf_list_size(&hdd_ipa->pending_event, &pending_event_count);
3596 if (pending_event_count >= HDD_IPA_MAX_PENDING_EVENT_COUNT) {
3597 hdd_notice("Reached max pending event count");
3598 cdf_list_remove_front(&hdd_ipa->pending_event,
3599 (cdf_list_node_t **)&pending_event);
3600 } else {
3601 pending_event =
3602 (struct ipa_uc_pending_event *)cdf_mem_malloc(
3603 sizeof(struct ipa_uc_pending_event));
3604 }
3605
3606 if (!pending_event) {
3607 hdd_err("Pending event memory alloc fail");
3608 cdf_mutex_release(&hdd_ipa->event_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003609 return -ENOMEM;
3610 }
Yun Parkf19e07d2015-11-20 11:34:27 -08003611
3612 pending_event->adapter = adapter;
3613 pending_event->sta_id = sta_id;
3614 pending_event->type = type;
3615 cdf_mem_copy(pending_event->mac_addr,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003616 mac_addr,
3617 CDF_MAC_ADDR_SIZE);
3618 cdf_list_insert_back(&hdd_ipa->pending_event,
Yun Parkf19e07d2015-11-20 11:34:27 -08003619 &pending_event->node);
3620
3621 cdf_mutex_release(&hdd_ipa->event_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003622 return 0;
3623 }
3624
3625 hdd_ipa->stats.event[type]++;
3626
Leo Chang3bc8fed2015-11-13 10:59:47 -08003627 meta.msg_type = type;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003628 switch (type) {
3629 case WLAN_STA_CONNECT:
3630 /* STA already connected and without disconnect, connect again
3631 * This is Roaming scenario
3632 */
3633 if (hdd_ipa->sta_connected)
3634 hdd_ipa_cleanup_iface(adapter->ipa_context);
3635
3636 if ((hdd_ipa_uc_sta_is_enabled(hdd_ipa->hdd_ctx)) &&
3637 (!hdd_ipa->sta_connected))
3638 hdd_ipa_uc_offload_enable_disable(adapter,
3639 SIR_STA_RX_DATA_OFFLOAD, 1);
3640
3641 cdf_mutex_acquire(&hdd_ipa->event_lock);
3642
3643 if (!hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
3644 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
3645 "%s: Evt: %d, IPA UC OFFLOAD NOT ENABLED",
3646 msg_ex->name, meta.msg_type);
3647 } else if ((!hdd_ipa->sap_num_connected_sta) &&
3648 (!hdd_ipa->sta_connected)) {
3649 /* Enable IPA UC TX PIPE when STA connected */
3650 ret = hdd_ipa_uc_handle_first_con(hdd_ipa);
Yun Park4cab6ee2015-10-27 11:43:40 -07003651 if (ret) {
3652 cdf_mutex_release(&hdd_ipa->event_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003653 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
3654 "handle 1st con ret %d", ret);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003655 hdd_ipa_uc_offload_enable_disable(adapter,
3656 SIR_STA_RX_DATA_OFFLOAD, 0);
3657 goto end;
3658 }
3659 }
3660 ret = hdd_ipa_setup_iface(hdd_ipa, adapter, sta_id);
3661 if (ret) {
3662 cdf_mutex_release(&hdd_ipa->event_lock);
3663 hdd_ipa_uc_offload_enable_disable(adapter,
3664 SIR_STA_RX_DATA_OFFLOAD, 0);
3665 goto end;
3666
3667#ifdef IPA_UC_OFFLOAD
3668 vdev_to_iface[adapter->sessionId] =
3669 ((struct hdd_ipa_iface_context *)
3670 (adapter->ipa_context))->iface_id;
3671#endif /* IPA_UC_OFFLOAD */
3672 }
3673
3674 cdf_mutex_release(&hdd_ipa->event_lock);
3675
3676 hdd_ipa->sta_connected = 1;
3677 break;
3678
3679 case WLAN_AP_CONNECT:
3680 /* For DFS channel we get two start_bss event (before and after
3681 * CAC). Also when ACS range includes both DFS and non DFS
3682 * channels, we could possibly change channel many times due to
3683 * RADAR detection and chosen channel may not be a DFS channels.
3684 * So dont return error here. Just discard the event.
3685 */
3686 if (adapter->ipa_context)
3687 return 0;
3688
3689 if (hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
3690 hdd_ipa_uc_offload_enable_disable(adapter,
3691 SIR_AP_RX_DATA_OFFLOAD, 1);
3692 }
3693 cdf_mutex_acquire(&hdd_ipa->event_lock);
3694 ret = hdd_ipa_setup_iface(hdd_ipa, adapter, sta_id);
3695 if (ret) {
3696 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
3697 "%s: Evt: %d, Interface setup failed",
3698 msg_ex->name, meta.msg_type);
3699 cdf_mutex_release(&hdd_ipa->event_lock);
3700 goto end;
3701
3702#ifdef IPA_UC_OFFLOAD
3703 vdev_to_iface[adapter->sessionId] =
3704 ((struct hdd_ipa_iface_context *)
3705 (adapter->ipa_context))->iface_id;
3706#endif /* IPA_UC_OFFLOAD */
3707 }
3708 cdf_mutex_release(&hdd_ipa->event_lock);
3709 break;
3710
3711 case WLAN_STA_DISCONNECT:
3712 cdf_mutex_acquire(&hdd_ipa->event_lock);
3713 hdd_ipa_cleanup_iface(adapter->ipa_context);
3714
3715 if (!hdd_ipa->sta_connected) {
3716 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
3717 "%s: Evt: %d, STA already disconnected",
3718 msg_ex->name, meta.msg_type);
3719 cdf_mutex_release(&hdd_ipa->event_lock);
3720 return -EINVAL;
3721 }
3722 hdd_ipa->sta_connected = 0;
3723 if (!hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
3724 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
3725 "%s: IPA UC OFFLOAD NOT ENABLED",
3726 msg_ex->name);
3727 } else {
3728 /* Disable IPA UC TX PIPE when STA disconnected */
3729 if ((!hdd_ipa->sap_num_connected_sta) ||
3730 ((!hdd_ipa->num_iface) &&
3731 (HDD_IPA_UC_NUM_WDI_PIPE ==
3732 hdd_ipa->activated_fw_pipe))) {
3733 hdd_ipa_uc_handle_last_discon(hdd_ipa);
3734 }
3735 }
3736
3737 if (hdd_ipa_uc_sta_is_enabled(hdd_ipa->hdd_ctx)) {
3738 hdd_ipa_uc_offload_enable_disable(adapter,
3739 SIR_STA_RX_DATA_OFFLOAD, 0);
3740 vdev_to_iface[adapter->sessionId] = HDD_IPA_MAX_IFACE;
3741 }
3742
3743 cdf_mutex_release(&hdd_ipa->event_lock);
3744 break;
3745
3746 case WLAN_AP_DISCONNECT:
3747 if (!adapter->ipa_context) {
3748 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
3749 "%s: Evt: %d, SAP already disconnected",
3750 msg_ex->name, meta.msg_type);
3751 return -EINVAL;
3752 }
3753
3754 cdf_mutex_acquire(&hdd_ipa->event_lock);
3755 hdd_ipa_cleanup_iface(adapter->ipa_context);
3756 if ((!hdd_ipa->num_iface) &&
3757 (HDD_IPA_UC_NUM_WDI_PIPE ==
3758 hdd_ipa->activated_fw_pipe)) {
3759 if (hdd_ipa->hdd_ctx->isUnloadInProgress) {
3760 /*
3761 * We disable WDI pipes directly here since
3762 * IPA_OPCODE_TX/RX_SUSPEND message will not be
3763 * processed when unloading WLAN driver is in
3764 * progress
3765 */
3766 hdd_ipa_uc_disable_pipes(hdd_ipa);
3767 } else {
3768 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
3769 "NO INTF left but still pipe clean up");
3770 hdd_ipa_uc_handle_last_discon(hdd_ipa);
3771 }
3772 }
3773
3774 if (hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
3775 hdd_ipa_uc_offload_enable_disable(adapter,
3776 SIR_AP_RX_DATA_OFFLOAD, 0);
3777 vdev_to_iface[adapter->sessionId] = HDD_IPA_MAX_IFACE;
3778 }
3779 cdf_mutex_release(&hdd_ipa->event_lock);
3780 break;
3781
3782 case WLAN_CLIENT_CONNECT_EX:
3783 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "%d %d",
3784 adapter->dev->ifindex, sta_id);
3785
3786 if (!hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
3787 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
3788 "%s: Evt: %d, IPA UC OFFLOAD NOT ENABLED",
3789 adapter->dev->name, meta.msg_type);
3790 return 0;
3791 }
3792
3793 cdf_mutex_acquire(&hdd_ipa->event_lock);
3794 if (hdd_ipa_uc_find_add_assoc_sta(hdd_ipa,
3795 true, sta_id)) {
3796 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
3797 "%s: STA ID %d found, not valid",
3798 adapter->dev->name, sta_id);
3799 cdf_mutex_release(&hdd_ipa->event_lock);
3800 return 0;
3801 }
Yun Park312f71a2015-12-08 10:22:42 -08003802
3803 /* Enable IPA UC Data PIPEs when first STA connected */
3804 if ((0 == hdd_ipa->sap_num_connected_sta) &&
3805 (!hdd_ipa_uc_sta_is_enabled(hdd_ipa->hdd_ctx) ||
3806 !hdd_ipa->sta_connected)) {
3807 ret = hdd_ipa_uc_handle_first_con(hdd_ipa);
3808 if (ret) {
3809 cdf_mutex_release(&hdd_ipa->event_lock);
3810 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
3811 "%s: handle 1st con ret %d",
3812 adapter->dev->name, ret);
3813 return ret;
3814 }
3815 }
3816
3817 hdd_ipa->sap_num_connected_sta++;
3818 hdd_ipa->pending_cons_req = false;
3819
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003820 cdf_mutex_release(&hdd_ipa->event_lock);
3821
3822 meta.msg_type = type;
3823 meta.msg_len = (sizeof(struct ipa_wlan_msg_ex) +
3824 sizeof(struct ipa_wlan_hdr_attrib_val));
3825 msg_ex = cdf_mem_malloc(meta.msg_len);
3826
3827 if (msg_ex == NULL) {
3828 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
3829 "msg_ex allocation failed");
3830 return -ENOMEM;
3831 }
3832 strlcpy(msg_ex->name, adapter->dev->name,
3833 IPA_RESOURCE_NAME_MAX);
3834 msg_ex->num_of_attribs = 1;
3835 msg_ex->attribs[0].attrib_type = WLAN_HDR_ATTRIB_MAC_ADDR;
3836 if (hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
3837 msg_ex->attribs[0].offset =
3838 HDD_IPA_UC_WLAN_HDR_DES_MAC_OFFSET;
3839 } else {
3840 msg_ex->attribs[0].offset =
3841 HDD_IPA_WLAN_HDR_DES_MAC_OFFSET;
3842 }
3843 memcpy(msg_ex->attribs[0].u.mac_addr, mac_addr,
3844 IPA_MAC_ADDR_SIZE);
3845
3846 ret = ipa_send_msg(&meta, msg_ex, hdd_ipa_msg_free_fn);
3847
3848 if (ret) {
3849 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "%s: Evt: %d : %d",
3850 msg_ex->name, meta.msg_type, ret);
3851 cdf_mem_free(msg_ex);
3852 return ret;
3853 }
3854 hdd_ipa->stats.num_send_msg++;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003855 return ret;
3856
3857 case WLAN_CLIENT_DISCONNECT:
3858 if (!hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
3859 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
3860 "%s: IPA UC OFFLOAD NOT ENABLED",
3861 msg_ex->name);
3862 return 0;
3863 }
3864
3865 cdf_mutex_acquire(&hdd_ipa->event_lock);
3866 if (!hdd_ipa_uc_find_add_assoc_sta(hdd_ipa, false, sta_id)) {
3867 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
3868 "%s: STA ID %d NOT found, not valid",
3869 msg_ex->name, sta_id);
3870 cdf_mutex_release(&hdd_ipa->event_lock);
3871 return 0;
3872 }
3873 hdd_ipa->sap_num_connected_sta--;
3874 /* Disable IPA UC TX PIPE when last STA disconnected */
3875 if (!hdd_ipa->sap_num_connected_sta
3876 && (!hdd_ipa_uc_sta_is_enabled(hdd_ipa->hdd_ctx) ||
3877 !hdd_ipa->sta_connected)
3878 && (false == hdd_ipa->resource_unloading)
3879 && (HDD_IPA_UC_NUM_WDI_PIPE ==
3880 hdd_ipa->activated_fw_pipe))
3881 hdd_ipa_uc_handle_last_discon(hdd_ipa);
3882 cdf_mutex_release(&hdd_ipa->event_lock);
3883 break;
3884
3885 default:
3886 return 0;
3887 }
3888
3889 meta.msg_len = sizeof(struct ipa_wlan_msg);
3890 msg = cdf_mem_malloc(meta.msg_len);
3891 if (msg == NULL) {
3892 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "msg allocation failed");
3893 return -ENOMEM;
3894 }
3895
3896 meta.msg_type = type;
3897 strlcpy(msg->name, adapter->dev->name, IPA_RESOURCE_NAME_MAX);
3898 memcpy(msg->mac_addr, mac_addr, ETH_ALEN);
3899
3900 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "%s: Evt: %d",
3901 msg->name, meta.msg_type);
3902
3903 ret = ipa_send_msg(&meta, msg, hdd_ipa_msg_free_fn);
3904
3905 if (ret) {
3906 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "%s: Evt: %d fail:%d",
3907 msg->name, meta.msg_type, ret);
3908 cdf_mem_free(msg);
3909 return ret;
3910 }
3911
3912 hdd_ipa->stats.num_send_msg++;
3913
3914end:
3915 return ret;
3916}
3917
3918/**
3919 * hdd_ipa_rm_state_to_str() - Convert IPA RM state to string
3920 * @state: IPA RM state value
3921 *
3922 * Return: ASCII string representing the IPA RM state
3923 */
3924static inline char *hdd_ipa_rm_state_to_str(enum hdd_ipa_rm_state state)
3925{
3926 switch (state) {
3927 case HDD_IPA_RM_RELEASED:
3928 return "RELEASED";
3929 case HDD_IPA_RM_GRANT_PENDING:
3930 return "GRANT_PENDING";
3931 case HDD_IPA_RM_GRANTED:
3932 return "GRANTED";
3933 }
3934
3935 return "UNKNOWN";
3936}
3937
3938/**
3939 * hdd_ipa_init() - IPA initialization function
3940 * @hdd_ctx: HDD global context
3941 *
3942 * Allocate hdd_ipa resources, ipa pipe resource and register
3943 * wlan interface with IPA module.
3944 *
3945 * Return: CDF_STATUS enumeration
3946 */
3947CDF_STATUS hdd_ipa_init(hdd_context_t *hdd_ctx)
3948{
3949 struct hdd_ipa_priv *hdd_ipa = NULL;
3950 int ret, i;
3951 struct hdd_ipa_iface_context *iface_context = NULL;
3952
3953 if (!hdd_ipa_is_enabled(hdd_ctx))
3954 return CDF_STATUS_SUCCESS;
3955
3956 hdd_ipa = cdf_mem_malloc(sizeof(*hdd_ipa));
3957 if (!hdd_ipa) {
3958 HDD_IPA_LOG(CDF_TRACE_LEVEL_FATAL, "hdd_ipa allocation failed");
Leo Chang3bc8fed2015-11-13 10:59:47 -08003959 goto fail_return;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003960 }
3961
3962 hdd_ctx->hdd_ipa = hdd_ipa;
3963 ghdd_ipa = hdd_ipa;
3964 hdd_ipa->hdd_ctx = hdd_ctx;
3965 hdd_ipa->num_iface = 0;
Leo Chang3bc8fed2015-11-13 10:59:47 -08003966 ol_txrx_ipa_uc_get_resource(cds_get_context(CDF_MODULE_ID_TXRX),
3967 &hdd_ipa->ce_sr_base_paddr,
3968 &hdd_ipa->ce_sr_ring_size,
3969 &hdd_ipa->ce_reg_paddr,
3970 &hdd_ipa->tx_comp_ring_base_paddr,
3971 &hdd_ipa->tx_comp_ring_size,
3972 &hdd_ipa->tx_num_alloc_buffer,
3973 &hdd_ipa->rx_rdy_ring_base_paddr,
3974 &hdd_ipa->rx_rdy_ring_size,
3975 &hdd_ipa->rx_proc_done_idx_paddr,
3976 &hdd_ipa->rx_proc_done_idx_vaddr,
3977 &hdd_ipa->rx2_rdy_ring_base_paddr,
3978 &hdd_ipa->rx2_rdy_ring_size,
3979 &hdd_ipa->rx2_proc_done_idx_paddr,
3980 &hdd_ipa->rx2_proc_done_idx_vaddr);
3981 if ((0 == hdd_ipa->ce_sr_base_paddr) ||
3982 (0 == hdd_ipa->tx_comp_ring_base_paddr) ||
3983 (0 == hdd_ipa->rx_rdy_ring_base_paddr) ||
3984 (0 == hdd_ipa->rx2_rdy_ring_base_paddr)) {
3985 HDD_IPA_LOG(CDF_TRACE_LEVEL_FATAL,
3986 "IPA UC resource alloc fail");
3987 goto fail_get_resource;
3988 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003989
3990 /* Create the interface context */
3991 for (i = 0; i < HDD_IPA_MAX_IFACE; i++) {
3992 iface_context = &hdd_ipa->iface_context[i];
3993 iface_context->hdd_ipa = hdd_ipa;
3994 iface_context->cons_client =
3995 hdd_ipa_adapter_2_client[i].cons_client;
3996 iface_context->prod_client =
3997 hdd_ipa_adapter_2_client[i].prod_client;
3998 iface_context->iface_id = i;
3999 iface_context->adapter = NULL;
4000 cdf_spinlock_init(&iface_context->interface_lock);
4001 }
4002
4003#ifdef CONFIG_CNSS
4004 cnss_init_work(&hdd_ipa->pm_work, hdd_ipa_pm_send_pkt_to_tl);
4005#else
4006 INIT_WORK(&hdd_ipa->pm_work, hdd_ipa_pm_send_pkt_to_tl);
4007#endif
4008 cdf_spinlock_init(&hdd_ipa->pm_lock);
4009 cdf_nbuf_queue_init(&hdd_ipa->pm_queue_head);
4010
4011 ret = hdd_ipa_setup_rm(hdd_ipa);
4012 if (ret)
4013 goto fail_setup_rm;
4014
4015 if (hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
4016 hdd_ipa_uc_rt_debug_init(hdd_ctx);
4017 cdf_mem_zero(&hdd_ipa->stats, sizeof(hdd_ipa->stats));
4018 hdd_ipa->sap_num_connected_sta = 0;
4019 hdd_ipa->ipa_tx_packets_diff = 0;
4020 hdd_ipa->ipa_rx_packets_diff = 0;
4021 hdd_ipa->ipa_p_tx_packets = 0;
4022 hdd_ipa->ipa_p_rx_packets = 0;
4023 hdd_ipa->resource_loading = false;
4024 hdd_ipa->resource_unloading = false;
4025 hdd_ipa->sta_connected = 0;
Leo Change3e49442015-10-26 20:07:13 -07004026 hdd_ipa->ipa_pipes_down = true;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004027 /* Setup IPA sys_pipe for MCC */
4028 if (hdd_ipa_uc_sta_is_enabled(hdd_ipa->hdd_ctx)) {
4029 ret = hdd_ipa_setup_sys_pipe(hdd_ipa);
4030 if (ret)
4031 goto fail_create_sys_pipe;
4032 }
4033 hdd_ipa_uc_ol_init(hdd_ctx);
4034 } else {
4035 ret = hdd_ipa_setup_sys_pipe(hdd_ipa);
4036 if (ret)
4037 goto fail_create_sys_pipe;
4038 }
4039
4040 return CDF_STATUS_SUCCESS;
4041
4042fail_create_sys_pipe:
4043 hdd_ipa_destroy_rm_resource(hdd_ipa);
4044fail_setup_rm:
Leo Chang3bc8fed2015-11-13 10:59:47 -08004045 cdf_spinlock_destroy(&hdd_ipa->pm_lock);
4046fail_get_resource:
4047 cdf_mem_free(hdd_ipa);
4048 hdd_ctx->hdd_ipa = NULL;
4049 ghdd_ipa = NULL;
4050fail_return:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004051 return CDF_STATUS_E_FAILURE;
4052}
4053
4054/**
Yun Parkf19e07d2015-11-20 11:34:27 -08004055 * hdd_ipa_cleanup_pending_event() - Cleanup IPA pending event list
4056 * @hdd_ipa: pointer to HDD IPA struct
4057 *
4058 * Return: none
4059 */
4060void hdd_ipa_cleanup_pending_event(struct hdd_ipa_priv *hdd_ipa)
4061{
4062 struct ipa_uc_pending_event *pending_event = NULL;
4063
4064 while (cdf_list_remove_front(&hdd_ipa->pending_event,
4065 (cdf_list_node_t **)&pending_event) == CDF_STATUS_SUCCESS) {
4066 cdf_mem_free(pending_event);
4067 }
4068
4069 cdf_list_destroy(&hdd_ipa->pending_event);
4070}
4071
/**
 * hdd_ipa_cleanup - IPA cleanup function
 * @hdd_ctx: HDD global context
 *
 * Tears down everything hdd_ipa_init() established: sys pipes, RM
 * resources, the deferred-TX work and its packet queue, per-interface
 * locks, and (in uC mode) the WDI TX/RX pipes plus the pending-event
 * list. Frees the hdd_ipa context last and clears the global pointers.
 *
 * Return: CDF_STATUS enumeration
 */
CDF_STATUS hdd_ipa_cleanup(hdd_context_t *hdd_ctx)
{
	struct hdd_ipa_priv *hdd_ipa = hdd_ctx->hdd_ipa;
	int i;
	struct hdd_ipa_iface_context *iface_context = NULL;
	cdf_nbuf_t skb;
	struct hdd_ipa_pm_tx_cb *pm_tx_cb = NULL;

	/* Nothing was initialized if IPA offload is disabled */
	if (!hdd_ipa_is_enabled(hdd_ctx))
		return CDF_STATUS_SUCCESS;

	/* Non-uC mode registered the IPv4 notifier and a sys pipe in init;
	 * undo both here.
	 */
	if (!hdd_ipa_uc_is_enabled(hdd_ctx)) {
		unregister_inetaddr_notifier(&hdd_ipa->ipv4_notifier);
		hdd_ipa_teardown_sys_pipe(hdd_ipa);
	}

	/* Teardown IPA sys_pipe for MCC */
	if (hdd_ipa_uc_sta_is_enabled(hdd_ipa->hdd_ctx))
		hdd_ipa_teardown_sys_pipe(hdd_ipa);

	hdd_ipa_destroy_rm_resource(hdd_ipa);

#ifdef WLAN_OPEN_SOURCE
	/* Ensure the deferred-TX worker is not running before draining */
	cancel_work_sync(&hdd_ipa->pm_work);
#endif

	cdf_spin_lock_bh(&hdd_ipa->pm_lock);

	/* Drain packets still parked on the PM queue. NOTE(review): the
	 * lock is dropped around ipa_free_skb() — presumably because it
	 * may sleep or call back into this driver; confirm against the
	 * IPA driver's context requirements.
	 */
	while (((skb = cdf_nbuf_queue_remove(&hdd_ipa->pm_queue_head)) != NULL)) {
		cdf_spin_unlock_bh(&hdd_ipa->pm_lock);

		pm_tx_cb = (struct hdd_ipa_pm_tx_cb *)skb->cb;
		ipa_free_skb(pm_tx_cb->ipa_tx_desc);

		cdf_spin_lock_bh(&hdd_ipa->pm_lock);
	}
	cdf_spin_unlock_bh(&hdd_ipa->pm_lock);

	cdf_spinlock_destroy(&hdd_ipa->pm_lock);

	/* destroy the interface locks */
	for (i = 0; i < HDD_IPA_MAX_IFACE; i++) {
		iface_context = &hdd_ipa->iface_context[i];
		cdf_spinlock_destroy(&iface_context->interface_lock);
	}

	/* This should never hit but still make sure that there are no pending
	 * descriptor in IPA hardware
	 */
	if (hdd_ipa->pending_hw_desc_cnt != 0) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
			    "IPA Pending write done: %d Waiting!",
			    hdd_ipa->pending_hw_desc_cnt);

		/* Bounded wait: up to 10 polls of ~100us each.
		 * NOTE(review): usleep_range() with min == max defeats
		 * timer coalescing; a small spread (e.g. 100, 200) is the
		 * kernel-recommended usage — confirm before changing.
		 */
		for (i = 0; hdd_ipa->pending_hw_desc_cnt != 0 && i < 10; i++) {
			usleep_range(100, 100);
		}

		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
			    "IPA Pending write done: desc: %d %s(%d)!",
			    hdd_ipa->pending_hw_desc_cnt,
			    hdd_ipa->pending_hw_desc_cnt == 0 ? "completed"
			    : "leak", i);
	}
	if (hdd_ipa_uc_is_enabled(hdd_ctx)) {
		hdd_ipa_uc_rt_debug_deinit(hdd_ctx);
		HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
			    "%s: Disconnect TX PIPE", __func__);
		ipa_disconnect_wdi_pipe(hdd_ipa->tx_pipe_handle);
		HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
			    "%s: Disconnect RX PIPE", __func__);
		ipa_disconnect_wdi_pipe(hdd_ipa->rx_pipe_handle);
		cdf_mutex_destroy(&hdd_ipa->event_lock);
		cdf_mutex_destroy(&hdd_ipa->ipa_lock);
		hdd_ipa_cleanup_pending_event(hdd_ipa);

#ifdef WLAN_OPEN_SOURCE
		/* Stop any in-flight uC opcode work before freeing context */
		for (i = 0; i < HDD_IPA_UC_OPCODE_MAX; i++) {
			cancel_work_sync(&hdd_ipa->uc_op_work[i].work);
			hdd_ipa->uc_op_work[i].msg = NULL;
		}
#endif
	}

	cdf_mem_free(hdd_ipa);
	hdd_ctx->hdd_ipa = NULL;

	return CDF_STATUS_SUCCESS;
}
4167#endif /* IPA_OFFLOAD */