blob: 5d6cb265c65c067aaca21f195cc47f683bba7add [file] [log] [blame]
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001/*
Prashanth Bhatta9e143052015-12-04 11:56:47 -08002 * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003 *
4 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5 *
6 *
7 * Permission to use, copy, modify, and/or distribute this software for
8 * any purpose with or without fee is hereby granted, provided that the
9 * above copyright notice and this permission notice appear in all
10 * copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19 * PERFORMANCE OF THIS SOFTWARE.
20 */
21
22/*
23 * This file was originally distributed by Qualcomm Atheros, Inc.
24 * under proprietary terms before Copyright ownership was assigned
25 * to the Linux Foundation.
26 */
27
28/**
29 * DOC: wlan_hdd_ipa.c
30 *
31 * WLAN HDD and ipa interface implementation
32 * Originally written by Qualcomm Atheros, Inc
33 */
34
35#ifdef IPA_OFFLOAD
36
37/* Include Files */
38#include <wlan_hdd_includes.h>
39#include <wlan_hdd_ipa.h>
40
41#include <linux/etherdevice.h>
42#include <linux/atomic.h>
43#include <linux/netdevice.h>
44#include <linux/skbuff.h>
45#include <linux/list.h>
46#include <linux/debugfs.h>
47#include <linux/inetdevice.h>
48#include <linux/ip.h>
49#include <wlan_hdd_softap_tx_rx.h>
50#include <ol_txrx_osif_api.h>
51
52#include "cds_sched.h"
53
54#include "wma.h"
55#include "wma_api.h"
56
57#define HDD_IPA_DESC_BUFFER_RATIO 4
58#define HDD_IPA_IPV4_NAME_EXT "_ipv4"
59#define HDD_IPA_IPV6_NAME_EXT "_ipv6"
60
61#define HDD_IPA_RX_INACTIVITY_MSEC_DELAY 1000
62#define HDD_IPA_UC_WLAN_HDR_DES_MAC_OFFSET 12
63#define HDD_IPA_UC_WLAN_8023_HDR_SIZE 14
64/* WDI TX and RX PIPE */
65#define HDD_IPA_UC_NUM_WDI_PIPE 2
66#define HDD_IPA_UC_MAX_PENDING_EVENT 33
67
68#define HDD_IPA_UC_DEBUG_DUMMY_MEM_SIZE 32000
69#define HDD_IPA_UC_RT_DEBUG_PERIOD 300
70#define HDD_IPA_UC_RT_DEBUG_BUF_COUNT 30
71#define HDD_IPA_UC_RT_DEBUG_FILL_INTERVAL 10000
72
73#define HDD_IPA_WLAN_HDR_DES_MAC_OFFSET 0
74#define HDD_IPA_MAX_IFACE 3
75#define HDD_IPA_MAX_SYSBAM_PIPE 4
76#define HDD_IPA_RX_PIPE HDD_IPA_MAX_IFACE
77#define HDD_IPA_ENABLE_MASK BIT(0)
78#define HDD_IPA_PRE_FILTER_ENABLE_MASK BIT(1)
79#define HDD_IPA_IPV6_ENABLE_MASK BIT(2)
80#define HDD_IPA_RM_ENABLE_MASK BIT(3)
81#define HDD_IPA_CLK_SCALING_ENABLE_MASK BIT(4)
82#define HDD_IPA_UC_ENABLE_MASK BIT(5)
83#define HDD_IPA_UC_STA_ENABLE_MASK BIT(6)
84#define HDD_IPA_REAL_TIME_DEBUGGING BIT(8)
85
Yun Parkf19e07d2015-11-20 11:34:27 -080086#define HDD_IPA_MAX_PENDING_EVENT_COUNT 20
87
/*
 * hdd_ipa_uc_op_code - opcodes exchanged with the IPA uC firmware.
 * Values are part of the FW interface; do not renumber. The textual
 * names in op_string[] must stay in the same order as this enum.
 */
typedef enum {
	HDD_IPA_UC_OPCODE_TX_SUSPEND = 0,
	HDD_IPA_UC_OPCODE_TX_RESUME = 1,
	HDD_IPA_UC_OPCODE_RX_SUSPEND = 2,
	HDD_IPA_UC_OPCODE_RX_RESUME = 3,
	HDD_IPA_UC_OPCODE_STATS = 4,
	/* keep this last */
	HDD_IPA_UC_OPCODE_MAX
} hdd_ipa_uc_op_code;
97
/**
 * enum - Reason codes for a uC stat query (stored in stat_req_reason)
 *
 * @HDD_IPA_UC_STAT_REASON_NONE: Initial value, no query outstanding
 * @HDD_IPA_UC_STAT_REASON_DEBUG: Stats requested for debug/info dump
 * @HDD_IPA_UC_STAT_REASON_BW_CAL: Stats requested for bandwidth calibration
 */
enum {
	HDD_IPA_UC_STAT_REASON_NONE,
	HDD_IPA_UC_STAT_REASON_DEBUG,
	HDD_IPA_UC_STAT_REASON_BW_CAL
};
110
/**
 * enum hdd_ipa_rm_state - IPA resource manager state
 * @HDD_IPA_RM_RELEASED: PROD pipe resource released
 * @HDD_IPA_RM_GRANT_PENDING: PROD pipe resource requested but not granted yet
 * @HDD_IPA_RM_GRANTED: PROD pipe resource granted
 */
enum hdd_ipa_rm_state {
	HDD_IPA_RM_RELEASED,
	HDD_IPA_RM_GRANT_PENDING,
	HDD_IPA_RM_GRANTED,
};
122
/**
 * struct llc_snap_hdr - 802.2 LLC/SNAP header
 * @dsap: destination SAP (0xaa for SNAP, per ipa_tx_hdr template below)
 * @ssap: source SAP (0xaa for SNAP)
 * @resv: control byte plus OUI (template fills 0x03, 0x00, 0x00, 0x00)
 * @eth_type: EtherType carried after the SNAP header, big-endian
 *
 * Wire format — field order and packing must not change.
 */
struct llc_snap_hdr {
	uint8_t dsap;
	uint8_t ssap;
	uint8_t resv[4];
	__be16 eth_type;
} __packed;
129
/**
 * struct hdd_ipa_tx_hdr - header which IPA prepends to TX packets (non-uC path)
 * @eth: Ethernet II header
 * @llc_snap: LLC/SNAP header
 *
 * Wire format — field order and packing must not change.
 */
struct hdd_ipa_tx_hdr {
	struct ethhdr eth;
	struct llc_snap_hdr llc_snap;
} __packed;
140
/**
 * struct frag_header - fragment header type registered to IPA hardware
 * @length: fragment length
 * @reserved1: reserved, not used
 * @reserved2: reserved, not used
 *
 * Layout is consumed by IPA hardware (10 bytes packed); do not reorder.
 */
struct frag_header {
	uint16_t length;
	uint32_t reserved1;
	uint32_t reserved2;
} __packed;
153
/**
 * struct ipa_header - ipa header type registered to IPA hardware
 * @vdev_id: vdev id, LSB of the IPA descriptor word
 * @reserved: reserved, not used
 *
 * Layout is consumed by IPA hardware (4 bytes packed); do not reorder.
 */
struct ipa_header {
	uint32_t
		vdev_id:8,	/* vdev_id field is LSB of IPA DESC */
		reserved:24;
} __packed;
165
/**
 * struct hdd_ipa_uc_tx_hdr - full TX header registered to IPA hardware (uC path)
 * @frag_hd: fragment header
 * @ipa_hd: ipa header (carries the vdev id)
 * @eth: Ethernet II header
 *
 * Wire format — field order and packing must not change.
 */
struct hdd_ipa_uc_tx_hdr {
	struct frag_header frag_hd;
	struct ipa_header ipa_hd;
	struct ethhdr eth;
} __packed;
178
#define HDD_IPA_WLAN_FRAG_HEADER        sizeof(struct frag_header)
/*
 * Fix: the IPA header length must be derived from struct ipa_header, not
 * struct frag_header (copy/paste bug). The two packed structs differ in
 * size (4 vs 10 bytes), so the old definition mis-stated the uC TX header
 * partial lengths.
 */
#define HDD_IPA_WLAN_IPA_HEADER         sizeof(struct ipa_header)
181
/**
 * struct hdd_ipa_cld_hdr - IPA CLD Header
 * @reserved: reserved fields
 * @iface_id: interface ID
 * @sta_id: Station ID
 *
 * Packed 32-bit structure
 * +----------+----------+--------------+--------+
 * | Reserved | QCMAP ID | interface id | STA ID |
 * +----------+----------+--------------+--------+
 */
struct hdd_ipa_cld_hdr {
	uint8_t reserved[2];
	uint8_t iface_id;
	uint8_t sta_id;
} __packed;
198
/* RX header seen by the host on the non-uC exception path:
 * CLD metadata header followed by the Ethernet header.
 * HDD_IPA_GET_IFACE_ID() reads iface_id from the leading cld_hdr.
 */
struct hdd_ipa_rx_hdr {
	struct hdd_ipa_cld_hdr cld_hdr;
	struct ethhdr eth;
} __packed;
203
/* Per-packet context queued on pm_queue_head while the device is
 * suspended; replayed by pm_work when resumed.
 */
struct hdd_ipa_pm_tx_cb {
	struct hdd_ipa_iface_context *iface_context;
	struct ipa_rx_data *ipa_tx_desc;
};
208
/* RX header for the uC path: plain Ethernet II, no CLD metadata. */
struct hdd_ipa_uc_rx_hdr {
	struct ethhdr eth;
} __packed;
212
/* One IPA SYS/BAM pipe: connection handle (valid only when
 * conn_hdl_valid is set) plus the parameters it was set up with.
 */
struct hdd_ipa_sys_pipe {
	uint32_t conn_hdl;
	uint8_t conn_hdl_valid;
	struct ipa_sys_connect_params ipa_sys_params;
};
218
/* Per-interface packet counters (one instance per hdd_ipa_iface_context). */
struct hdd_ipa_iface_stats {
	uint64_t num_tx;		/* packets transmitted */
	uint64_t num_tx_drop;		/* TX dropped */
	uint64_t num_tx_err;		/* TX errors */
	uint64_t num_tx_cac_drop;	/* TX dropped during CAC */
	uint64_t num_rx_prefilter;	/* RX caught by pre-filter */
	uint64_t num_rx_ipa_excep;	/* RX via IPA exception path */
	uint64_t num_rx_recv;		/* RX received */
	uint64_t num_rx_recv_mul;	/* RX received as multiple buffers */
	uint64_t num_rx_send_desc_err;	/* RX descriptor send failures */
	uint64_t max_rx_mul;		/* max buffers in one RX multiple */
};
231
struct hdd_ipa_priv;

/* Per-interface IPA state: back-pointer to the global context, the HDD
 * adapter and TL context it maps to, the IPA client pair used for its
 * pipes, and its own counters. interface_lock guards teardown races.
 */
struct hdd_ipa_iface_context {
	struct hdd_ipa_priv *hdd_ipa;
	hdd_adapter_t *adapter;
	void *tl_context;

	enum ipa_client_type cons_client;
	enum ipa_client_type prod_client;

	uint8_t iface_id;	/* This iface ID */
	uint8_t sta_id;		/* This iface station ID */
	cdf_spinlock_t interface_lock;
	uint32_t ifa_address;	/* IPv4 address, for pre-filter matching */
	struct hdd_ipa_iface_stats stats;
};
248
/* Global (non-per-interface) IPA statistics: WLAN event counts, message
 * send/free totals, resource-manager activity, RX/TX path counters and
 * high-water marks. num_rx_excep feeds the rt-debug ring buffer.
 */
struct hdd_ipa_stats {
	uint32_t event[IPA_WLAN_EVENT_MAX];
	uint64_t num_send_msg;
	uint64_t num_free_msg;

	/* resource manager activity */
	uint64_t num_rm_grant;
	uint64_t num_rm_release;
	uint64_t num_rm_grant_imm;
	uint64_t num_cons_perf_req;
	uint64_t num_prod_perf_req;

	/* RX path */
	uint64_t num_rx_drop;
	uint64_t num_rx_ipa_tx_dp;
	uint64_t num_rx_ipa_splice;
	uint64_t num_rx_ipa_loop;
	uint64_t num_rx_ipa_tx_dp_err;
	uint64_t num_rx_ipa_write_done;
	uint64_t num_max_ipa_tx_mul;
	uint64_t num_rx_ipa_hw_maxed_out;
	uint64_t max_pend_q_cnt;	/* high-water mark of pend queue */

	/* TX path */
	uint64_t num_tx_comp_cnt;
	uint64_t num_tx_queued;
	uint64_t num_tx_dequeued;
	uint64_t num_max_pm_queue;	/* high-water mark of PM queue */

	uint64_t num_freeq_empty;
	uint64_t num_pri_freeq_empty;
	uint64_t num_rx_excep;		/* exception-path packets */
	uint64_t num_tx_bcmc;		/* broadcast/multicast TX */
	uint64_t num_tx_bcmc_err;
};
281
/* One slot of the associated-station map (assoc_stas_map):
 * is_reserved marks the slot in use, sta_id holds the station ID
 * (reset to 0xFF on release by hdd_ipa_uc_find_add_assoc_sta).
 */
struct ipa_uc_stas_map {
	bool is_reserved;
	uint8_t sta_id;
};
/* uC operation message, op_code is one of hdd_ipa_uc_op_code.
 * Layout is shared with firmware; do not reorder fields.
 */
struct op_msg_type {
	uint8_t msg_t;
	uint8_t rsvd;
	uint16_t op_code;
	uint16_t len;
	uint16_t rsvd_snd;
};
293
/* WDI pipe statistics reported by firmware in response to a
 * HDD_IPA_UC_OPCODE_STATS query. Layout mirrors the FW message;
 * do not reorder fields.
 */
struct ipa_uc_fw_stats {
	/* TX completion ring state */
	uint32_t tx_comp_ring_base;
	uint32_t tx_comp_ring_size;
	uint32_t tx_comp_ring_dbell_addr;
	uint32_t tx_comp_ring_dbell_ind_val;
	uint32_t tx_comp_ring_dbell_cached_val;
	uint32_t tx_pkts_enqueued;
	uint32_t tx_pkts_completed;
	uint32_t tx_is_suspend;
	uint32_t tx_reserved;
	/* RX indication ring state */
	uint32_t rx_ind_ring_base;
	uint32_t rx_ind_ring_size;
	uint32_t rx_ind_ring_dbell_addr;
	uint32_t rx_ind_ring_dbell_ind_val;
	uint32_t rx_ind_ring_dbell_ind_cached_val;
	uint32_t rx_ind_ring_rdidx_addr;
	uint32_t rx_ind_ring_rd_idx_cached_val;
	uint32_t rx_refill_idx;
	uint32_t rx_num_pkts_indicated;
	uint32_t rx_buf_refilled;
	uint32_t rx_num_ind_drop_no_space;
	uint32_t rx_num_ind_drop_no_buf;
	uint32_t rx_is_suspend;
	uint32_t rx_reserved;
};
319
/* WLAN event deferred while uC resources are loading/unloading;
 * queued on hdd_ipa_priv.pending_event and replayed later.
 */
struct ipa_uc_pending_event {
	cdf_list_node_t node;	/* list linkage, must stay first */
	hdd_adapter_t *adapter;
	enum ipa_wlan_event type;
	uint8_t sta_id;
	uint8_t mac_addr[CDF_MAC_ADDR_SIZE];
};
327
/**
 * struct uc_rm_work_struct - deferred IPA RM event handling
 * @work: uC RM work item
 * @event: IPA RM event being serviced
 */
struct uc_rm_work_struct {
	struct work_struct work;
	enum ipa_rm_event event;
};
337
/**
 * struct uc_op_work_struct - deferred uC opcode handling
 * @work: uC OP work item
 * @msg: OP message being serviced (owned by the work handler)
 */
struct uc_op_work_struct {
	struct work_struct work;
	struct op_msg_type *msg;
};

/* vdev id -> iface_context index map for all roam sessions */
static uint8_t vdev_to_iface[CSR_ROAM_SESSION_MAX];
348
/**
 * struct uc_rt_debug_info - one snapshot in the real-time debug ring buffer
 * @time: system time of the snapshot
 * @ipa_excep_count: IPA exception packet count
 * @rx_drop_count: IPA Rx drop packet count
 * @net_sent_count: IPA Rx packets sent to network stack
 * @rx_discard_count: IPA Rx discarded packet count
 * @rx_mcbc_count: IPA Rx BCMC packet count
 * @tx_mcbc_count: IPA Tx BCMC packet count
 * @tx_fwd_count: IPA Tx forwarded packet count
 * @rx_destructor_call: IPA Rx packet destructor count
 *
 * NOTE(review): hdd_ipa_uc_rt_debug_host_fill() never populates
 * rx_mcbc_count, so it always reads 0 in dumps — confirm intent.
 */
struct uc_rt_debug_info {
	v_TIME_t time;
	uint64_t ipa_excep_count;
	uint64_t rx_drop_count;
	uint64_t net_sent_count;
	uint64_t rx_discard_count;
	uint64_t rx_mcbc_count;
	uint64_t tx_mcbc_count;
	uint64_t tx_fwd_count;
	uint64_t rx_destructor_call;
};
372
/* Global HDD-IPA context (singleton, see ghdd_ipa): pipe and interface
 * state, resource-manager bookkeeping, power-management queue, descriptor
 * accounting, statistics, uC ring/doorbell addresses, and the real-time
 * debug ring buffer.
 */
struct hdd_ipa_priv {
	struct hdd_ipa_sys_pipe sys_pipe[HDD_IPA_MAX_SYSBAM_PIPE];
	struct hdd_ipa_iface_context iface_context[HDD_IPA_MAX_IFACE];
	uint8_t num_iface;
	enum hdd_ipa_rm_state rm_state;
	/*
	 * IPA driver can send RM notifications with IRQ disabled so using cdf
	 * APIs as it is taken care gracefully. Without this, kernel would throw
	 * an warning if spin_lock_bh is used while IRQ is disabled
	 */
	cdf_spinlock_t rm_lock;
	struct uc_rm_work_struct uc_rm_work;
	struct uc_op_work_struct uc_op_work[HDD_IPA_UC_OPCODE_MAX];
	cdf_wake_lock_t wake_lock;
	struct delayed_work wake_lock_work;
	bool wake_lock_released;

	enum ipa_client_type prod_client;

	atomic_t tx_ref_cnt;
	/* TX packets queued while suspended; guarded by pm_lock */
	cdf_nbuf_queue_t pm_queue_head;
	struct work_struct pm_work;
	cdf_spinlock_t pm_lock;
	bool suspended;

	/* descriptor accounting; guarded by q_lock */
	uint32_t pending_hw_desc_cnt;
	uint32_t hw_desc_cnt;
	spinlock_t q_lock;
	uint32_t freeq_cnt;
	struct list_head free_desc_head;

	uint32_t pend_q_cnt;
	struct list_head pend_desc_head;

	hdd_context_t *hdd_ctx;

	struct dentry *debugfs_dir;
	struct hdd_ipa_stats stats;

	struct notifier_block ipv4_notifier;
	uint32_t curr_prod_bw;
	uint32_t curr_cons_bw;

	/* uC pipe state; ipa_lock guards the stat-query/report handshake */
	uint8_t activated_fw_pipe;
	uint8_t sap_num_connected_sta;
	uint8_t sta_connected;
	uint32_t tx_pipe_handle;
	uint32_t rx_pipe_handle;
	bool resource_loading;
	bool resource_unloading;
	bool pending_cons_req;
	struct ipa_uc_stas_map assoc_stas_map[WLAN_MAX_STA_COUNT];
	/* events deferred during resource load/unload; guarded by event_lock */
	cdf_list_t pending_event;
	cdf_mutex_t event_lock;
	bool ipa_pipes_down;
	uint32_t ipa_tx_packets_diff;
	uint32_t ipa_rx_packets_diff;
	uint32_t ipa_p_tx_packets;
	uint32_t ipa_p_rx_packets;
	uint32_t stat_req_reason;
	/* real-time debug counters; ring below guarded by rt_debug_lock */
	uint64_t ipa_tx_forward;
	uint64_t ipa_rx_discard;
	uint64_t ipa_rx_net_send_count;
	uint64_t ipa_rx_internel_drop_count;
	uint64_t ipa_rx_destructor_count;
	cdf_mc_timer_t rt_debug_timer;
	struct uc_rt_debug_info rt_bug_buffer[HDD_IPA_UC_RT_DEBUG_BUF_COUNT];
	unsigned int rt_buf_fill_index;
	cdf_mc_timer_t rt_debug_fill_timer;
	cdf_mutex_t rt_debug_lock;
	cdf_mutex_t ipa_lock;

	/* CE resources */
	cdf_dma_addr_t ce_sr_base_paddr;
	uint32_t ce_sr_ring_size;
	cdf_dma_addr_t ce_reg_paddr;

	/* WLAN TX:IPA->WLAN */
	cdf_dma_addr_t tx_comp_ring_base_paddr;
	uint32_t tx_comp_ring_size;
	uint32_t tx_num_alloc_buffer;

	/* WLAN RX:WLAN->IPA */
	cdf_dma_addr_t rx_rdy_ring_base_paddr;
	uint32_t rx_rdy_ring_size;
	cdf_dma_addr_t rx_proc_done_idx_paddr;
	void *rx_proc_done_idx_vaddr;

	/* WLAN RX2:WLAN->IPA */
	cdf_dma_addr_t rx2_rdy_ring_base_paddr;
	uint32_t rx2_rdy_ring_size;
	cdf_dma_addr_t rx2_proc_done_idx_paddr;
	void *rx2_proc_done_idx_vaddr;

	/* IPA UC doorbell registers paddr */
	cdf_dma_addr_t tx_comp_doorbell_paddr;
	cdf_dma_addr_t rx_ready_doorbell_paddr;
};
471
472#define HDD_IPA_WLAN_CLD_HDR_LEN sizeof(struct hdd_ipa_cld_hdr)
473#define HDD_IPA_UC_WLAN_CLD_HDR_LEN 0
474#define HDD_IPA_WLAN_TX_HDR_LEN sizeof(struct hdd_ipa_tx_hdr)
475#define HDD_IPA_UC_WLAN_TX_HDR_LEN sizeof(struct hdd_ipa_uc_tx_hdr)
476#define HDD_IPA_WLAN_RX_HDR_LEN sizeof(struct hdd_ipa_rx_hdr)
477#define HDD_IPA_UC_WLAN_RX_HDR_LEN sizeof(struct hdd_ipa_uc_rx_hdr)
478
Leo Chang3bc8fed2015-11-13 10:59:47 -0800479#define HDD_IPA_FW_RX_DESC_DISCARD_M 0x1
480#define HDD_IPA_FW_RX_DESC_FORWARD_M 0x2
481
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800482#define HDD_IPA_GET_IFACE_ID(_data) \
483 (((struct hdd_ipa_cld_hdr *) (_data))->iface_id)
484
485#define HDD_IPA_LOG(LVL, fmt, args ...) \
486 CDF_TRACE(CDF_MODULE_ID_HDD, LVL, \
487 "%s:%d: "fmt, __func__, __LINE__, ## args)
488
489#define HDD_IPA_DBG_DUMP(_lvl, _prefix, _buf, _len) \
490 do { \
491 CDF_TRACE(CDF_MODULE_ID_HDD, _lvl, "%s:", _prefix); \
492 CDF_TRACE_HEX_DUMP(CDF_MODULE_ID_HDD, _lvl, _buf, _len); \
493 } while (0)
494
495#define HDD_IPA_IS_CONFIG_ENABLED(_hdd_ctx, _mask) \
496 (((_hdd_ctx)->config->IpaConfig & (_mask)) == (_mask))
497
498#define HDD_IPA_INCREASE_INTERNAL_DROP_COUNT(hdd_ipa) \
499 do { \
500 hdd_ipa->ipa_rx_internel_drop_count++; \
501 } while (0)
502#define HDD_IPA_INCREASE_NET_SEND_COUNT(hdd_ipa) \
503 do { \
504 hdd_ipa->ipa_rx_net_send_count++; \
505 } while (0)
506#define HDD_BW_GET_DIFF(_x, _y) (unsigned long)((ULONG_MAX - (_y)) + (_x) + 1)
507
Leo Chang3bc8fed2015-11-13 10:59:47 -0800508/* Temporary macro to make a build without IPA V2 */
509#ifdef IPA_V2
510#define HDD_IPA_WDI2_SET(pipe_in, ipa_ctxt) \
511do { \
512 pipe_in.u.ul.rdy_ring_rp_va = ipa_ctxt->rx_proc_done_idx_vaddr; \
513 pipe_in.u.ul.rdy_comp_ring_base_pa = ipa_ctxt->rx2_rdy_ring_base_paddr;\
514 pipe_in.u.ul.rdy_comp_ring_size = ipa_ctxt->rx2_rdy_ring_size; \
515 pipe_in.u.ul.rdy_comp_ring_wp_pa = ipa_ctxt->rx2_proc_done_idx_paddr; \
516 pipe_in.u.ul.rdy_comp_ring_wp_va = ipa_ctxt->rx2_proc_done_idx_vaddr; \
517} while (0)
518#else
519/* Do nothing */
520#define HDD_IPA_WDI2_SET(pipe_in, ipa_ctxt)
521#endif /* IPA_V2 */
522
/* IPA client pair (CONS, PROD) assigned to each of the HDD_IPA_MAX_IFACE
 * interfaces, indexed by iface_id. All interfaces share WLAN1_PROD.
 */
static struct hdd_ipa_adapter_2_client {
	enum ipa_client_type cons_client;
	enum ipa_client_type prod_client;
} hdd_ipa_adapter_2_client[HDD_IPA_MAX_IFACE] = {
	{
		IPA_CLIENT_WLAN2_CONS, IPA_CLIENT_WLAN1_PROD
	}, {
		IPA_CLIENT_WLAN3_CONS, IPA_CLIENT_WLAN1_PROD
	}, {
		IPA_CLIENT_WLAN4_CONS, IPA_CLIENT_WLAN1_PROD
	},
};
535
/* For Tx pipes, use Ethernet-II Header format */
/* Template uC TX header registered with IPA; placeholder MAC addresses
 * and EtherType are rewritten per interface at registration time.
 * NOTE(review): not declared static although it looks file-local — confirm
 * no external references before adding static.
 */
struct hdd_ipa_uc_tx_hdr ipa_uc_tx_hdr = {
	{
		0x0000,		/* frag_header.length */
		0x00000000,	/* reserved1 */
		0x00000000	/* reserved2 */
	},
	{
		0x00000000	/* ipa_header: vdev_id + reserved */
	},
	{
		{0x00, 0x03, 0x7f, 0xaa, 0xbb, 0xcc},	/* placeholder dest MAC */
		{0x00, 0x03, 0x7f, 0xdd, 0xee, 0xff},	/* placeholder src MAC */
		0x0008	/* EtherType bytes 08 00 in memory; see ipa_tx_hdr note */
	}
};
552
/* For Tx pipes, use 802.3 Header format */
/* Template 802.3 + LLC/SNAP TX header for the non-uC path; MAC addresses
 * and the SNAP EtherType are filled in by the WLAN driver per packet.
 */
static struct hdd_ipa_tx_hdr ipa_tx_hdr = {
	{
		{0xDE, 0xAD, 0xBE, 0xEF, 0xFF, 0xFF},	/* placeholder dest MAC */
		{0xDE, 0xAD, 0xBE, 0xEF, 0xFF, 0xFF},	/* placeholder src MAC */
		0x00	/* length can be zero */
	},
	{
		/* LLC SNAP header 8 bytes */
		0xaa, 0xaa,
		{0x03, 0x00, 0x00, 0x00},
		0x0008	/* type value(2 bytes) ,filled by wlan */
			/* 0x0800 - IPV4, 0x86dd - IPV6 */
	}
};
568
/* Human-readable names for hdd_ipa_uc_op_code values; order and count
 * must match the enum exactly.
 */
static const char *op_string[] = {
	"TX_SUSPEND",
	"TX_RESUME",
	"RX_SUSPEND",
	"RX_RESUME",
	"STATS",
};
576
577static struct hdd_ipa_priv *ghdd_ipa;
578
579/* Local Function Prototypes */
580static void hdd_ipa_i2w_cb(void *priv, enum ipa_dp_evt_type evt,
581 unsigned long data);
582static void hdd_ipa_w2i_cb(void *priv, enum ipa_dp_evt_type evt,
583 unsigned long data);
584
585static void hdd_ipa_cleanup_iface(struct hdd_ipa_iface_context *iface_context);
586
587/**
588 * hdd_ipa_is_enabled() - Is IPA enabled?
589 * @hdd_ctx: Global HDD context
590 *
591 * Return: true if IPA is enabled, false otherwise
592 */
593bool hdd_ipa_is_enabled(hdd_context_t *hdd_ctx)
594{
595 return HDD_IPA_IS_CONFIG_ENABLED(hdd_ctx, HDD_IPA_ENABLE_MASK);
596}
597
598/**
599 * hdd_ipa_uc_is_enabled() - Is IPA uC offload enabled?
600 * @hdd_ctx: Global HDD context
601 *
602 * Return: true if IPA uC offload is enabled, false otherwise
603 */
604bool hdd_ipa_uc_is_enabled(hdd_context_t *hdd_ctx)
605{
606 return HDD_IPA_IS_CONFIG_ENABLED(hdd_ctx, HDD_IPA_UC_ENABLE_MASK);
607}
608
609/**
610 * hdd_ipa_uc_sta_is_enabled() - Is STA mode IPA uC offload enabled?
611 * @hdd_ctx: Global HDD context
612 *
613 * Return: true if STA mode IPA uC offload is enabled, false otherwise
614 */
615static inline bool hdd_ipa_uc_sta_is_enabled(hdd_context_t *hdd_ctx)
616{
617 return HDD_IPA_IS_CONFIG_ENABLED(hdd_ctx, HDD_IPA_UC_STA_ENABLE_MASK);
618}
619
620/**
621 * hdd_ipa_is_pre_filter_enabled() - Is IPA pre-filter enabled?
622 * @hdd_ipa: Global HDD IPA context
623 *
624 * Return: true if pre-filter is enabled, otherwise false
625 */
626static inline bool hdd_ipa_is_pre_filter_enabled(hdd_context_t *hdd_ctx)
627{
628 return HDD_IPA_IS_CONFIG_ENABLED(hdd_ctx,
629 HDD_IPA_PRE_FILTER_ENABLE_MASK);
630}
631
632/**
633 * hdd_ipa_is_ipv6_enabled() - Is IPA IPv6 enabled?
634 * @hdd_ipa: Global HDD IPA context
635 *
636 * Return: true if IPv6 is enabled, otherwise false
637 */
638static inline bool hdd_ipa_is_ipv6_enabled(hdd_context_t *hdd_ctx)
639{
640 return HDD_IPA_IS_CONFIG_ENABLED(hdd_ctx, HDD_IPA_IPV6_ENABLE_MASK);
641}
642
643/**
644 * hdd_ipa_is_rm_enabled() - Is IPA resource manager enabled?
645 * @hdd_ipa: Global HDD IPA context
646 *
647 * Return: true if resource manager is enabled, otherwise false
648 */
649static inline bool hdd_ipa_is_rm_enabled(hdd_context_t *hdd_ctx)
650{
651 return HDD_IPA_IS_CONFIG_ENABLED(hdd_ctx, HDD_IPA_RM_ENABLE_MASK);
652}
653
654/**
655 * hdd_ipa_is_rt_debugging_enabled() - Is IPA real-time debug enabled?
656 * @hdd_ipa: Global HDD IPA context
657 *
658 * Return: true if resource manager is enabled, otherwise false
659 */
660static inline bool hdd_ipa_is_rt_debugging_enabled(hdd_context_t *hdd_ctx)
661{
662 return HDD_IPA_IS_CONFIG_ENABLED(hdd_ctx, HDD_IPA_REAL_TIME_DEBUGGING);
663}
664
665/**
666 * hdd_ipa_is_clk_scaling_enabled() - Is IPA clock scaling enabled?
667 * @hdd_ipa: Global HDD IPA context
668 *
669 * Return: true if clock scaling is enabled, otherwise false
670 */
671static inline bool hdd_ipa_is_clk_scaling_enabled(hdd_context_t *hdd_ctx)
672{
673 return HDD_IPA_IS_CONFIG_ENABLED(hdd_ctx,
674 HDD_IPA_CLK_SCALING_ENABLE_MASK |
675 HDD_IPA_RM_ENABLE_MASK);
676}
677
/**
 * hdd_ipa_uc_rt_debug_host_fill() - fill one rt debug ring-buffer entry
 * @ctext: pointer to hdd context.
 *
 * Timer callback of rt_debug_fill_timer. Snapshots the current IPA
 * counters into the next slot of the circular rt_bug_buffer (under
 * rt_debug_lock) and re-arms itself.
 *
 * NOTE(review): rx_mcbc_count is never written here, so it stays 0 in
 * every snapshot — confirm whether a counter source exists for it.
 *
 * Return: none
 */
static void hdd_ipa_uc_rt_debug_host_fill(void *ctext)
{
	hdd_context_t *hdd_ctx = (hdd_context_t *)ctext;
	struct hdd_ipa_priv *hdd_ipa;
	struct uc_rt_debug_info *dump_info = NULL;

	if (wlan_hdd_validate_context(hdd_ctx))
		return;

	if (!hdd_ctx->hdd_ipa || !hdd_ipa_uc_is_enabled(hdd_ctx)) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
			"%s: IPA UC is not enabled", __func__);
		return;
	}

	hdd_ipa = (struct hdd_ipa_priv *)hdd_ctx->hdd_ipa;

	cdf_mutex_acquire(&hdd_ipa->rt_debug_lock);
	/* circular buffer: index wraps modulo the buffer size */
	dump_info = &hdd_ipa->rt_bug_buffer[
		hdd_ipa->rt_buf_fill_index % HDD_IPA_UC_RT_DEBUG_BUF_COUNT];

	dump_info->time = cdf_mc_timer_get_system_time();
	dump_info->ipa_excep_count = hdd_ipa->stats.num_rx_excep;
	dump_info->rx_drop_count = hdd_ipa->ipa_rx_internel_drop_count;
	dump_info->net_sent_count = hdd_ipa->ipa_rx_net_send_count;
	dump_info->rx_discard_count = hdd_ipa->ipa_rx_discard;
	dump_info->tx_mcbc_count = hdd_ipa->stats.num_tx_bcmc;
	dump_info->tx_fwd_count = hdd_ipa->ipa_tx_forward;
	dump_info->rx_destructor_call = hdd_ipa->ipa_rx_destructor_count;
	hdd_ipa->rt_buf_fill_index++;
	cdf_mutex_release(&hdd_ipa->rt_debug_lock);

	/* re-arm for the next snapshot */
	cdf_mc_timer_start(&hdd_ipa->rt_debug_fill_timer,
		HDD_IPA_UC_RT_DEBUG_FILL_INTERVAL);
}
721
/**
 * hdd_ipa_uc_rt_debug_host_dump() - dump the rt debug ring buffer
 * @hdd_ctx: pointer to hdd context.
 *
 * Prints all HDD_IPA_UC_RT_DEBUG_BUF_COUNT snapshots, oldest first
 * (starting at rt_buf_fill_index, the slot about to be overwritten),
 * under rt_debug_lock. Columns match the header line; note DSCD
 * (rx_discard_count) is printed last.
 *
 * Return: none
 */
void hdd_ipa_uc_rt_debug_host_dump(hdd_context_t *hdd_ctx)
{
	struct hdd_ipa_priv *hdd_ipa;
	unsigned int dump_count;
	unsigned int dump_index;
	struct uc_rt_debug_info *dump_info = NULL;

	if (wlan_hdd_validate_context(hdd_ctx))
		return;

	hdd_ipa = hdd_ctx->hdd_ipa;
	if (!hdd_ipa || !hdd_ipa_uc_is_enabled(hdd_ctx)) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
			"%s: IPA UC is not enabled", __func__);
		return;
	}

	HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
		"========= WLAN-IPA DEBUG BUF DUMP ==========\n");
	HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
		"     TM     :   EXEP   :   DROP   :   NETS   :   MCBC   :   TXFD   :   DSTR   :   DSCD\n");

	cdf_mutex_acquire(&hdd_ipa->rt_debug_lock);
	for (dump_count = 0;
		dump_count < HDD_IPA_UC_RT_DEBUG_BUF_COUNT;
		dump_count++) {
		/* start at the oldest slot and wrap around */
		dump_index = (hdd_ipa->rt_buf_fill_index + dump_count) %
			HDD_IPA_UC_RT_DEBUG_BUF_COUNT;
		dump_info = &hdd_ipa->rt_bug_buffer[dump_index];
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
			"%12lu:%10llu:%10llu:%10llu:%10llu:%10llu:%10llu:%10llu\n",
			dump_info->time, dump_info->ipa_excep_count,
			dump_info->rx_drop_count, dump_info->net_sent_count,
			dump_info->tx_mcbc_count, dump_info->tx_fwd_count,
			dump_info->rx_destructor_call,
			dump_info->rx_discard_count);
	}
	cdf_mutex_release(&hdd_ipa->rt_debug_lock);
	HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
		"======= WLAN-IPA DEBUG BUF DUMP END ========\n");
}
771
772/**
773 * hdd_ipa_uc_rt_debug_handler - periodic memory health monitor handler
774 * @ctext: pointer to hdd context.
775 *
776 * periodically called by timer expire
777 * will try to alloc dummy memory and detect out of memory condition
778 * if out of memory detected, dump wlan-ipa stats
779 *
780 * Return: none
781 */
782static void hdd_ipa_uc_rt_debug_handler(void *ctext)
783{
784 hdd_context_t *hdd_ctx = (hdd_context_t *)ctext;
785 struct hdd_ipa_priv *hdd_ipa = (struct hdd_ipa_priv *)hdd_ctx->hdd_ipa;
786 void *dummy_ptr = NULL;
787
788 if (wlan_hdd_validate_context(hdd_ctx))
789 return;
790
791 if (!hdd_ipa_is_rt_debugging_enabled(hdd_ctx)) {
792 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
793 "%s: IPA RT debug is not enabled", __func__);
794 return;
795 }
796
797 /* Allocate dummy buffer periodically and free immediately. this will
798 * proactively detect OOM and if allocation fails dump ipa stats
799 */
800 dummy_ptr = kmalloc(HDD_IPA_UC_DEBUG_DUMMY_MEM_SIZE,
801 GFP_KERNEL | GFP_ATOMIC);
802 if (!dummy_ptr) {
803 HDD_IPA_LOG(CDF_TRACE_LEVEL_FATAL,
804 "%s: Dummy alloc fail", __func__);
805 hdd_ipa_uc_rt_debug_host_dump(hdd_ctx);
806 hdd_ipa_uc_stat_request(
807 hdd_get_adapter(hdd_ctx, WLAN_HDD_SOFTAP), 1);
808 } else {
809 kfree(dummy_ptr);
810 }
811
812 cdf_mc_timer_start(&hdd_ipa->rt_debug_timer,
813 HDD_IPA_UC_RT_DEBUG_PERIOD);
814}
815
816/**
817 * hdd_ipa_uc_rt_debug_destructor - called by data packet free
818 * @skb: packet pinter
819 *
820 * when free data packet, will be invoked by wlan client and will increase
821 * free counter
822 *
823 * Return: none
824 */
825void hdd_ipa_uc_rt_debug_destructor(struct sk_buff *skb)
826{
827 if (!ghdd_ipa) {
828 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
829 "%s: invalid hdd context", __func__);
830 return;
831 }
832
833 ghdd_ipa->ipa_rx_destructor_count++;
834}
835
/**
 * hdd_ipa_uc_rt_debug_deinit() - release rt debugging resources
 * @hdd_ctx: hdd main context
 *
 * Mirrors hdd_ipa_uc_rt_debug_init(): the fill timer and lock are torn
 * down unconditionally (init always creates them), while rt_debug_timer
 * is only destroyed when the rt-debug feature was enabled.
 *
 * Return: none
 */
static void hdd_ipa_uc_rt_debug_deinit(hdd_context_t *hdd_ctx)
{
	struct hdd_ipa_priv *hdd_ipa = (struct hdd_ipa_priv *)hdd_ctx->hdd_ipa;

	/* a timer must be stopped before it can be destroyed */
	if (CDF_TIMER_STATE_STOPPED !=
		cdf_mc_timer_get_current_state(&hdd_ipa->rt_debug_fill_timer)) {
		cdf_mc_timer_stop(&hdd_ipa->rt_debug_fill_timer);
	}
	cdf_mc_timer_destroy(&hdd_ipa->rt_debug_fill_timer);
	cdf_mutex_destroy(&hdd_ipa->rt_debug_lock);

	if (!hdd_ipa_is_rt_debugging_enabled(hdd_ctx)) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
			"%s: IPA RT debug is not enabled", __func__);
		return;
	}

	if (CDF_TIMER_STATE_STOPPED !=
		cdf_mc_timer_get_current_state(&hdd_ipa->rt_debug_timer)) {
		cdf_mc_timer_stop(&hdd_ipa->rt_debug_timer);
	}
	cdf_mc_timer_destroy(&hdd_ipa->rt_debug_timer);
}
867
/**
 * hdd_ipa_uc_rt_debug_init() - initialize rt debugging resources
 * @hdd_ctx: hdd main context
 *
 * Always creates the snapshot lock, ring buffer, counters and the fill
 * timer; the OOM-monitor timer (rt_debug_timer) is created and started
 * only when the rt-debug feature bit is set in the ini config.
 * hdd_ipa_uc_rt_debug_deinit() must mirror this.
 *
 * Return: none
 */
static void hdd_ipa_uc_rt_debug_init(hdd_context_t *hdd_ctx)
{
	struct hdd_ipa_priv *hdd_ipa = (struct hdd_ipa_priv *)hdd_ctx->hdd_ipa;

	cdf_mutex_init(&hdd_ipa->rt_debug_lock);
	cdf_mc_timer_init(&hdd_ipa->rt_debug_fill_timer, CDF_TIMER_TYPE_SW,
		hdd_ipa_uc_rt_debug_host_fill, (void *)hdd_ctx);
	hdd_ipa->rt_buf_fill_index = 0;
	cdf_mem_zero(hdd_ipa->rt_bug_buffer,
		sizeof(struct uc_rt_debug_info) *
		HDD_IPA_UC_RT_DEBUG_BUF_COUNT);
	hdd_ipa->ipa_tx_forward = 0;
	hdd_ipa->ipa_rx_discard = 0;
	hdd_ipa->ipa_rx_net_send_count = 0;
	hdd_ipa->ipa_rx_internel_drop_count = 0;
	hdd_ipa->ipa_rx_destructor_count = 0;

	cdf_mc_timer_start(&hdd_ipa->rt_debug_fill_timer,
		HDD_IPA_UC_RT_DEBUG_FILL_INTERVAL);

	/* Reatime debug enable on feature enable */
	if (!hdd_ipa_is_rt_debugging_enabled(hdd_ctx)) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
			"%s: IPA RT debug is not enabled", __func__);
		return;
	}
	cdf_mc_timer_init(&hdd_ipa->rt_debug_timer, CDF_TIMER_TYPE_SW,
		hdd_ipa_uc_rt_debug_handler, (void *)hdd_ctx);
	cdf_mc_timer_start(&hdd_ipa->rt_debug_timer,
		HDD_IPA_UC_RT_DEBUG_PERIOD);

}
908
909/**
910 * hdd_ipa_uc_stat_query() - Query the IPA stats
911 * @hdd_ctx: Global HDD context
912 * @ipa_tx_diff: tx packet count diff from previous
913 * tx packet count
914 * @ipa_rx_diff: rx packet count diff from previous
915 * rx packet count
916 *
917 * Return: true if IPA is enabled, false otherwise
918 */
919void hdd_ipa_uc_stat_query(hdd_context_t *pHddCtx,
920 uint32_t *ipa_tx_diff, uint32_t *ipa_rx_diff)
921{
922 struct hdd_ipa_priv *hdd_ipa;
923
924 hdd_ipa = (struct hdd_ipa_priv *)pHddCtx->hdd_ipa;
925 *ipa_tx_diff = 0;
926 *ipa_rx_diff = 0;
927
928 if (!hdd_ipa_is_enabled(pHddCtx) ||
929 !(hdd_ipa_uc_is_enabled(pHddCtx))) {
930 return;
931 }
932
Yun Parke59b3912015-11-09 13:19:06 -0800933 cdf_mutex_acquire(&hdd_ipa->ipa_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800934 if ((HDD_IPA_UC_NUM_WDI_PIPE == hdd_ipa->activated_fw_pipe) &&
935 (false == hdd_ipa->resource_loading)) {
936 *ipa_tx_diff = hdd_ipa->ipa_tx_packets_diff;
937 *ipa_rx_diff = hdd_ipa->ipa_rx_packets_diff;
938 HDD_IPA_LOG(LOG1, "STAT Query TX DIFF %d, RX DIFF %d",
939 *ipa_tx_diff, *ipa_rx_diff);
940 }
Yun Parke59b3912015-11-09 13:19:06 -0800941 cdf_mutex_release(&hdd_ipa->ipa_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800942 return;
943}
944
945/**
946 * hdd_ipa_uc_stat_request() - Get IPA stats from IPA.
947 * @adapter: network adapter
948 * @reason: STAT REQ Reason
949 *
950 * Return: None
951 */
952void hdd_ipa_uc_stat_request(hdd_adapter_t *adapter, uint8_t reason)
953{
954 hdd_context_t *pHddCtx;
955 struct hdd_ipa_priv *hdd_ipa;
956
957 if (!adapter) {
958 return;
959 }
960
961 pHddCtx = (hdd_context_t *)adapter->pHddCtx;
962 hdd_ipa = (struct hdd_ipa_priv *)pHddCtx->hdd_ipa;
963 if (!hdd_ipa_is_enabled(pHddCtx) ||
964 !(hdd_ipa_uc_is_enabled(pHddCtx))) {
965 return;
966 }
967
968 HDD_IPA_LOG(LOG1, "STAT REQ Reason %d", reason);
Yun Parke59b3912015-11-09 13:19:06 -0800969 cdf_mutex_acquire(&hdd_ipa->ipa_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800970 if ((HDD_IPA_UC_NUM_WDI_PIPE == hdd_ipa->activated_fw_pipe) &&
971 (false == hdd_ipa->resource_loading)) {
972 hdd_ipa->stat_req_reason = reason;
973 wma_cli_set_command(
974 (int)adapter->sessionId,
975 (int)WMA_VDEV_TXRX_GET_IPA_UC_FW_STATS_CMDID,
976 0, VDEV_CMD);
977 }
Yun Parke59b3912015-11-09 13:19:06 -0800978 cdf_mutex_release(&hdd_ipa->ipa_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800979}
980
981/**
982 * hdd_ipa_uc_find_add_assoc_sta() - Find associated station
983 * @hdd_ipa: Global HDD IPA context
984 * @sta_add: Should station be added
985 * @sta_id: ID of the station being queried
986 *
987 * Return: true if the station was found
988 */
989static bool hdd_ipa_uc_find_add_assoc_sta(struct hdd_ipa_priv *hdd_ipa,
990 bool sta_add, uint8_t sta_id)
991{
992 bool sta_found = false;
993 uint8_t idx;
994 for (idx = 0; idx < WLAN_MAX_STA_COUNT; idx++) {
995 if ((hdd_ipa->assoc_stas_map[idx].is_reserved) &&
996 (hdd_ipa->assoc_stas_map[idx].sta_id == sta_id)) {
997 sta_found = true;
998 break;
999 }
1000 }
1001 if (sta_add && sta_found) {
1002 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
1003 "%s: STA ID %d already exist, cannot add",
1004 __func__, sta_id);
1005 return sta_found;
1006 }
1007 if (sta_add) {
1008 for (idx = 0; idx < WLAN_MAX_STA_COUNT; idx++) {
1009 if (!hdd_ipa->assoc_stas_map[idx].is_reserved) {
1010 hdd_ipa->assoc_stas_map[idx].is_reserved = true;
1011 hdd_ipa->assoc_stas_map[idx].sta_id = sta_id;
1012 return sta_found;
1013 }
1014 }
1015 }
1016 if (!sta_add && !sta_found) {
1017 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
1018 "%s: STA ID %d does not exist, cannot delete",
1019 __func__, sta_id);
1020 return sta_found;
1021 }
1022 if (!sta_add) {
1023 for (idx = 0; idx < WLAN_MAX_STA_COUNT; idx++) {
1024 if ((hdd_ipa->assoc_stas_map[idx].is_reserved) &&
1025 (hdd_ipa->assoc_stas_map[idx].sta_id == sta_id)) {
1026 hdd_ipa->assoc_stas_map[idx].is_reserved =
1027 false;
1028 hdd_ipa->assoc_stas_map[idx].sta_id = 0xFF;
1029 return sta_found;
1030 }
1031 }
1032 }
1033 return sta_found;
1034}
1035
1036/**
1037 * hdd_ipa_uc_enable_pipes() - Enable IPA uC pipes
1038 * @hdd_ipa: Global HDD IPA context
1039 *
1040 * Return: 0 on success, negative errno if error
1041 */
1042static int hdd_ipa_uc_enable_pipes(struct hdd_ipa_priv *hdd_ipa)
1043{
1044 int result;
1045 p_cds_contextType cds_ctx = hdd_ipa->hdd_ctx->pcds_context;
1046
1047 /* ACTIVATE TX PIPE */
Yun Park4cab6ee2015-10-27 11:43:40 -07001048 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
1049 "%s: Enable TX PIPE(tx_pipe_handle=%d)",
1050 __func__, hdd_ipa->tx_pipe_handle);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001051 result = ipa_enable_wdi_pipe(hdd_ipa->tx_pipe_handle);
1052 if (result) {
1053 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
1054 "%s: Enable TX PIPE fail, code %d",
1055 __func__, result);
1056 return result;
1057 }
1058 result = ipa_resume_wdi_pipe(hdd_ipa->tx_pipe_handle);
1059 if (result) {
1060 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
1061 "%s: Resume TX PIPE fail, code %d",
1062 __func__, result);
1063 return result;
1064 }
1065 ol_txrx_ipa_uc_set_active(cds_ctx->pdev_txrx_ctx, true, true);
1066
1067 /* ACTIVATE RX PIPE */
Yun Park4cab6ee2015-10-27 11:43:40 -07001068 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
1069 "%s: Enable RX PIPE(rx_pipe_handle=%d)",
1070 __func__, hdd_ipa->rx_pipe_handle);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001071 result = ipa_enable_wdi_pipe(hdd_ipa->rx_pipe_handle);
1072 if (result) {
1073 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
1074 "%s: Enable RX PIPE fail, code %d",
1075 __func__, result);
1076 return result;
1077 }
1078 result = ipa_resume_wdi_pipe(hdd_ipa->rx_pipe_handle);
1079 if (result) {
1080 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
1081 "%s: Resume RX PIPE fail, code %d",
1082 __func__, result);
1083 return result;
1084 }
1085 ol_txrx_ipa_uc_set_active(cds_ctx->pdev_txrx_ctx, true, false);
Leo Change3e49442015-10-26 20:07:13 -07001086 hdd_ipa->ipa_pipes_down = false;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001087 return 0;
1088}
1089
1090/**
1091 * hdd_ipa_uc_disable_pipes() - Disable IPA uC pipes
1092 * @hdd_ipa: Global HDD IPA context
1093 *
1094 * Return: 0 on success, negative errno if error
1095 */
1096static int hdd_ipa_uc_disable_pipes(struct hdd_ipa_priv *hdd_ipa)
1097{
1098 int result;
1099
Leo Change3e49442015-10-26 20:07:13 -07001100 hdd_ipa->ipa_pipes_down = true;
1101
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001102 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "%s: Disable RX PIPE", __func__);
1103 result = ipa_suspend_wdi_pipe(hdd_ipa->rx_pipe_handle);
1104 if (result) {
1105 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
1106 "%s: Suspend RX PIPE fail, code %d",
1107 __func__, result);
1108 return result;
1109 }
1110 result = ipa_disable_wdi_pipe(hdd_ipa->rx_pipe_handle);
1111 if (result) {
1112 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
1113 "%s: Disable RX PIPE fail, code %d",
1114 __func__, result);
1115 return result;
1116 }
1117
1118 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "%s: Disable TX PIPE", __func__);
1119 result = ipa_suspend_wdi_pipe(hdd_ipa->tx_pipe_handle);
1120 if (result) {
1121 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
1122 "%s: Suspend TX PIPE fail, code %d",
1123 __func__, result);
1124 return result;
1125 }
1126 result = ipa_disable_wdi_pipe(hdd_ipa->tx_pipe_handle);
1127 if (result) {
1128 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
1129 "%s: Disable TX PIPE fail, code %d",
1130 __func__, result);
1131 return result;
1132 }
1133
1134 return 0;
1135}
1136
/**
 * hdd_ipa_uc_handle_first_con() - Handle first uC IPA connection
 * @hdd_ipa: Global HDD IPA context
 *
 * Starts the pipe bring-up sequence for the first active interface.
 * With the IPA resource manager enabled, a PROD resource request is issued
 * first; a synchronous grant (return 0) enables the pipes immediately,
 * while an asynchronous grant defers enabling to the RM callback.
 * With RM disabled the pipes are enabled directly.
 *
 * Return: 0 on success, negative errno if error
 */
static int hdd_ipa_uc_handle_first_con(struct hdd_ipa_priv *hdd_ipa)
{
	hdd_ipa->activated_fw_pipe = 0;
	hdd_ipa->resource_loading = true;

	/* If RM feature enabled
	 * Request PROD Resource first
	 * PROD resource may return sync or async manners */
	if (hdd_ipa_is_rm_enabled(hdd_ipa->hdd_ctx)) {
		/* ipa_rm_request_resource() returning 0 means the grant is
		 * synchronous; non-zero (e.g. -EINPROGRESS) means the grant
		 * arrives later via hdd_ipa_uc_rm_notify_handler(). */
		if (!ipa_rm_request_resource(IPA_RM_RESOURCE_WLAN_PROD)) {
			/* RM PROD request sync return
			 * enable pipe immediately
			 */
			if (hdd_ipa_uc_enable_pipes(hdd_ipa)) {
				HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
					"%s: IPA WDI Pipe activation failed",
					__func__);
				hdd_ipa->resource_loading = false;
				return -EBUSY;
			}
		}
		/* NOTE(review): on the RM success paths resource_loading
		 * stays true here; it is cleared later in hdd_ipa_uc_op_cb()
		 * once both FW pipes report resumed.  The RM-disabled branch
		 * below clears it immediately instead — confirm this
		 * asymmetry is intentional. */
	} else {
		/* RM Disabled
		 * Just enabled all the PIPEs
		 */
		if (hdd_ipa_uc_enable_pipes(hdd_ipa)) {
			HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
				    "%s: IPA WDI Pipe activation failed",
				    __func__);
			hdd_ipa->resource_loading = false;
			return -EBUSY;
		}
		hdd_ipa->resource_loading = false;
	}

	HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
		    "%s: IPA WDI Pipes activated successfully", __func__);
	return 0;
}
1182
1183/**
1184 * hdd_ipa_uc_handle_last_discon() - Handle last uC IPA disconnection
1185 * @hdd_ipa: Global HDD IPA context
1186 *
1187 * Return: None
1188 */
1189static void hdd_ipa_uc_handle_last_discon(struct hdd_ipa_priv *hdd_ipa)
1190{
1191 p_cds_contextType cds_ctx = hdd_ipa->hdd_ctx->pcds_context;
1192
1193 hdd_ipa->resource_unloading = true;
1194 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "%s: Disable FW RX PIPE", __func__);
1195 ol_txrx_ipa_uc_set_active(cds_ctx->pdev_txrx_ctx, false, false);
1196 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "%s: Disable FW TX PIPE", __func__);
1197 ol_txrx_ipa_uc_set_active(cds_ctx->pdev_txrx_ctx, false, true);
1198}
1199
1200/**
1201 * hdd_ipa_uc_rm_notify_handler() - IPA uC resource notification handler
1202 * @context: User context registered with TL (the IPA Global context is
1203 * registered
1204 * @rxpkt: Packet containing the notification
1205 * @staid: ID of the station associated with the packet
1206 *
1207 * Return: None
1208 */
1209static void
1210hdd_ipa_uc_rm_notify_handler(void *context, enum ipa_rm_event event)
1211{
1212 struct hdd_ipa_priv *hdd_ipa = context;
1213 CDF_STATUS status = CDF_STATUS_SUCCESS;
1214
1215 /*
1216 * When SSR is going on or driver is unloading, just return.
1217 */
1218 status = wlan_hdd_validate_context(hdd_ipa->hdd_ctx);
1219 if (0 != status) {
1220 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "HDD context is not valid");
1221 return;
1222 }
1223
1224 if (!hdd_ipa_is_rm_enabled(hdd_ipa->hdd_ctx))
1225 return;
1226
1227 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "%s, event code %d",
1228 __func__, event);
1229
1230 switch (event) {
1231 case IPA_RM_RESOURCE_GRANTED:
1232 /* Differed RM Granted */
1233 hdd_ipa_uc_enable_pipes(hdd_ipa);
Yun Parke59b3912015-11-09 13:19:06 -08001234 cdf_mutex_acquire(&hdd_ipa->ipa_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001235 if ((false == hdd_ipa->resource_unloading) &&
1236 (!hdd_ipa->activated_fw_pipe)) {
1237 hdd_ipa_uc_enable_pipes(hdd_ipa);
1238 }
Yun Parke59b3912015-11-09 13:19:06 -08001239 cdf_mutex_release(&hdd_ipa->ipa_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001240 break;
1241
1242 case IPA_RM_RESOURCE_RELEASED:
1243 /* Differed RM Released */
1244 hdd_ipa->resource_unloading = false;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001245 break;
1246
1247 default:
1248 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
1249 "%s, invalid event code %d", __func__, event);
1250 break;
1251 }
1252}
1253
1254/**
1255 * hdd_ipa_uc_rm_notify_defer() - Defer IPA uC notification
1256 * @hdd_ipa: Global HDD IPA context
1257 * @event: IPA resource manager event to be deferred
1258 *
1259 * This function is called when a resource manager event is received
1260 * from firmware in interrupt context. This function will defer the
1261 * handling to the OL RX thread
1262 *
1263 * Return: None
1264 */
1265static void hdd_ipa_uc_rm_notify_defer(struct work_struct *work)
1266{
1267 enum ipa_rm_event event;
1268 struct uc_rm_work_struct *uc_rm_work = container_of(work,
1269 struct uc_rm_work_struct, work);
1270 struct hdd_ipa_priv *hdd_ipa = container_of(uc_rm_work,
1271 struct hdd_ipa_priv, uc_rm_work);
1272
1273 cds_ssr_protect(__func__);
1274 event = uc_rm_work->event;
1275 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO_HIGH,
1276 "%s, posted event %d", __func__, event);
1277
1278 hdd_ipa_uc_rm_notify_handler(hdd_ipa, event);
1279 cds_ssr_unprotect(__func__);
1280
1281 return;
1282}
1283
1284/**
1285 * hdd_ipa_uc_proc_pending_event() - Process IPA uC pending events
1286 * @hdd_ipa: Global HDD IPA context
1287 *
1288 * Return: None
1289 */
1290static void hdd_ipa_uc_proc_pending_event(struct hdd_ipa_priv *hdd_ipa)
1291{
1292 unsigned int pending_event_count;
1293 struct ipa_uc_pending_event *pending_event = NULL;
1294
1295 cdf_list_size(&hdd_ipa->pending_event, &pending_event_count);
1296 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
1297 "%s, Pending Event Count %d", __func__, pending_event_count);
1298 if (!pending_event_count) {
1299 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
1300 "%s, No Pending Event", __func__);
1301 return;
1302 }
1303
1304 cdf_list_remove_front(&hdd_ipa->pending_event,
1305 (cdf_list_node_t **)&pending_event);
1306 while (pending_event != NULL) {
1307 hdd_ipa_wlan_evt(pending_event->adapter,
1308 pending_event->type,
1309 pending_event->sta_id,
1310 pending_event->mac_addr);
1311 cdf_mem_free(pending_event);
1312 pending_event = NULL;
1313 cdf_list_remove_front(&hdd_ipa->pending_event,
1314 (cdf_list_node_t **)&pending_event);
1315 }
1316}
1317
/**
 * hdd_ipa_uc_op_cb() - IPA uC operation callback
 * @op_msg: operation message received from firmware; freed here on every
 *          path once processing completes
 * @usr_ctxt: user context registered with TL (we register the HDD Global
 *            context)
 *
 * Dispatches on the firmware opcode: pipe RESUME/SUSPEND acks update the
 * activated-pipe count and drive the load/unload state machine under
 * ipa_lock; STATS responses either dump host/FW/IPA counters (DEBUG
 * reason) or update the bandwidth-calculation deltas (BW_CAL reason).
 *
 * Return: None
 */
static void hdd_ipa_uc_op_cb(struct op_msg_type *op_msg, void *usr_ctxt)
{
	struct op_msg_type *msg = op_msg;
	struct ipa_uc_fw_stats *uc_fw_stat;
	struct IpaHwStatsWDIInfoData_t ipa_stat;
	struct hdd_ipa_priv *hdd_ipa;
	hdd_context_t *hdd_ctx;
	CDF_STATUS status = CDF_STATUS_SUCCESS;

	if (!op_msg || !usr_ctxt) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "%s, INVALID ARG", __func__);
		return;
	}

	if (HDD_IPA_UC_OPCODE_MAX <= msg->op_code) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
			    "%s, INVALID OPCODE %d", __func__, msg->op_code);
		return;
	}

	hdd_ctx = (hdd_context_t *) usr_ctxt;

	/*
	 * When SSR is going on or driver is unloading, just return.
	 */
	status = wlan_hdd_validate_context(hdd_ctx);
	if (0 != status) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "HDD context is not valid");
		cdf_mem_free(op_msg);
		return;
	}

	hdd_ipa = (struct hdd_ipa_priv *)hdd_ctx->hdd_ipa;

	HDD_IPA_LOG(CDF_TRACE_LEVEL_DEBUG,
		    "%s, OPCODE %s", __func__, op_string[msg->op_code]);

	/* FW acked a pipe resume: count it; when both WDI pipes are up,
	 * loading is complete — replay queued events and satisfy any
	 * pending CONS request. */
	if ((HDD_IPA_UC_OPCODE_TX_RESUME == msg->op_code) ||
	    (HDD_IPA_UC_OPCODE_RX_RESUME == msg->op_code)) {
		cdf_mutex_acquire(&hdd_ipa->ipa_lock);
		hdd_ipa->activated_fw_pipe++;
		if (HDD_IPA_UC_NUM_WDI_PIPE == hdd_ipa->activated_fw_pipe) {
			hdd_ipa->resource_loading = false;
			hdd_ipa_uc_proc_pending_event(hdd_ipa);
			if (hdd_ipa->pending_cons_req)
				ipa_rm_notify_completion(
						IPA_RM_RESOURCE_GRANTED,
						IPA_RM_RESOURCE_WLAN_CONS);
			hdd_ipa->pending_cons_req = false;
		}
		cdf_mutex_release(&hdd_ipa->ipa_lock);
	}

	/* FW acked a pipe suspend: count down; when the last pipe stops,
	 * finish the teardown (disable host pipes, release the PROD
	 * resource) and replay any queued events. */
	if ((HDD_IPA_UC_OPCODE_TX_SUSPEND == msg->op_code) ||
	    (HDD_IPA_UC_OPCODE_RX_SUSPEND == msg->op_code)) {
		cdf_mutex_acquire(&hdd_ipa->ipa_lock);
		hdd_ipa->activated_fw_pipe--;
		if (!hdd_ipa->activated_fw_pipe) {
			hdd_ipa_uc_disable_pipes(hdd_ipa);
			if (hdd_ipa_is_rm_enabled(hdd_ipa->hdd_ctx))
				ipa_rm_release_resource(
					IPA_RM_RESOURCE_WLAN_PROD);
			/* Sync return success from IPA
			 * Enable/resume all the PIPEs */
			hdd_ipa->resource_unloading = false;
			hdd_ipa_uc_proc_pending_event(hdd_ipa);
			hdd_ipa->pending_cons_req = false;
		}
		cdf_mutex_release(&hdd_ipa->ipa_lock);
	}

	/* Debug stat request: dump host-side, FW-side, and IPA-side
	 * counters to the trace log. */
	if ((HDD_IPA_UC_OPCODE_STATS == msg->op_code) &&
	    (HDD_IPA_UC_STAT_REASON_DEBUG == hdd_ipa->stat_req_reason)) {

		/* STATs from host */
		CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
			  "==== IPA_UC WLAN_HOST CE ====\n"
			  "CE RING BASE: 0x%llx\n"
			  "CE RING SIZE: %d\n"
			  "CE REG ADDR : 0x%llx",
			  (unsigned long long)hdd_ipa->ce_sr_base_paddr,
			  hdd_ipa->ce_sr_ring_size,
			  (unsigned long long)hdd_ipa->ce_reg_paddr);
		CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
			  "==== IPA_UC WLAN_HOST TX ====\n"
			  "COMP RING BASE: 0x%llx\n"
			  "COMP RING SIZE: %d\n"
			  "NUM ALLOC BUF: %d\n"
			  "COMP RING DBELL : 0x%llx",
			  (unsigned long long)hdd_ipa->tx_comp_ring_base_paddr,
			  hdd_ipa->tx_comp_ring_size,
			  hdd_ipa->tx_num_alloc_buffer,
			  (unsigned long long)hdd_ipa->tx_comp_doorbell_paddr);
		CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
			  "==== IPA_UC WLAN_HOST RX ====\n"
			  "IND RING BASE: 0x%llx\n"
			  "IND RING SIZE: %d\n"
			  "IND RING DBELL : 0x%llx\n"
			  "PROC DONE IND ADDR : 0x%llx\n"
			  "NUM EXCP PKT : %llu\n"
			  "NUM TX BCMC : %llu\n"
			  "NUM TX BCMC ERR : %llu",
			  (unsigned long long)hdd_ipa->rx_rdy_ring_base_paddr,
			  hdd_ipa->rx_rdy_ring_size,
			  (unsigned long long)hdd_ipa->rx_ready_doorbell_paddr,
			  (unsigned long long)hdd_ipa->rx_proc_done_idx_paddr,
			  hdd_ipa->stats.num_rx_excep,
			  hdd_ipa->stats.num_tx_bcmc,
			  (unsigned long long)hdd_ipa->stats.num_tx_bcmc_err);
		CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
			  "==== IPA_UC WLAN_HOST CONTROL ====\n"
			  "SAP NUM STAs: %d\n"
			  "STA CONNECTED: %d\n"
			  "TX PIPE HDL: %d\n"
			  "RX PIPE HDL : %d\n"
			  "RSC LOADING : %d\n"
			  "RSC UNLOADING : %d\n"
			  "PNDNG CNS RQT : %d",
			  hdd_ipa->sap_num_connected_sta,
			  hdd_ipa->sta_connected,
			  hdd_ipa->tx_pipe_handle,
			  hdd_ipa->rx_pipe_handle,
			  (unsigned int)hdd_ipa->resource_loading,
			  (unsigned int)hdd_ipa->resource_unloading,
			  (unsigned int)hdd_ipa->pending_cons_req);

		/* STATs from FW — the counters immediately follow the
		 * opcode header in the firmware message. */
		uc_fw_stat = (struct ipa_uc_fw_stats *)
			     ((uint8_t *)op_msg + sizeof(struct op_msg_type));
		/* NOTE(review): "COMP RING DBELL CACHED VAL" is printed
		 * twice with the same value below — looks like a copy-paste
		 * duplicate; confirm before removing. */
		CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
			  "==== IPA_UC WLAN_FW TX ====\n"
			  "COMP RING BASE: 0x%x\n"
			  "COMP RING SIZE: %d\n"
			  "COMP RING DBELL : 0x%x\n"
			  "COMP RING DBELL IND VAL : %d\n"
			  "COMP RING DBELL CACHED VAL : %d\n"
			  "COMP RING DBELL CACHED VAL : %d\n"
			  "PKTS ENQ : %d\n"
			  "PKTS COMP : %d\n"
			  "IS SUSPEND : %d\n"
			  "RSVD : 0x%x",
			  uc_fw_stat->tx_comp_ring_base,
			  uc_fw_stat->tx_comp_ring_size,
			  uc_fw_stat->tx_comp_ring_dbell_addr,
			  uc_fw_stat->tx_comp_ring_dbell_ind_val,
			  uc_fw_stat->tx_comp_ring_dbell_cached_val,
			  uc_fw_stat->tx_comp_ring_dbell_cached_val,
			  uc_fw_stat->tx_pkts_enqueued,
			  uc_fw_stat->tx_pkts_completed,
			  uc_fw_stat->tx_is_suspend, uc_fw_stat->tx_reserved);
		CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
			  "==== IPA_UC WLAN_FW RX ====\n"
			  "IND RING BASE: 0x%x\n"
			  "IND RING SIZE: %d\n"
			  "IND RING DBELL : 0x%x\n"
			  "IND RING DBELL IND VAL : %d\n"
			  "IND RING DBELL CACHED VAL : %d\n"
			  "RDY IND ADDR : 0x%x\n"
			  "RDY IND CACHE VAL : %d\n"
			  "RFIL IND : %d\n"
			  "NUM PKT INDICAT : %d\n"
			  "BUF REFIL : %d\n"
			  "NUM DROP NO SPC : %d\n"
			  "NUM DROP NO BUF : %d\n"
			  "IS SUSPND : %d\n"
			  "RSVD : 0x%x\n",
			  uc_fw_stat->rx_ind_ring_base,
			  uc_fw_stat->rx_ind_ring_size,
			  uc_fw_stat->rx_ind_ring_dbell_addr,
			  uc_fw_stat->rx_ind_ring_dbell_ind_val,
			  uc_fw_stat->rx_ind_ring_dbell_ind_cached_val,
			  uc_fw_stat->rx_ind_ring_rdidx_addr,
			  uc_fw_stat->rx_ind_ring_rd_idx_cached_val,
			  uc_fw_stat->rx_refill_idx,
			  uc_fw_stat->rx_num_pkts_indicated,
			  uc_fw_stat->rx_buf_refilled,
			  uc_fw_stat->rx_num_ind_drop_no_space,
			  uc_fw_stat->rx_num_ind_drop_no_buf,
			  uc_fw_stat->rx_is_suspend, uc_fw_stat->rx_reserved);
		/* STATs from IPA */
		ipa_get_wdi_stats(&ipa_stat);
		CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
			  "==== IPA_UC IPA TX ====\n"
			  "NUM PROCD : %d\n"
			  "CE DBELL : 0x%x\n"
			  "NUM DBELL FIRED : %d\n"
			  "COMP RNG FULL : %d\n"
			  "COMP RNG EMPT : %d\n"
			  "COMP RNG USE HGH : %d\n"
			  "COMP RNG USE LOW : %d\n"
			  "BAM FIFO FULL : %d\n"
			  "BAM FIFO EMPT : %d\n"
			  "BAM FIFO USE HGH : %d\n"
			  "BAM FIFO USE LOW : %d\n"
			  "NUM DBELL : %d\n"
			  "NUM UNEXP DBELL : %d\n"
			  "NUM BAM INT HDL : 0x%x\n"
			  "NUM BAM INT NON-RUN : 0x%x\n"
			  "NUM QMB INT HDL : 0x%x",
			  ipa_stat.tx_ch_stats.num_pkts_processed,
			  ipa_stat.tx_ch_stats.copy_engine_doorbell_value,
			  ipa_stat.tx_ch_stats.num_db_fired,
			  ipa_stat.tx_ch_stats.tx_comp_ring_stats.ringFull,
			  ipa_stat.tx_ch_stats.tx_comp_ring_stats.ringEmpty,
			  ipa_stat.tx_ch_stats.tx_comp_ring_stats.ringUsageHigh,
			  ipa_stat.tx_ch_stats.tx_comp_ring_stats.ringUsageLow,
			  ipa_stat.tx_ch_stats.bam_stats.bamFifoFull,
			  ipa_stat.tx_ch_stats.bam_stats.bamFifoEmpty,
			  ipa_stat.tx_ch_stats.bam_stats.bamFifoUsageHigh,
			  ipa_stat.tx_ch_stats.bam_stats.bamFifoUsageLow,
			  ipa_stat.tx_ch_stats.num_db,
			  ipa_stat.tx_ch_stats.num_unexpected_db,
			  ipa_stat.tx_ch_stats.num_bam_int_handled,
			  ipa_stat.tx_ch_stats.
			  num_bam_int_in_non_runnning_state,
			  ipa_stat.tx_ch_stats.num_qmb_int_handled);

		CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
			  "==== IPA_UC IPA RX ====\n"
			  "MAX OST PKT : %d\n"
			  "NUM PKT PRCSD : %d\n"
			  "RNG RP : 0x%x\n"
			  "COMP RNG FULL : %d\n"
			  "COMP RNG EMPT : %d\n"
			  "COMP RNG USE HGH : %d\n"
			  "COMP RNG USE LOW : %d\n"
			  "BAM FIFO FULL : %d\n"
			  "BAM FIFO EMPT : %d\n"
			  "BAM FIFO USE HGH : %d\n"
			  "BAM FIFO USE LOW : %d\n"
			  "NUM DB : %d\n"
			  "NUM UNEXP DB : %d\n"
			  "NUM BAM INT HNDL : 0x%x\n",
			  ipa_stat.rx_ch_stats.max_outstanding_pkts,
			  ipa_stat.rx_ch_stats.num_pkts_processed,
			  ipa_stat.rx_ch_stats.rx_ring_rp_value,
			  ipa_stat.rx_ch_stats.rx_ind_ring_stats.ringFull,
			  ipa_stat.rx_ch_stats.rx_ind_ring_stats.ringEmpty,
			  ipa_stat.rx_ch_stats.rx_ind_ring_stats.ringUsageHigh,
			  ipa_stat.rx_ch_stats.rx_ind_ring_stats.ringUsageLow,
			  ipa_stat.rx_ch_stats.bam_stats.bamFifoFull,
			  ipa_stat.rx_ch_stats.bam_stats.bamFifoEmpty,
			  ipa_stat.rx_ch_stats.bam_stats.bamFifoUsageHigh,
			  ipa_stat.rx_ch_stats.bam_stats.bamFifoUsageLow,
			  ipa_stat.rx_ch_stats.num_db,
			  ipa_stat.rx_ch_stats.num_unexpected_db,
			  ipa_stat.rx_ch_stats.num_bam_int_handled);
	} else if ((HDD_IPA_UC_OPCODE_STATS == msg->op_code) &&
		   (HDD_IPA_UC_STAT_REASON_BW_CAL == hdd_ipa->stat_req_reason)) {
		/* STATs from FW — update the bandwidth-estimation deltas
		 * (packets since the previous sample) under ipa_lock. */
		uc_fw_stat = (struct ipa_uc_fw_stats *)
			     ((uint8_t *)op_msg + sizeof(struct op_msg_type));
		cdf_mutex_acquire(&hdd_ipa->ipa_lock);
		hdd_ipa->ipa_tx_packets_diff = HDD_BW_GET_DIFF(
			uc_fw_stat->tx_pkts_completed,
			hdd_ipa->ipa_p_tx_packets);
		hdd_ipa->ipa_rx_packets_diff = HDD_BW_GET_DIFF(
			(uc_fw_stat->rx_num_ind_drop_no_space +
			uc_fw_stat->rx_num_ind_drop_no_buf +
			uc_fw_stat->rx_num_pkts_indicated),
			hdd_ipa->ipa_p_rx_packets);

		hdd_ipa->ipa_p_tx_packets = uc_fw_stat->tx_pkts_completed;
		hdd_ipa->ipa_p_rx_packets =
			(uc_fw_stat->rx_num_ind_drop_no_space +
			uc_fw_stat->rx_num_ind_drop_no_buf +
			uc_fw_stat->rx_num_pkts_indicated);
		cdf_mutex_release(&hdd_ipa->ipa_lock);
	} else {
		/* NOTE(review): this branch also triggers for the
		 * RESUME/SUSPEND opcodes handled above (they are not STATS,
		 * so the if/else-if chain falls through here and logs a
		 * spurious "INVALID REASON") — confirm intent. */
		HDD_IPA_LOG(LOGE, "INVALID REASON %d",
			    hdd_ipa->stat_req_reason);
	}
	cdf_mem_free(op_msg);
}
1600
1601
1602/**
1603 * hdd_ipa_uc_offload_enable_disable() - wdi enable/disable notify to fw
1604 * @adapter: device adapter instance
1605 * @offload_type: MCC or SCC
1606 * @enable: TX offload enable or disable
1607 *
1608 * Return: none
1609 */
1610static void hdd_ipa_uc_offload_enable_disable(hdd_adapter_t *adapter,
1611 uint32_t offload_type, uint32_t enable)
1612{
1613 struct sir_ipa_offload_enable_disable ipa_offload_enable_disable;
1614
1615 /* Lower layer may send multiple START_BSS_EVENT in DFS mode or during
1616 * channel change indication. Since these indications are sent by lower
1617 * layer as SAP updates and IPA doesn't have to do anything for these
1618 * updates so ignoring!
1619 */
1620 if (WLAN_HDD_SOFTAP == adapter->device_mode && adapter->ipa_context)
1621 return;
1622
1623 /* Lower layer may send multiple START_BSS_EVENT in DFS mode or during
1624 * channel change indication. Since these indications are sent by lower
1625 * layer as SAP updates and IPA doesn't have to do anything for these
1626 * updates so ignoring!
1627 */
1628 if (adapter->ipa_context)
1629 return;
1630
1631 cdf_mem_zero(&ipa_offload_enable_disable,
1632 sizeof(ipa_offload_enable_disable));
1633 ipa_offload_enable_disable.offload_type = offload_type;
1634 ipa_offload_enable_disable.vdev_id = adapter->sessionId;
1635 ipa_offload_enable_disable.enable = enable;
1636
1637 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
1638 "%s: offload_type=%d, vdev_id=%d, enable=%d", __func__,
1639 ipa_offload_enable_disable.offload_type,
1640 ipa_offload_enable_disable.vdev_id,
1641 ipa_offload_enable_disable.enable);
1642
1643 if (CDF_STATUS_SUCCESS !=
1644 sme_ipa_offload_enable_disable(WLAN_HDD_GET_HAL_CTX(adapter),
1645 adapter->sessionId, &ipa_offload_enable_disable)) {
1646 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
1647 "%s: Failure to enable IPA offload \
1648 (offload_type=%d, vdev_id=%d, enable=%d)", __func__,
1649 ipa_offload_enable_disable.offload_type,
1650 ipa_offload_enable_disable.vdev_id,
1651 ipa_offload_enable_disable.enable);
1652 }
1653}
1654
1655/**
1656 * hdd_ipa_uc_fw_op_event_handler - IPA uC FW OPvent handler
1657 * @work: uC OP work
1658 *
1659 * Return: None
1660 */
1661static void hdd_ipa_uc_fw_op_event_handler(struct work_struct *work)
1662{
1663 struct op_msg_type *msg;
1664 struct uc_op_work_struct *uc_op_work = container_of(work,
1665 struct uc_op_work_struct, work);
1666 struct hdd_ipa_priv *hdd_ipa = ghdd_ipa;
1667
1668 cds_ssr_protect(__func__);
1669
1670 msg = uc_op_work->msg;
1671 uc_op_work->msg = NULL;
1672 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO_HIGH,
1673 "%s, posted msg %d", __func__, msg->op_code);
1674
1675 hdd_ipa_uc_op_cb(msg, hdd_ipa->hdd_ctx);
1676
1677 cds_ssr_unprotect(__func__);
1678
1679 return;
1680}
1681
1682/**
1683 * hdd_ipa_uc_op_event_handler() - Adapter lookup
1684 * hdd_ipa_uc_fw_op_event_handler - IPA uC FW OPvent handler
1685 * @op_msg: operation message received from firmware
1686 * @hdd_ctx: Global HDD context
1687 *
1688 * Return: None
1689 */
1690static void hdd_ipa_uc_op_event_handler(uint8_t *op_msg, void *hdd_ctx)
1691{
1692 struct hdd_ipa_priv *hdd_ipa;
1693 struct op_msg_type *msg;
1694 struct uc_op_work_struct *uc_op_work;
1695 CDF_STATUS status = CDF_STATUS_SUCCESS;
1696
1697 status = wlan_hdd_validate_context(hdd_ctx);
1698 if (0 != status) {
1699 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "HDD context is not valid");
1700 goto end;
1701 }
1702
1703 msg = (struct op_msg_type *)op_msg;
1704 hdd_ipa = ((hdd_context_t *)hdd_ctx)->hdd_ipa;
1705
1706 if (unlikely(!hdd_ipa))
1707 goto end;
1708
1709 if (HDD_IPA_UC_OPCODE_MAX <= msg->op_code) {
1710 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "%s: Invalid OP Code (%d)",
1711 __func__, msg->op_code);
1712 goto end;
1713 }
1714
1715 uc_op_work = &hdd_ipa->uc_op_work[msg->op_code];
1716 if (uc_op_work->msg)
1717 /* When the same uC OPCODE is already pended, just return */
1718 goto end;
1719
1720 uc_op_work->msg = msg;
1721 schedule_work(&uc_op_work->work);
1722 return;
1723
1724end:
1725 cdf_mem_free(op_msg);
1726}
1727
1728/**
Rajeev Kumar217f2172016-01-06 18:11:55 -08001729 * hdd_ipa_init_uc_op_work - init ipa uc op work
1730 * @work: struct work_struct
1731 * @work_handler: work_handler
1732 *
1733 * Return: none
1734 */
1735#ifdef CONFIG_CNSS
1736static void hdd_ipa_init_uc_op_work(struct work_struct *work,
1737 work_func_t work_handler)
1738{
1739 cnss_init_work(work, work_handler);
1740}
1741#else
1742static void hdd_ipa_init_uc_op_work(struct work_struct *work,
1743 work_func_t work_handler)
1744{
1745 INIT_WORK(work, work_handler);
1746}
1747#endif
1748
1749
/**
 * hdd_ipa_uc_ol_init() - Initialize IPA uC offload
 * @hdd_ctx: Global HDD context
 *
 * Initializes the pending-event list and locks, configures and connects
 * the WDI TX (WLAN consumer) and RX (WLAN producer) pipes, programs the
 * uC doorbell addresses into the txrx layer, registers the FW opcode
 * callback, and initializes the per-opcode work items.
 *
 * Return: CDF_STATUS
 */
static CDF_STATUS hdd_ipa_uc_ol_init(hdd_context_t *hdd_ctx)
{
	struct ipa_wdi_in_params pipe_in;
	struct ipa_wdi_out_params pipe_out;
	struct hdd_ipa_priv *ipa_ctxt = (struct hdd_ipa_priv *)hdd_ctx->hdd_ipa;
	p_cds_contextType cds_ctx = hdd_ctx->pcds_context;
	uint8_t i;

	cdf_mem_zero(&pipe_in, sizeof(struct ipa_wdi_in_params));
	cdf_mem_zero(&pipe_out, sizeof(struct ipa_wdi_out_params));

	cdf_list_init(&ipa_ctxt->pending_event, 1000);
	cdf_mutex_init(&ipa_ctxt->event_lock);
	cdf_mutex_init(&ipa_ctxt->ipa_lock);

	/* TX PIPE: WLAN1_CONS endpoint; 802.3 header constants come from
	 * the uC TX header layout. */
	pipe_in.sys.ipa_ep_cfg.nat.nat_en = IPA_BYPASS_NAT;
	pipe_in.sys.ipa_ep_cfg.hdr.hdr_len = HDD_IPA_UC_WLAN_TX_HDR_LEN;
	pipe_in.sys.ipa_ep_cfg.hdr.hdr_ofst_pkt_size_valid = 1;
	pipe_in.sys.ipa_ep_cfg.hdr.hdr_ofst_pkt_size = 0;
	pipe_in.sys.ipa_ep_cfg.hdr.hdr_additional_const_len =
		HDD_IPA_UC_WLAN_8023_HDR_SIZE;
	pipe_in.sys.ipa_ep_cfg.mode.mode = IPA_BASIC;
	pipe_in.sys.client = IPA_CLIENT_WLAN1_CONS;
	pipe_in.sys.desc_fifo_sz = hdd_ctx->config->IpaDescSize;
	pipe_in.sys.priv = hdd_ctx->hdd_ipa;
	pipe_in.sys.ipa_ep_cfg.hdr_ext.hdr_little_endian = true;
	pipe_in.sys.notify = hdd_ipa_i2w_cb;
	if (!hdd_ipa_is_rm_enabled(hdd_ctx)) {
		/* Without RM, IPA must be held awake for the pipe. */
		HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
			    "%s: IPA RM DISABLED, IPA AWAKE", __func__);
		pipe_in.sys.keep_ipa_awake = true;
	}

	/* Downlink ring/CE addresses previously obtained from the target. */
	pipe_in.u.dl.comp_ring_base_pa = ipa_ctxt->tx_comp_ring_base_paddr;
	pipe_in.u.dl.comp_ring_size =
		ipa_ctxt->tx_comp_ring_size * sizeof(cdf_dma_addr_t);
	pipe_in.u.dl.ce_ring_base_pa = ipa_ctxt->ce_sr_base_paddr;
	pipe_in.u.dl.ce_door_bell_pa = ipa_ctxt->ce_reg_paddr;
	pipe_in.u.dl.ce_ring_size = ipa_ctxt->ce_sr_ring_size;
	pipe_in.u.dl.num_tx_buffers = ipa_ctxt->tx_num_alloc_buffer;

	/* Connect WDI IPA PIPE */
	/* NOTE(review): ipa_connect_wdi_pipe() return value is not checked
	 * here (for either pipe) — on failure the handles/doorbells below
	 * would be garbage; confirm whether this should propagate an
	 * error. */
	ipa_connect_wdi_pipe(&pipe_in, &pipe_out);
	/* Micro Controller Doorbell register */
	ipa_ctxt->tx_comp_doorbell_paddr = pipe_out.uc_door_bell_pa;
	/* WLAN TX PIPE Handle */
	ipa_ctxt->tx_pipe_handle = pipe_out.clnt_hdl;
	HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO_HIGH,
		    "TX : CRBPA 0x%x, CRS %d, CERBPA 0x%x, CEDPA 0x%x,"
		    " CERZ %d, NB %d, CDBPAD 0x%x",
		    (unsigned int)pipe_in.u.dl.comp_ring_base_pa,
		    pipe_in.u.dl.comp_ring_size,
		    (unsigned int)pipe_in.u.dl.ce_ring_base_pa,
		    (unsigned int)pipe_in.u.dl.ce_door_bell_pa,
		    pipe_in.u.dl.ce_ring_size,
		    pipe_in.u.dl.num_tx_buffers,
		    (unsigned int)ipa_ctxt->tx_comp_doorbell_paddr);

	/* RX PIPE: WLAN1_PROD endpoint; reuses pipe_in, overriding the
	 * TX-specific fields set above. */
	pipe_in.sys.ipa_ep_cfg.nat.nat_en = IPA_BYPASS_NAT;
	pipe_in.sys.ipa_ep_cfg.hdr.hdr_len = HDD_IPA_UC_WLAN_RX_HDR_LEN;
	pipe_in.sys.ipa_ep_cfg.hdr.hdr_ofst_metadata_valid = 0;
	pipe_in.sys.ipa_ep_cfg.hdr.hdr_metadata_reg_valid = 1;
	pipe_in.sys.ipa_ep_cfg.mode.mode = IPA_BASIC;
	pipe_in.sys.client = IPA_CLIENT_WLAN1_PROD;
	pipe_in.sys.desc_fifo_sz = hdd_ctx->config->IpaDescSize +
				   sizeof(struct sps_iovec);
	pipe_in.sys.notify = hdd_ipa_w2i_cb;
	if (!hdd_ipa_is_rm_enabled(hdd_ctx)) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
			    "%s: IPA RM DISABLED, IPA AWAKE", __func__);
		pipe_in.sys.keep_ipa_awake = true;
	}

	pipe_in.u.ul.rdy_ring_base_pa = ipa_ctxt->rx_rdy_ring_base_paddr;
	pipe_in.u.ul.rdy_ring_size = ipa_ctxt->rx_rdy_ring_size;
	pipe_in.u.ul.rdy_ring_rp_pa = ipa_ctxt->rx_proc_done_idx_paddr;
	HDD_IPA_WDI2_SET(pipe_in, ipa_ctxt);
	ipa_connect_wdi_pipe(&pipe_in, &pipe_out);
	ipa_ctxt->rx_ready_doorbell_paddr = pipe_out.uc_door_bell_pa;
	ipa_ctxt->rx_pipe_handle = pipe_out.clnt_hdl;
	HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO_HIGH,
		    "RX : RRBPA 0x%x, RRS %d, PDIPA 0x%x, RDY_DB_PAD 0x%x",
		    (unsigned int)pipe_in.u.ul.rdy_ring_base_pa,
		    pipe_in.u.ul.rdy_ring_size,
		    (unsigned int)pipe_in.u.ul.rdy_ring_rp_pa,
		    (unsigned int)ipa_ctxt->rx_ready_doorbell_paddr);

	/* Hand the uC doorbell addresses to the txrx layer and register
	 * the FW opcode callback. */
	ol_txrx_ipa_uc_set_doorbell_paddr(cds_ctx->pdev_txrx_ctx,
					  ipa_ctxt->tx_comp_doorbell_paddr,
					  ipa_ctxt->rx_ready_doorbell_paddr);

	ol_txrx_ipa_uc_register_op_cb(cds_ctx->pdev_txrx_ctx,
				      hdd_ipa_uc_op_event_handler,
				      (void *)hdd_ctx);

	/* One deferred work slot per firmware opcode. */
	for (i = 0; i < HDD_IPA_UC_OPCODE_MAX; i++) {
		hdd_ipa_init_uc_op_work(&ipa_ctxt->uc_op_work[i].work,
					hdd_ipa_uc_fw_op_event_handler);
		ipa_ctxt->uc_op_work[i].msg = NULL;
	}

	return CDF_STATUS_SUCCESS;
}
1860
Leo Change3e49442015-10-26 20:07:13 -07001861/**
1862 * hdd_ipa_uc_force_pipe_shutdown() - Force shutdown IPA pipe
1863 * @hdd_ctx: hdd main context
1864 *
1865 * Force shutdown IPA pipe
1866 * Independent of FW pipe status, IPA pipe shutdonw progress
1867 * in case, any STA does not leave properly, IPA HW pipe should cleaned up
1868 * independent from FW pipe status
1869 *
1870 * Return: NONE
1871 */
1872void hdd_ipa_uc_force_pipe_shutdown(hdd_context_t *hdd_ctx)
1873{
1874 struct hdd_ipa_priv *hdd_ipa;
1875
1876 if (!hdd_ipa_is_enabled(hdd_ctx) || !hdd_ctx->hdd_ipa)
1877 return;
1878
1879 hdd_ipa = (struct hdd_ipa_priv *)hdd_ctx->hdd_ipa;
1880 if (false == hdd_ipa->ipa_pipes_down) {
1881 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
1882 "IPA pipes are not down yet, force shutdown");
1883 hdd_ipa_uc_disable_pipes(hdd_ipa);
1884 } else {
1885 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
1886 "IPA pipes are down, do nothing");
1887 }
1888
1889 return;
1890}
1891
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001892/**
1893 * hdd_ipa_uc_ssr_deinit() - handle ipa deinit for SSR
1894 *
1895 * Deinit basic IPA UC host side to be in sync reloaded FW during
1896 * SSR
1897 *
1898 * Return: 0 - Success
1899 */
1900int hdd_ipa_uc_ssr_deinit(void)
1901{
1902 struct hdd_ipa_priv *hdd_ipa = ghdd_ipa;
1903 int idx;
1904 struct hdd_ipa_iface_context *iface_context;
1905
Leo Chang3bc8fed2015-11-13 10:59:47 -08001906 if ((!hdd_ipa) || (!hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)))
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001907 return 0;
1908
1909 /* Clean up HDD IPA interfaces */
1910 for (idx = 0; (hdd_ipa->num_iface > 0) &&
1911 (idx < HDD_IPA_MAX_IFACE); idx++) {
1912 iface_context = &hdd_ipa->iface_context[idx];
1913 if (iface_context && iface_context->adapter)
1914 hdd_ipa_cleanup_iface(iface_context);
1915 }
1916
1917 /* After SSR, wlan driver reloads FW again. But we need to protect
1918 * IPA submodule during SSR transient state. So deinit basic IPA
1919 * UC host side to be in sync with reloaded FW during SSR
1920 */
Yun Parkf7dc8cd2015-11-17 15:25:12 -08001921 if (!hdd_ipa->ipa_pipes_down)
1922 hdd_ipa_uc_disable_pipes(hdd_ipa);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001923
Leo Chang3bc8fed2015-11-13 10:59:47 -08001924 cdf_mutex_acquire(&hdd_ipa->ipa_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001925 for (idx = 0; idx < WLAN_MAX_STA_COUNT; idx++) {
1926 hdd_ipa->assoc_stas_map[idx].is_reserved = false;
1927 hdd_ipa->assoc_stas_map[idx].sta_id = 0xFF;
1928 }
Leo Chang3bc8fed2015-11-13 10:59:47 -08001929 cdf_mutex_release(&hdd_ipa->ipa_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001930
1931 /* Full IPA driver cleanup not required since wlan driver is now
1932 * unloaded and reloaded after SSR.
1933 */
1934 return 0;
1935}
1936
/**
 * hdd_ipa_uc_ssr_reinit() - handle ipa reinit after SSR
 *
 * Init basic IPA UC host side to be in sync with reloaded FW after
 * SSR to resume IPA UC operations
 *
 * Return: 0 - Success
 */
int hdd_ipa_uc_ssr_reinit(void)
{
	/* After SSR is complete, IPA UC can resume operation. But now wlan
	 * driver will be unloaded and reloaded, which takes care of IPA cleanup
	 * and initialization. This is a placeholder func if IPA has to resume
	 * operations without driver reload.
	 */
	return 0;
}
Leo Chang3bc8fed2015-11-13 10:59:47 -08001955
1956/**
1957 * hdd_ipa_tx_packet_ipa() - send packet to IPA
1958 * @hdd_ctx: Global HDD context
1959 * @skb: skb sent to IPA
1960 * @session_id: send packet instance session id
1961 *
1962 * Send TX packet which generated by system to IPA.
1963 * This routine only will be used for function verification
1964 *
1965 * Return: NULL packet sent to IPA properly
1966 * NULL invalid packet drop
1967 * skb packet not sent to IPA. legacy data path should handle
1968 */
1969struct sk_buff *hdd_ipa_tx_packet_ipa(hdd_context_t *hdd_ctx,
1970 struct sk_buff *skb, uint8_t session_id)
Leo Change3e49442015-10-26 20:07:13 -07001971{
Leo Chang3bc8fed2015-11-13 10:59:47 -08001972 struct ipa_header *ipa_header;
1973 struct frag_header *frag_header;
1974
1975 if (!hdd_ipa_uc_is_enabled(hdd_ctx))
1976 return skb;
1977
1978 ipa_header = (struct ipa_header *) skb_push(skb,
1979 sizeof(struct ipa_header));
1980 if (!ipa_header) {
1981 /* No headroom, legacy */
1982 return skb;
1983 }
1984 memset(ipa_header, 0, sizeof(*ipa_header));
1985 ipa_header->vdev_id = 0;
1986
1987 frag_header = (struct frag_header *) skb_push(skb,
1988 sizeof(struct frag_header));
1989 if (!frag_header) {
1990 /* No headroom, drop */
1991 kfree_skb(skb);
1992 return NULL;
1993 }
1994 memset(frag_header, 0, sizeof(*frag_header));
1995 frag_header->length = skb->len - sizeof(struct frag_header)
1996 - sizeof(struct ipa_header);
1997
1998 ipa_tx_dp(IPA_CLIENT_WLAN1_CONS, skb, NULL);
1999 return NULL;
Leo Change3e49442015-10-26 20:07:13 -07002000}
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002001
2002/**
2003 * hdd_ipa_wake_lock_timer_func() - Wake lock work handler
2004 * @work: scheduled work
2005 *
2006 * When IPA resources are released in hdd_ipa_rm_try_release() we do
2007 * not want to immediately release the wake lock since the system
2008 * would then potentially try to suspend when there is a healthy data
2009 * rate. Deferred work is scheduled and this function handles the
2010 * work. When this function is called, if the IPA resource is still
2011 * released then we release the wake lock.
2012 *
2013 * Return: None
2014 */
2015static void hdd_ipa_wake_lock_timer_func(struct work_struct *work)
2016{
2017 struct hdd_ipa_priv *hdd_ipa = container_of(to_delayed_work(work),
2018 struct hdd_ipa_priv,
2019 wake_lock_work);
2020
2021 cdf_spin_lock_bh(&hdd_ipa->rm_lock);
2022
2023 if (hdd_ipa->rm_state != HDD_IPA_RM_RELEASED)
2024 goto end;
2025
2026 hdd_ipa->wake_lock_released = true;
2027 cdf_wake_lock_release(&hdd_ipa->wake_lock,
2028 WIFI_POWER_EVENT_WAKELOCK_IPA);
2029
2030end:
2031 cdf_spin_unlock_bh(&hdd_ipa->rm_lock);
2032}
2033
/**
 * hdd_ipa_rm_request() - Request resource from IPA
 * @hdd_ipa: Global HDD IPA context
 *
 * Moves the RM state machine from RELEASED to GRANT_PENDING, asks the
 * IPA inactivity timer for the WLAN PROD resource, and re-acquires the
 * wake lock if it had been released.
 *
 * Return: 0 on success, negative errno on error
 */
static int hdd_ipa_rm_request(struct hdd_ipa_priv *hdd_ipa)
{
	int ret = 0;

	if (!hdd_ipa_is_rm_enabled(hdd_ipa->hdd_ctx))
		return 0;

	cdf_spin_lock_bh(&hdd_ipa->rm_lock);

	switch (hdd_ipa->rm_state) {
	case HDD_IPA_RM_GRANTED:
		/* Already granted: nothing to request */
		cdf_spin_unlock_bh(&hdd_ipa->rm_lock);
		return 0;
	case HDD_IPA_RM_GRANT_PENDING:
		/* A previous request is still outstanding */
		cdf_spin_unlock_bh(&hdd_ipa->rm_lock);
		return -EINPROGRESS;
	case HDD_IPA_RM_RELEASED:
		hdd_ipa->rm_state = HDD_IPA_RM_GRANT_PENDING;
		break;
	}

	/* Drop the lock before calling into the IPA driver */
	cdf_spin_unlock_bh(&hdd_ipa->rm_lock);

	ret = ipa_rm_inactivity_timer_request_resource(
			IPA_RM_RESOURCE_WLAN_PROD);

	cdf_spin_lock_bh(&hdd_ipa->rm_lock);
	if (ret == 0) {
		/* ret == 0 means the resource was granted immediately */
		hdd_ipa->rm_state = HDD_IPA_RM_GRANTED;
		hdd_ipa->stats.num_rm_grant_imm++;
	}

	/* Data is flowing again: cancel any pending deferred wake lock
	 * release and take the wake lock back if it was dropped.
	 */
	cancel_delayed_work(&hdd_ipa->wake_lock_work);
	if (hdd_ipa->wake_lock_released) {
		cdf_wake_lock_acquire(&hdd_ipa->wake_lock,
				      WIFI_POWER_EVENT_WAKELOCK_IPA);
		hdd_ipa->wake_lock_released = false;
	}
	cdf_spin_unlock_bh(&hdd_ipa->rm_lock);

	return ret;
}
2082
/**
 * hdd_ipa_rm_try_release() - Attempt to release IPA resource
 * @hdd_ipa: Global HDD IPA context
 *
 * Releases the WLAN PROD resource only when no TX completions, HW
 * descriptors, or queued packets are outstanding, then defers the
 * wake lock release via delayed work.
 *
 * Return: 0 if resources released, negative errno otherwise
 */
static int hdd_ipa_rm_try_release(struct hdd_ipa_priv *hdd_ipa)
{
	int ret = 0;

	if (!hdd_ipa_is_rm_enabled(hdd_ipa->hdd_ctx))
		return 0;

	/* TX completions still pending: cannot release yet */
	if (atomic_read(&hdd_ipa->tx_ref_cnt))
		return -EAGAIN;

	/* In non-STA mode, outstanding HW descriptors or queued packets
	 * also block the release.
	 */
	spin_lock_bh(&hdd_ipa->q_lock);
	if (!hdd_ipa_uc_sta_is_enabled(hdd_ipa->hdd_ctx) &&
	    (hdd_ipa->pending_hw_desc_cnt || hdd_ipa->pend_q_cnt)) {
		spin_unlock_bh(&hdd_ipa->q_lock);
		return -EAGAIN;
	}
	spin_unlock_bh(&hdd_ipa->q_lock);

	/* Packets deferred while suspended must be drained first */
	cdf_spin_lock_bh(&hdd_ipa->pm_lock);

	if (!cdf_nbuf_is_queue_empty(&hdd_ipa->pm_queue_head)) {
		cdf_spin_unlock_bh(&hdd_ipa->pm_lock);
		return -EAGAIN;
	}
	cdf_spin_unlock_bh(&hdd_ipa->pm_lock);

	cdf_spin_lock_bh(&hdd_ipa->rm_lock);
	switch (hdd_ipa->rm_state) {
	case HDD_IPA_RM_GRANTED:
		break;
	case HDD_IPA_RM_GRANT_PENDING:
		cdf_spin_unlock_bh(&hdd_ipa->rm_lock);
		return -EINPROGRESS;
	case HDD_IPA_RM_RELEASED:
		cdf_spin_unlock_bh(&hdd_ipa->rm_lock);
		return 0;
	}

	/* IPA driver returns immediately so set the state here to avoid any
	 * race condition.
	 */
	hdd_ipa->rm_state = HDD_IPA_RM_RELEASED;
	hdd_ipa->stats.num_rm_release++;
	cdf_spin_unlock_bh(&hdd_ipa->rm_lock);

	ret =
		ipa_rm_inactivity_timer_release_resource(IPA_RM_RESOURCE_WLAN_PROD);

	cdf_spin_lock_bh(&hdd_ipa->rm_lock);
	if (unlikely(ret != 0)) {
		/* Release failed unexpectedly: roll the state back */
		hdd_ipa->rm_state = HDD_IPA_RM_GRANTED;
		WARN_ON(1);
	}

	/*
	 * If wake_lock is released immediately, kernel would try to suspend
	 * immediately as well, Just avoid ping-pong between suspend-resume
	 * while there is healthy amount of data transfer going on by
	 * releasing the wake_lock after some delay.
	 */
	schedule_delayed_work(&hdd_ipa->wake_lock_work,
			      msecs_to_jiffies
			      (HDD_IPA_RX_INACTIVITY_MSEC_DELAY));

	cdf_spin_unlock_bh(&hdd_ipa->rm_lock);

	return ret;
}
2157
2158/**
2159 * hdd_ipa_rm_notify() - IPA resource manager notifier callback
2160 * @user_data: user data registered with IPA
2161 * @event: the IPA resource manager event that occurred
2162 * @data: the data associated with the event
2163 *
2164 * Return: None
2165 */
2166static void hdd_ipa_rm_notify(void *user_data, enum ipa_rm_event event,
2167 unsigned long data)
2168{
2169 struct hdd_ipa_priv *hdd_ipa = user_data;
2170
2171 if (unlikely(!hdd_ipa))
2172 return;
2173
2174 if (!hdd_ipa_is_rm_enabled(hdd_ipa->hdd_ctx))
2175 return;
2176
2177 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "Evt: %d", event);
2178
2179 switch (event) {
2180 case IPA_RM_RESOURCE_GRANTED:
2181 if (hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
2182 /* RM Notification comes with ISR context
2183 * it should be serialized into work queue to avoid
2184 * ISR sleep problem
2185 */
2186 hdd_ipa->uc_rm_work.event = event;
2187 schedule_work(&hdd_ipa->uc_rm_work.work);
2188 break;
2189 }
2190 cdf_spin_lock_bh(&hdd_ipa->rm_lock);
2191 hdd_ipa->rm_state = HDD_IPA_RM_GRANTED;
2192 cdf_spin_unlock_bh(&hdd_ipa->rm_lock);
2193 hdd_ipa->stats.num_rm_grant++;
2194 break;
2195
2196 case IPA_RM_RESOURCE_RELEASED:
2197 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "RM Release");
2198 hdd_ipa->resource_unloading = false;
2199 break;
2200
2201 default:
2202 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "Unknown RM Evt: %d", event);
2203 break;
2204 }
2205}
2206
/**
 * hdd_ipa_rm_cons_release() - WLAN consumer resource release handler
 *
 * Callback function registered with IPA that is called when IPA wants
 * to release the WLAN consumer resource. The release is always
 * granted immediately.
 *
 * Return: 0 if the request is granted, negative errno otherwise
 */
static int hdd_ipa_rm_cons_release(void)
{
	return 0;
}
2219
2220/**
2221 * hdd_ipa_rm_cons_request() - WLAN consumer resource request handler
2222 *
2223 * Callback function registered with IPA that is called when IPA wants
2224 * to access the WLAN consumer resource
2225 *
2226 * Return: 0 if the request is granted, negative errno otherwise
2227 */
2228static int hdd_ipa_rm_cons_request(void)
2229{
Yun Park4d8b60a2015-10-22 13:59:32 -07002230 int ret = 0;
2231
2232 if (ghdd_ipa->resource_loading) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002233 HDD_IPA_LOG(CDF_TRACE_LEVEL_FATAL,
Yun Park4d8b60a2015-10-22 13:59:32 -07002234 "%s: IPA resource loading in progress",
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002235 __func__);
2236 ghdd_ipa->pending_cons_req = true;
Yun Park4d8b60a2015-10-22 13:59:32 -07002237 ret = -EINPROGRESS;
2238 } else if (ghdd_ipa->resource_unloading) {
2239 HDD_IPA_LOG(CDF_TRACE_LEVEL_FATAL,
2240 "%s: IPA resource unloading in progress",
2241 __func__);
2242 ghdd_ipa->pending_cons_req = true;
2243 ret = -EPERM;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002244 }
Yun Park4d8b60a2015-10-22 13:59:32 -07002245
2246 return ret;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002247}
2248
2249/**
2250 * hdd_ipa_set_perf_level() - Set IPA performance level
2251 * @hdd_ctx: Global HDD context
2252 * @tx_packets: Number of packets transmitted in the last sample period
2253 * @rx_packets: Number of packets received in the last sample period
2254 *
2255 * Return: 0 on success, negative errno on error
2256 */
2257int hdd_ipa_set_perf_level(hdd_context_t *hdd_ctx, uint64_t tx_packets,
2258 uint64_t rx_packets)
2259{
2260 uint32_t next_cons_bw, next_prod_bw;
2261 struct hdd_ipa_priv *hdd_ipa = hdd_ctx->hdd_ipa;
2262 struct ipa_rm_perf_profile profile;
2263 int ret;
2264
2265 if ((!hdd_ipa_is_enabled(hdd_ctx)) ||
2266 (!hdd_ipa_is_clk_scaling_enabled(hdd_ctx)))
2267 return 0;
2268
2269 memset(&profile, 0, sizeof(profile));
2270
2271 if (tx_packets > (hdd_ctx->config->busBandwidthHighThreshold / 2))
2272 next_cons_bw = hdd_ctx->config->IpaHighBandwidthMbps;
2273 else if (tx_packets >
2274 (hdd_ctx->config->busBandwidthMediumThreshold / 2))
2275 next_cons_bw = hdd_ctx->config->IpaMediumBandwidthMbps;
2276 else
2277 next_cons_bw = hdd_ctx->config->IpaLowBandwidthMbps;
2278
2279 if (rx_packets > (hdd_ctx->config->busBandwidthHighThreshold / 2))
2280 next_prod_bw = hdd_ctx->config->IpaHighBandwidthMbps;
2281 else if (rx_packets >
2282 (hdd_ctx->config->busBandwidthMediumThreshold / 2))
2283 next_prod_bw = hdd_ctx->config->IpaMediumBandwidthMbps;
2284 else
2285 next_prod_bw = hdd_ctx->config->IpaLowBandwidthMbps;
2286
2287 HDD_IPA_LOG(LOG1,
2288 "CONS perf curr: %d, next: %d",
2289 hdd_ipa->curr_cons_bw, next_cons_bw);
2290 HDD_IPA_LOG(LOG1,
2291 "PROD perf curr: %d, next: %d",
2292 hdd_ipa->curr_prod_bw, next_prod_bw);
2293
2294 if (hdd_ipa->curr_cons_bw != next_cons_bw) {
2295 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
2296 "Requesting CONS perf curr: %d, next: %d",
2297 hdd_ipa->curr_cons_bw, next_cons_bw);
2298 profile.max_supported_bandwidth_mbps = next_cons_bw;
2299 ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_WLAN_CONS,
2300 &profile);
2301 if (ret) {
2302 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
2303 "RM CONS set perf profile failed: %d", ret);
2304
2305 return ret;
2306 }
2307 hdd_ipa->curr_cons_bw = next_cons_bw;
2308 hdd_ipa->stats.num_cons_perf_req++;
2309 }
2310
2311 if (hdd_ipa->curr_prod_bw != next_prod_bw) {
2312 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
2313 "Requesting PROD perf curr: %d, next: %d",
2314 hdd_ipa->curr_prod_bw, next_prod_bw);
2315 profile.max_supported_bandwidth_mbps = next_prod_bw;
2316 ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_WLAN_PROD,
2317 &profile);
2318 if (ret) {
2319 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
2320 "RM PROD set perf profile failed: %d", ret);
2321 return ret;
2322 }
2323 hdd_ipa->curr_prod_bw = next_prod_bw;
2324 hdd_ipa->stats.num_prod_perf_req++;
2325 }
2326
2327 return 0;
2328}
2329
2330/**
Rajeev Kumar217f2172016-01-06 18:11:55 -08002331 * hdd_ipa_init_uc_rm_work - init ipa uc resource manager work
2332 * @work: struct work_struct
2333 * @work_handler: work_handler
2334 *
2335 * Return: none
2336 */
2337#ifdef CONFIG_CNSS
2338static void hdd_ipa_init_uc_rm_work(struct work_struct *work,
2339 work_func_t work_handler)
2340{
2341 cnss_init_work(work, work_handler);
2342}
2343#else
2344static void hdd_ipa_init_uc_rm_work(struct work_struct *work,
2345 work_func_t work_handler)
2346{
2347 INIT_WORK(work, work_handler);
2348}
2349#endif
2350
/**
 * hdd_ipa_setup_rm() - Setup IPA resource management
 * @hdd_ipa: Global HDD IPA context
 *
 * Creates the WLAN PROD and CONS RM resources, adds the PROD->APPS
 * dependency, starts the inactivity timer, sets the lowest initial
 * perf level, and initializes the wake lock / RM state. On failure,
 * previously acquired resources are unwound via the goto chain.
 *
 * Return: 0 on success, negative errno on error
 */
static int hdd_ipa_setup_rm(struct hdd_ipa_priv *hdd_ipa)
{
	struct ipa_rm_create_params create_params = { 0 };
	int ret;

	if (!hdd_ipa_is_rm_enabled(hdd_ipa->hdd_ctx))
		return 0;

	hdd_ipa_init_uc_rm_work(&hdd_ipa->uc_rm_work.work,
				hdd_ipa_uc_rm_notify_defer);
	memset(&create_params, 0, sizeof(create_params));
	create_params.name = IPA_RM_RESOURCE_WLAN_PROD;
	create_params.reg_params.user_data = hdd_ipa;
	create_params.reg_params.notify_cb = hdd_ipa_rm_notify;
	create_params.floor_voltage = IPA_VOLTAGE_SVS;

	ret = ipa_rm_create_resource(&create_params);
	if (ret) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
			    "Create RM resource failed: %d", ret);
		goto setup_rm_fail;
	}

	/* CONS resource uses request/release callbacks instead of notify */
	memset(&create_params, 0, sizeof(create_params));
	create_params.name = IPA_RM_RESOURCE_WLAN_CONS;
	create_params.request_resource = hdd_ipa_rm_cons_request;
	create_params.release_resource = hdd_ipa_rm_cons_release;
	create_params.floor_voltage = IPA_VOLTAGE_SVS;

	ret = ipa_rm_create_resource(&create_params);
	if (ret) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
			    "Create RM CONS resource failed: %d", ret);
		goto delete_prod;
	}

	ipa_rm_add_dependency(IPA_RM_RESOURCE_WLAN_PROD,
			      IPA_RM_RESOURCE_APPS_CONS);

	ret = ipa_rm_inactivity_timer_init(IPA_RM_RESOURCE_WLAN_PROD,
					   HDD_IPA_RX_INACTIVITY_MSEC_DELAY);
	if (ret) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "Timer init failed: %d",
			    ret);
		goto timer_init_failed;
	}

	/* Set the lowest bandwidth to start with */
	ret = hdd_ipa_set_perf_level(hdd_ipa->hdd_ctx, 0, 0);

	if (ret) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
			    "Set perf level failed: %d", ret);
		goto set_perf_failed;
	}

	cdf_wake_lock_init(&hdd_ipa->wake_lock, "wlan_ipa");
#ifdef CONFIG_CNSS
	cnss_init_delayed_work(&hdd_ipa->wake_lock_work,
			       hdd_ipa_wake_lock_timer_func);
#else
	INIT_DELAYED_WORK(&hdd_ipa->wake_lock_work,
			  hdd_ipa_wake_lock_timer_func);
#endif
	cdf_spinlock_init(&hdd_ipa->rm_lock);
	hdd_ipa->rm_state = HDD_IPA_RM_RELEASED;
	hdd_ipa->wake_lock_released = true;
	atomic_set(&hdd_ipa->tx_ref_cnt, 0);

	return ret;

/* Unwind in reverse order of acquisition */
set_perf_failed:
	ipa_rm_inactivity_timer_destroy(IPA_RM_RESOURCE_WLAN_PROD);

timer_init_failed:
	ipa_rm_delete_resource(IPA_RM_RESOURCE_WLAN_CONS);

delete_prod:
	ipa_rm_delete_resource(IPA_RM_RESOURCE_WLAN_PROD);

setup_rm_fail:
	return ret;
}
2440
2441/**
2442 * hdd_ipa_destroy_rm_resource() - Destroy IPA resources
2443 * @hdd_ipa: Global HDD IPA context
2444 *
2445 * Destroys all resources associated with the IPA resource manager
2446 *
2447 * Return: None
2448 */
2449static void hdd_ipa_destroy_rm_resource(struct hdd_ipa_priv *hdd_ipa)
2450{
2451 int ret;
2452
2453 if (!hdd_ipa_is_rm_enabled(hdd_ipa->hdd_ctx))
2454 return;
2455
2456 cancel_delayed_work_sync(&hdd_ipa->wake_lock_work);
2457 cdf_wake_lock_destroy(&hdd_ipa->wake_lock);
2458
2459#ifdef WLAN_OPEN_SOURCE
2460 cancel_work_sync(&hdd_ipa->uc_rm_work.work);
2461#endif
2462 cdf_spinlock_destroy(&hdd_ipa->rm_lock);
2463
2464 ipa_rm_inactivity_timer_destroy(IPA_RM_RESOURCE_WLAN_PROD);
2465
2466 ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_WLAN_PROD);
2467 if (ret)
2468 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
2469 "RM PROD resource delete failed %d", ret);
2470
2471 ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_WLAN_CONS);
2472 if (ret)
2473 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
2474 "RM CONS resource delete failed %d", ret);
2475}
2476
2477/**
2478 * hdd_ipa_send_skb_to_network() - Send skb to kernel
2479 * @skb: network buffer
2480 * @adapter: network adapter
2481 *
2482 * Called when a network buffer is received which should not be routed
2483 * to the IPA module.
2484 *
2485 * Return: None
2486 */
2487static void hdd_ipa_send_skb_to_network(cdf_nbuf_t skb,
2488 hdd_adapter_t *adapter)
2489{
2490 struct hdd_ipa_priv *hdd_ipa = ghdd_ipa;
2491 unsigned int cpu_index;
2492
2493 if (!adapter || adapter->magic != WLAN_HDD_ADAPTER_MAGIC) {
2494 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO_LOW, "Invalid adapter: 0x%p",
2495 adapter);
2496 HDD_IPA_INCREASE_INTERNAL_DROP_COUNT(hdd_ipa);
2497 cdf_nbuf_free(skb);
2498 return;
2499 }
2500
Prashanth Bhatta9e143052015-12-04 11:56:47 -08002501 if (cds_is_driver_unloading()) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08002502 HDD_IPA_INCREASE_INTERNAL_DROP_COUNT(hdd_ipa);
2503 cdf_nbuf_free(skb);
2504 return;
2505 }
2506
2507 skb->destructor = hdd_ipa_uc_rt_debug_destructor;
2508 skb->dev = adapter->dev;
2509 skb->protocol = eth_type_trans(skb, skb->dev);
2510 skb->ip_summed = CHECKSUM_NONE;
2511
2512 cpu_index = wlan_hdd_get_cpu();
2513
2514 ++adapter->hdd_stats.hddTxRxStats.rxPackets[cpu_index];
2515 if (netif_rx_ni(skb) == NET_RX_SUCCESS)
2516 ++adapter->hdd_stats.hddTxRxStats.rxDelivered[cpu_index];
2517 else
2518 ++adapter->hdd_stats.hddTxRxStats.rxRefused[cpu_index];
2519
2520 HDD_IPA_INCREASE_NET_SEND_COUNT(hdd_ipa);
2521 adapter->dev->last_rx = jiffies;
2522}
2523
/**
 * hdd_ipa_w2i_cb() - WLAN to IPA callback handler
 * @priv: pointer to private data registered with IPA (we register a
 *	  pointer to the global IPA context)
 * @evt: the IPA event which triggered the callback
 * @data: data associated with the event
 *
 * Handles IPA_RECEIVE exception packets: resolves the HDD interface,
 * strips the CLD header, optionally forwards intra-BSS traffic back
 * to TX per the FW descriptor flags, and delivers the packet to the
 * kernel network stack.
 *
 * Return: None
 */
static void hdd_ipa_w2i_cb(void *priv, enum ipa_dp_evt_type evt,
			   unsigned long data)
{
	struct hdd_ipa_priv *hdd_ipa = NULL;
	hdd_adapter_t *adapter = NULL;
	cdf_nbuf_t skb;
	uint8_t iface_id;
	uint8_t session_id;
	struct hdd_ipa_iface_context *iface_context;
	cdf_nbuf_t copy;
	uint8_t fw_desc;
	int ret;

	hdd_ipa = (struct hdd_ipa_priv *)priv;

	switch (evt) {
	case IPA_RECEIVE:
		skb = (cdf_nbuf_t) data;
		/* In uC mode the session id rides in skb->cb[0]; otherwise
		 * the interface id is embedded in the CLD header itself.
		 */
		if (hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
			session_id = (uint8_t)skb->cb[0];
			iface_id = vdev_to_iface[session_id];
			HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO_HIGH,
				    "IPA_RECEIVE: session_id=%u, iface_id=%u",
				    session_id, iface_id);
		} else {
			iface_id = HDD_IPA_GET_IFACE_ID(skb->data);
		}

		if (iface_id >= HDD_IPA_MAX_IFACE) {
			HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
				    "IPA_RECEIVE: Invalid iface_id: %u",
				    iface_id);
			HDD_IPA_DBG_DUMP(CDF_TRACE_LEVEL_INFO_HIGH,
					 "w2i -- skb", skb->data, 8);
			HDD_IPA_INCREASE_INTERNAL_DROP_COUNT(hdd_ipa);
			cdf_nbuf_free(skb);
			return;
		}

		iface_context = &hdd_ipa->iface_context[iface_id];
		adapter = iface_context->adapter;

		HDD_IPA_DBG_DUMP(CDF_TRACE_LEVEL_DEBUG,
				 "w2i -- skb", skb->data, 8);
		/* Strip the CLD header; its size differs in uC mode */
		if (hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
			hdd_ipa->stats.num_rx_excep++;
			skb_pull(skb, HDD_IPA_UC_WLAN_CLD_HDR_LEN);
		} else {
			skb_pull(skb, HDD_IPA_WLAN_CLD_HDR_LEN);
		}

		iface_context->stats.num_rx_ipa_excep++;

		/* Disable to forward Intra-BSS Rx packets when
		 * ap_isolate=1 in hostapd.conf
		 */
		if (adapter->sessionCtx.ap.apDisableIntraBssFwd) {
			/*
			 * When INTRA_BSS_FWD_OFFLOAD is enabled, FW will send
			 * all Rx packets to IPA uC, which need to be forwarded
			 * to other interface.
			 * And, IPA driver will send back to WLAN host driver
			 * through exception pipe with fw_desc field set by FW.
			 * Here we are checking fw_desc field for FORWARD bit
			 * set, and forward to Tx. Then copy to kernel stack
			 * only when DISCARD bit is not set.
			 */
			fw_desc = (uint8_t)skb->cb[1];

			if (fw_desc & HDD_IPA_FW_RX_DESC_FORWARD_M) {
				HDD_IPA_LOG(
					CDF_TRACE_LEVEL_DEBUG,
					"Forward packet to Tx (fw_desc=%d)",
					fw_desc);
				/* Forward a copy so the original can still
				 * be delivered to the network stack below.
				 */
				copy = cdf_nbuf_copy(skb);
				if (copy) {
					hdd_ipa->ipa_tx_forward++;
					ret = hdd_softap_hard_start_xmit(
						(struct sk_buff *)copy,
						adapter->dev);
					if (ret) {
						HDD_IPA_LOG(
							CDF_TRACE_LEVEL_DEBUG,
							"Forward packet tx fail");
						hdd_ipa->stats.
							num_tx_bcmc_err++;
					} else {
						hdd_ipa->stats.num_tx_bcmc++;
					}
				}
			}

			if (fw_desc & HDD_IPA_FW_RX_DESC_DISCARD_M) {
				/* FW asked for the packet to be dropped */
				HDD_IPA_INCREASE_INTERNAL_DROP_COUNT(hdd_ipa);
				hdd_ipa->ipa_rx_discard++;
				cdf_nbuf_free(skb);
				break;
			}

		} else {
			HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO_HIGH,
				    "Intra-BSS FWD is disabled-skip forward to Tx");
		}

		hdd_ipa_send_skb_to_network(skb, adapter);
		break;

	default:
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
			    "w2i cb wrong event: 0x%x", evt);
		return;
	}
}
2646
2647/**
2648 * hdd_ipa_nbuf_cb() - IPA TX complete callback
2649 * @skb: packet buffer which was transmitted
2650 *
2651 * Return: None
2652 */
2653static void hdd_ipa_nbuf_cb(cdf_nbuf_t skb)
2654{
2655 struct hdd_ipa_priv *hdd_ipa = ghdd_ipa;
2656
2657 HDD_IPA_LOG(CDF_TRACE_LEVEL_DEBUG, "%lx", NBUF_OWNER_PRIV_DATA(skb));
2658 ipa_free_skb((struct ipa_rx_data *)NBUF_OWNER_PRIV_DATA(skb));
2659
2660 hdd_ipa->stats.num_tx_comp_cnt++;
2661
2662 atomic_dec(&hdd_ipa->tx_ref_cnt);
2663
2664 hdd_ipa_rm_try_release(hdd_ipa);
2665}
2666
/**
 * hdd_ipa_send_pkt_to_tl() - Send an IPA packet to TL
 * @iface_context: interface-specific IPA context
 * @ipa_tx_desc: packet data descriptor
 *
 * Validates the adapter under the interface lock, prepares the nbuf
 * ownership/callback fields, and hands the packet to the TL layer for
 * transmission. Dropped packets return their descriptor to IPA and
 * trigger an RM release attempt.
 *
 * Return: None
 */
static void hdd_ipa_send_pkt_to_tl(
		struct hdd_ipa_iface_context *iface_context,
		struct ipa_rx_data *ipa_tx_desc)
{
	struct hdd_ipa_priv *hdd_ipa = iface_context->hdd_ipa;
	uint8_t interface_id;
	hdd_adapter_t *adapter = NULL;
	cdf_nbuf_t skb;

	cdf_spin_lock_bh(&iface_context->interface_lock);
	adapter = iface_context->adapter;
	if (!adapter) {
		/* Interface went down: return the descriptor to IPA */
		HDD_IPA_LOG(CDF_TRACE_LEVEL_WARN, "Interface Down");
		ipa_free_skb(ipa_tx_desc);
		iface_context->stats.num_tx_drop++;
		cdf_spin_unlock_bh(&iface_context->interface_lock);
		hdd_ipa_rm_try_release(hdd_ipa);
		return;
	}

	/*
	 * During CAC period, data packets shouldn't be sent over the air so
	 * drop all the packets here
	 */
	if (WLAN_HDD_GET_AP_CTX_PTR(adapter)->dfs_cac_block_tx) {
		ipa_free_skb(ipa_tx_desc);
		cdf_spin_unlock_bh(&iface_context->interface_lock);
		iface_context->stats.num_tx_cac_drop++;
		hdd_ipa_rm_try_release(hdd_ipa);
		return;
	}

	interface_id = adapter->sessionId;
	++adapter->stats.tx_packets;

	cdf_spin_unlock_bh(&iface_context->interface_lock);

	skb = ipa_tx_desc->skb;

	/* Set up nbuf ownership so hdd_ipa_nbuf_cb() can recover the
	 * IPA descriptor on TX completion.
	 */
	cdf_mem_set(skb->cb, sizeof(skb->cb), 0);
	NBUF_OWNER_ID(skb) = IPA_NBUF_OWNER_ID;
	NBUF_CALLBACK_FN(skb) = hdd_ipa_nbuf_cb;
	if (hdd_ipa_uc_sta_is_enabled(hdd_ipa->hdd_ctx)) {
		/* In uC STA mode, skip over the frag and IPA headers */
		NBUF_MAPPED_PADDR_LO(skb) = ipa_tx_desc->dma_addr
					    + HDD_IPA_WLAN_FRAG_HEADER
					    + HDD_IPA_WLAN_IPA_HEADER;
		ipa_tx_desc->skb->len -=
			HDD_IPA_WLAN_FRAG_HEADER + HDD_IPA_WLAN_IPA_HEADER;
	} else
		NBUF_MAPPED_PADDR_LO(skb) = ipa_tx_desc->dma_addr;

	NBUF_OWNER_PRIV_DATA(skb) = (unsigned long)ipa_tx_desc;

	adapter->stats.tx_bytes += ipa_tx_desc->skb->len;

	/* TL returns the skb back on failure, NULL on success */
	skb = ol_tx_send_ipa_data_frame(iface_context->tl_context,
					ipa_tx_desc->skb);
	if (skb) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_DEBUG, "TLSHIM tx fail");
		ipa_free_skb(ipa_tx_desc);
		iface_context->stats.num_tx_err++;
		hdd_ipa_rm_try_release(hdd_ipa);
		return;
	}

	atomic_inc(&hdd_ipa->tx_ref_cnt);

	iface_context->stats.num_tx++;

}
2744
2745/**
2746 * hdd_ipa_pm_send_pkt_to_tl() - Send queued packets to TL
2747 * @work: pointer to the scheduled work
2748 *
2749 * Called during PM resume to send packets to TL which were queued
2750 * while host was in the process of suspending.
2751 *
2752 * Return: None
2753 */
2754static void hdd_ipa_pm_send_pkt_to_tl(struct work_struct *work)
2755{
2756 struct hdd_ipa_priv *hdd_ipa = container_of(work,
2757 struct hdd_ipa_priv,
2758 pm_work);
2759 struct hdd_ipa_pm_tx_cb *pm_tx_cb = NULL;
2760 cdf_nbuf_t skb;
2761 uint32_t dequeued = 0;
2762
2763 cdf_spin_lock_bh(&hdd_ipa->pm_lock);
2764
2765 while (((skb = cdf_nbuf_queue_remove(&hdd_ipa->pm_queue_head)) != NULL)) {
2766 cdf_spin_unlock_bh(&hdd_ipa->pm_lock);
2767
2768 pm_tx_cb = (struct hdd_ipa_pm_tx_cb *)skb->cb;
2769
2770 dequeued++;
2771
2772 hdd_ipa_send_pkt_to_tl(pm_tx_cb->iface_context,
2773 pm_tx_cb->ipa_tx_desc);
2774
2775 cdf_spin_lock_bh(&hdd_ipa->pm_lock);
2776 }
2777
2778 cdf_spin_unlock_bh(&hdd_ipa->pm_lock);
2779
2780 hdd_ipa->stats.num_tx_dequeued += dequeued;
2781 if (dequeued > hdd_ipa->stats.num_max_pm_queue)
2782 hdd_ipa->stats.num_max_pm_queue = dequeued;
2783}
2784
/**
 * hdd_ipa_i2w_cb() - IPA to WLAN callback
 * @priv: pointer to private data registered with IPA (we register a
 *	  pointer to the interface-specific IPA context)
 * @evt: the IPA event which triggered the callback
 * @data: data associated with the event
 *
 * Receives TX packets from IPA, requests the PROD RM resource, and
 * either queues the packet (if the host is suspended) or sends it to
 * TL directly.
 *
 * Return: None
 */
static void hdd_ipa_i2w_cb(void *priv, enum ipa_dp_evt_type evt,
			   unsigned long data)
{
	struct hdd_ipa_priv *hdd_ipa = NULL;
	struct ipa_rx_data *ipa_tx_desc;
	struct hdd_ipa_iface_context *iface_context;
	cdf_nbuf_t skb;
	struct hdd_ipa_pm_tx_cb *pm_tx_cb = NULL;
	CDF_STATUS status = CDF_STATUS_SUCCESS;

	iface_context = (struct hdd_ipa_iface_context *)priv;
	/* Only IPA_RECEIVE carries a descriptor; anything else is dropped */
	if (evt != IPA_RECEIVE) {
		skb = (cdf_nbuf_t) data;
		dev_kfree_skb_any(skb);
		iface_context->stats.num_tx_drop++;
		return;
	}

	ipa_tx_desc = (struct ipa_rx_data *)data;

	hdd_ipa = iface_context->hdd_ipa;

	/*
	 * When SSR is going on or driver is unloading, just drop the packets.
	 * During SSR, there is no use in queueing the packets as STA has to
	 * connect back any way
	 */
	status = wlan_hdd_validate_context(hdd_ipa->hdd_ctx);
	if (0 != status) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "HDD context is not valid");
		ipa_free_skb(ipa_tx_desc);
		iface_context->stats.num_tx_drop++;
		return;
	}

	skb = ipa_tx_desc->skb;

	HDD_IPA_DBG_DUMP(CDF_TRACE_LEVEL_DEBUG, "i2w", skb->data, 8);

	/*
	 * If PROD resource is not requested here then there may be cases where
	 * IPA hardware may be clocked down because of not having proper
	 * dependency graph between WLAN CONS and modem PROD pipes. Adding the
	 * workaround to request PROD resource while data is going over CONS
	 * pipe to prevent the IPA hardware clockdown.
	 */
	hdd_ipa_rm_request(hdd_ipa);

	cdf_spin_lock_bh(&hdd_ipa->pm_lock);
	/*
	 * If host is still suspended then queue the packets and these will be
	 * drained later when resume completes. When packet is arrived here and
	 * host is suspended, this means that there is already resume is in
	 * progress.
	 */
	if (hdd_ipa->suspended) {
		/* Stash the context in skb->cb for the PM drain worker */
		cdf_mem_set(skb->cb, sizeof(skb->cb), 0);
		pm_tx_cb = (struct hdd_ipa_pm_tx_cb *)skb->cb;
		pm_tx_cb->iface_context = iface_context;
		pm_tx_cb->ipa_tx_desc = ipa_tx_desc;
		cdf_nbuf_queue_add(&hdd_ipa->pm_queue_head, skb);
		hdd_ipa->stats.num_tx_queued++;

		cdf_spin_unlock_bh(&hdd_ipa->pm_lock);
		return;
	}

	cdf_spin_unlock_bh(&hdd_ipa->pm_lock);

	/*
	 * If we are here means, host is not suspended, wait for the work queue
	 * to finish.
	 */
#ifdef WLAN_OPEN_SOURCE
	flush_work(&hdd_ipa->pm_work);
#endif

	return hdd_ipa_send_pkt_to_tl(iface_context, ipa_tx_desc);
}
2873
2874/**
2875 * hdd_ipa_suspend() - Suspend IPA
2876 * @hdd_ctx: Global HDD context
2877 *
 * Return: 0 on success, negative errno on error
2879 */
int hdd_ipa_suspend(hdd_context_t *hdd_ctx)
{
	struct hdd_ipa_priv *hdd_ipa = hdd_ctx->hdd_ipa;

	/* Nothing to do when IPA offload is disabled in configuration */
	if (!hdd_ipa_is_enabled(hdd_ctx))
		return 0;

	/*
	 * Check if IPA is ready for suspend, If we are here means, there is
	 * high chance that suspend would go through but just to avoid any race
	 * condition after suspend started, these checks are conducted before
	 * allowing to suspend.
	 */
	if (atomic_read(&hdd_ipa->tx_ref_cnt))
		return -EAGAIN;

	cdf_spin_lock_bh(&hdd_ipa->rm_lock);

	/* Refuse suspend unless the RM state is fully RELEASED */
	if (hdd_ipa->rm_state != HDD_IPA_RM_RELEASED) {
		cdf_spin_unlock_bh(&hdd_ipa->rm_lock);
		return -EAGAIN;
	}
	cdf_spin_unlock_bh(&hdd_ipa->rm_lock);

	/*
	 * Set the flag under pm_lock; the i2w TX path checks `suspended`
	 * under the same lock and queues packets to pm_queue_head instead
	 * of sending them while this flag is true.
	 */
	cdf_spin_lock_bh(&hdd_ipa->pm_lock);
	hdd_ipa->suspended = true;
	cdf_spin_unlock_bh(&hdd_ipa->pm_lock);

	return 0;
}
2910
2911/**
2912 * hdd_ipa_resume() - Resume IPA following suspend
 * @hdd_ctx: Global HDD context
2914 *
2915 * Return: 0 on success, negative errno on error
2916 */
int hdd_ipa_resume(hdd_context_t *hdd_ctx)
{
	struct hdd_ipa_priv *hdd_ipa = hdd_ctx->hdd_ipa;

	/* Nothing to do when IPA offload is disabled in configuration */
	if (!hdd_ipa_is_enabled(hdd_ctx))
		return 0;

	/* Kick pm_work to drain packets queued while suspended */
	schedule_work(&hdd_ipa->pm_work);

	/* Clear the flag under pm_lock so new TX packets are sent directly
	 * rather than being queued by the i2w TX path */
	cdf_spin_lock_bh(&hdd_ipa->pm_lock);
	hdd_ipa->suspended = false;
	cdf_spin_unlock_bh(&hdd_ipa->pm_lock);

	return 0;
}
2932
2933/**
2934 * hdd_ipa_setup_sys_pipe() - Setup all IPA Sys pipes
2935 * @hdd_ipa: Global HDD IPA context
2936 *
2937 * Return: 0 on success, negative errno on error
2938 */
2939static int hdd_ipa_setup_sys_pipe(struct hdd_ipa_priv *hdd_ipa)
2940{
2941 int i, ret = 0;
2942 struct ipa_sys_connect_params *ipa;
2943 uint32_t desc_fifo_sz;
2944
2945 /* The maximum number of descriptors that can be provided to a BAM at
2946 * once is one less than the total number of descriptors that the buffer
2947 * can contain.
2948 * If max_num_of_descriptors = (BAM_PIPE_DESCRIPTOR_FIFO_SIZE / sizeof
2949 * (SPS_DESCRIPTOR)), then (max_num_of_descriptors - 1) descriptors can
2950 * be provided at once.
2951 * Because of above requirement, one extra descriptor will be added to
2952 * make sure hardware always has one descriptor.
2953 */
2954 desc_fifo_sz = hdd_ipa->hdd_ctx->config->IpaDescSize
2955 + sizeof(struct sps_iovec);
2956
2957 /*setup TX pipes */
2958 for (i = 0; i < HDD_IPA_MAX_IFACE; i++) {
2959 ipa = &hdd_ipa->sys_pipe[i].ipa_sys_params;
2960
2961 ipa->client = hdd_ipa_adapter_2_client[i].cons_client;
2962 ipa->desc_fifo_sz = desc_fifo_sz;
2963 ipa->priv = &hdd_ipa->iface_context[i];
2964 ipa->notify = hdd_ipa_i2w_cb;
2965
2966 if (hdd_ipa_uc_sta_is_enabled(hdd_ipa->hdd_ctx)) {
2967 ipa->ipa_ep_cfg.hdr.hdr_len =
2968 HDD_IPA_UC_WLAN_TX_HDR_LEN;
2969 ipa->ipa_ep_cfg.nat.nat_en = IPA_BYPASS_NAT;
2970 ipa->ipa_ep_cfg.hdr.hdr_ofst_pkt_size_valid = 1;
2971 ipa->ipa_ep_cfg.hdr.hdr_ofst_pkt_size = 0;
2972 ipa->ipa_ep_cfg.hdr.hdr_additional_const_len =
2973 HDD_IPA_UC_WLAN_8023_HDR_SIZE;
2974 ipa->ipa_ep_cfg.hdr_ext.hdr_little_endian = true;
2975 } else {
2976 ipa->ipa_ep_cfg.hdr.hdr_len = HDD_IPA_WLAN_TX_HDR_LEN;
2977 }
2978 ipa->ipa_ep_cfg.mode.mode = IPA_BASIC;
2979
2980 if (!hdd_ipa_is_rm_enabled(hdd_ipa->hdd_ctx))
2981 ipa->keep_ipa_awake = 1;
2982
2983 ret = ipa_setup_sys_pipe(ipa, &(hdd_ipa->sys_pipe[i].conn_hdl));
2984 if (ret) {
2985 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "Failed for pipe %d"
2986 " ret: %d", i, ret);
2987 goto setup_sys_pipe_fail;
2988 }
2989 hdd_ipa->sys_pipe[i].conn_hdl_valid = 1;
2990 }
2991
2992 if (!hdd_ipa_uc_sta_is_enabled(hdd_ipa->hdd_ctx)) {
2993 /*
2994 * Hard code it here, this can be extended if in case
2995 * PROD pipe is also per interface.
2996 * Right now there is no advantage of doing this.
2997 */
2998 hdd_ipa->prod_client = IPA_CLIENT_WLAN1_PROD;
2999
3000 ipa = &hdd_ipa->sys_pipe[HDD_IPA_RX_PIPE].ipa_sys_params;
3001
3002 ipa->client = hdd_ipa->prod_client;
3003
3004 ipa->desc_fifo_sz = desc_fifo_sz;
3005 ipa->priv = hdd_ipa;
3006 ipa->notify = hdd_ipa_w2i_cb;
3007
3008 ipa->ipa_ep_cfg.nat.nat_en = IPA_BYPASS_NAT;
3009 ipa->ipa_ep_cfg.hdr.hdr_len = HDD_IPA_WLAN_RX_HDR_LEN;
3010 ipa->ipa_ep_cfg.hdr.hdr_ofst_metadata_valid = 1;
3011 ipa->ipa_ep_cfg.mode.mode = IPA_BASIC;
3012
3013 if (!hdd_ipa_is_rm_enabled(hdd_ipa->hdd_ctx))
3014 ipa->keep_ipa_awake = 1;
3015
3016 ret = ipa_setup_sys_pipe(ipa, &(hdd_ipa->sys_pipe[i].conn_hdl));
3017 if (ret) {
3018 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
3019 "Failed for RX pipe: %d", ret);
3020 goto setup_sys_pipe_fail;
3021 }
3022 hdd_ipa->sys_pipe[HDD_IPA_RX_PIPE].conn_hdl_valid = 1;
3023 }
3024
3025 return ret;
3026
3027setup_sys_pipe_fail:
3028
3029 while (--i >= 0) {
3030 ipa_teardown_sys_pipe(hdd_ipa->sys_pipe[i].conn_hdl);
3031 cdf_mem_zero(&hdd_ipa->sys_pipe[i],
3032 sizeof(struct hdd_ipa_sys_pipe));
3033 }
3034
3035 return ret;
3036}
3037
3038/**
3039 * hdd_ipa_teardown_sys_pipe() - Tear down all IPA Sys pipes
3040 * @hdd_ipa: Global HDD IPA context
3041 *
3042 * Return: None
3043 */
3044static void hdd_ipa_teardown_sys_pipe(struct hdd_ipa_priv *hdd_ipa)
3045{
3046 int ret = 0, i;
3047 for (i = 0; i < HDD_IPA_MAX_SYSBAM_PIPE; i++) {
3048 if (hdd_ipa->sys_pipe[i].conn_hdl_valid) {
3049 ret =
3050 ipa_teardown_sys_pipe(hdd_ipa->sys_pipe[i].
3051 conn_hdl);
3052 if (ret)
3053 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "Failed: %d",
3054 ret);
3055
3056 hdd_ipa->sys_pipe[i].conn_hdl_valid = 0;
3057 }
3058 }
3059}
3060
3061/**
3062 * hdd_ipa_register_interface() - register IPA interface
3063 * @hdd_ipa: Global IPA context
3064 * @iface_context: Per-interface IPA context
3065 *
3066 * Return: 0 on success, negative errno on error
3067 */
3068static int hdd_ipa_register_interface(struct hdd_ipa_priv *hdd_ipa,
3069 struct hdd_ipa_iface_context
3070 *iface_context)
3071{
3072 struct ipa_tx_intf tx_intf;
3073 struct ipa_rx_intf rx_intf;
3074 struct ipa_ioc_tx_intf_prop *tx_prop = NULL;
3075 struct ipa_ioc_rx_intf_prop *rx_prop = NULL;
3076 char *ifname = iface_context->adapter->dev->name;
3077
3078 char ipv4_hdr_name[IPA_RESOURCE_NAME_MAX];
3079 char ipv6_hdr_name[IPA_RESOURCE_NAME_MAX];
3080
3081 int num_prop = 1;
3082 int ret = 0;
3083
3084 if (hdd_ipa_is_ipv6_enabled(hdd_ipa->hdd_ctx))
3085 num_prop++;
3086
3087 /* Allocate TX properties for TOS categories, 1 each for IPv4 & IPv6 */
3088 tx_prop =
3089 cdf_mem_malloc(sizeof(struct ipa_ioc_tx_intf_prop) * num_prop);
3090 if (!tx_prop) {
3091 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "tx_prop allocation failed");
3092 goto register_interface_fail;
3093 }
3094
3095 /* Allocate RX properties, 1 each for IPv4 & IPv6 */
3096 rx_prop =
3097 cdf_mem_malloc(sizeof(struct ipa_ioc_rx_intf_prop) * num_prop);
3098 if (!rx_prop) {
3099 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "rx_prop allocation failed");
3100 goto register_interface_fail;
3101 }
3102
3103 cdf_mem_zero(&tx_intf, sizeof(tx_intf));
3104 cdf_mem_zero(&rx_intf, sizeof(rx_intf));
3105
3106 snprintf(ipv4_hdr_name, IPA_RESOURCE_NAME_MAX, "%s%s",
3107 ifname, HDD_IPA_IPV4_NAME_EXT);
3108 snprintf(ipv6_hdr_name, IPA_RESOURCE_NAME_MAX, "%s%s",
3109 ifname, HDD_IPA_IPV6_NAME_EXT);
3110
3111 rx_prop[IPA_IP_v4].ip = IPA_IP_v4;
3112 rx_prop[IPA_IP_v4].src_pipe = iface_context->prod_client;
3113 rx_prop[IPA_IP_v4].hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
3114 rx_prop[IPA_IP_v4].attrib.attrib_mask = IPA_FLT_META_DATA;
3115
3116 /*
3117 * Interface ID is 3rd byte in the CLD header. Add the meta data and
3118 * mask to identify the interface in IPA hardware
3119 */
3120 rx_prop[IPA_IP_v4].attrib.meta_data =
3121 htonl(iface_context->adapter->sessionId << 16);
3122 rx_prop[IPA_IP_v4].attrib.meta_data_mask = htonl(0x00FF0000);
3123
3124 rx_intf.num_props++;
3125 if (hdd_ipa_is_ipv6_enabled(hdd_ipa->hdd_ctx)) {
3126 rx_prop[IPA_IP_v6].ip = IPA_IP_v6;
3127 rx_prop[IPA_IP_v6].src_pipe = iface_context->prod_client;
3128 rx_prop[IPA_IP_v6].hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
3129 rx_prop[IPA_IP_v4].attrib.attrib_mask = IPA_FLT_META_DATA;
3130 rx_prop[IPA_IP_v4].attrib.meta_data =
3131 htonl(iface_context->adapter->sessionId << 16);
3132 rx_prop[IPA_IP_v4].attrib.meta_data_mask = htonl(0x00FF0000);
3133
3134 rx_intf.num_props++;
3135 }
3136
3137 tx_prop[IPA_IP_v4].ip = IPA_IP_v4;
3138 tx_prop[IPA_IP_v4].hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
3139 tx_prop[IPA_IP_v4].dst_pipe = IPA_CLIENT_WLAN1_CONS;
3140 tx_prop[IPA_IP_v4].alt_dst_pipe = iface_context->cons_client;
3141 strlcpy(tx_prop[IPA_IP_v4].hdr_name, ipv4_hdr_name,
3142 IPA_RESOURCE_NAME_MAX);
3143 tx_intf.num_props++;
3144
3145 if (hdd_ipa_is_ipv6_enabled(hdd_ipa->hdd_ctx)) {
3146 tx_prop[IPA_IP_v6].ip = IPA_IP_v6;
3147 tx_prop[IPA_IP_v6].hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
3148 tx_prop[IPA_IP_v6].dst_pipe = IPA_CLIENT_WLAN1_CONS;
3149 tx_prop[IPA_IP_v6].alt_dst_pipe = iface_context->cons_client;
3150 strlcpy(tx_prop[IPA_IP_v6].hdr_name, ipv6_hdr_name,
3151 IPA_RESOURCE_NAME_MAX);
3152 tx_intf.num_props++;
3153 }
3154
3155 tx_intf.prop = tx_prop;
3156 rx_intf.prop = rx_prop;
3157
3158 /* Call the ipa api to register interface */
3159 ret = ipa_register_intf(ifname, &tx_intf, &rx_intf);
3160
3161register_interface_fail:
3162 cdf_mem_free(tx_prop);
3163 cdf_mem_free(rx_prop);
3164 return ret;
3165}
3166
3167/**
 * hdd_ipa_remove_header() - Remove a specific header from IPA
3169 * @name: Name of the header to be removed
3170 *
3171 * Return: None
3172 */
3173static void hdd_ipa_remove_header(char *name)
3174{
3175 struct ipa_ioc_get_hdr hdrlookup;
3176 int ret = 0, len;
3177 struct ipa_ioc_del_hdr *ipa_hdr;
3178
3179 cdf_mem_zero(&hdrlookup, sizeof(hdrlookup));
3180 strlcpy(hdrlookup.name, name, sizeof(hdrlookup.name));
3181 ret = ipa_get_hdr(&hdrlookup);
3182 if (ret) {
3183 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "Hdr deleted already %s, %d",
3184 name, ret);
3185 return;
3186 }
3187
3188 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "hdl: 0x%x", hdrlookup.hdl);
3189 len = sizeof(struct ipa_ioc_del_hdr) + sizeof(struct ipa_hdr_del) * 1;
3190 ipa_hdr = (struct ipa_ioc_del_hdr *)cdf_mem_malloc(len);
3191 if (ipa_hdr == NULL) {
3192 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "ipa_hdr allocation failed");
3193 return;
3194 }
3195 ipa_hdr->num_hdls = 1;
3196 ipa_hdr->commit = 0;
3197 ipa_hdr->hdl[0].hdl = hdrlookup.hdl;
3198 ipa_hdr->hdl[0].status = -1;
3199 ret = ipa_del_hdr(ipa_hdr);
3200 if (ret != 0)
3201 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "Delete header failed: %d",
3202 ret);
3203
3204 cdf_mem_free(ipa_hdr);
3205}
3206
3207/**
3208 * hdd_ipa_add_header_info() - Add IPA header for a given interface
3209 * @hdd_ipa: Global HDD IPA context
3210 * @iface_context: Interface-specific HDD IPA context
3211 * @mac_addr: Interface MAC address
3212 *
 * Return: 0 on success, negative errno value on error
3214 */
static int hdd_ipa_add_header_info(struct hdd_ipa_priv *hdd_ipa,
				   struct hdd_ipa_iface_context *iface_context,
				   uint8_t *mac_addr)
{
	hdd_adapter_t *adapter = iface_context->adapter;
	char *ifname;
	struct ipa_ioc_add_hdr *ipa_hdr = NULL;
	int ret = -EINVAL;
	struct hdd_ipa_tx_hdr *tx_hdr = NULL;
	struct hdd_ipa_uc_tx_hdr *uc_tx_hdr = NULL;

	ifname = adapter->dev->name;

	HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "Add Partial hdr: %s, %pM",
		    ifname, mac_addr);

	/* dynamically allocate the memory to add the hdrs */
	ipa_hdr = cdf_mem_malloc(sizeof(struct ipa_ioc_add_hdr)
				 + sizeof(struct ipa_hdr_add));
	if (!ipa_hdr) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
			    "%s: ipa_hdr allocation failed", ifname);
		ret = -ENOMEM;
		goto end;
	}

	ipa_hdr->commit = 0;
	ipa_hdr->num_hdrs = 1;

	if (hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
		/* uC offload path: copy the uC TX header template, then stamp
		 * in the interface source MAC and vdev id */
		uc_tx_hdr = (struct hdd_ipa_uc_tx_hdr *)ipa_hdr->hdr[0].hdr;
		memcpy(uc_tx_hdr, &ipa_uc_tx_hdr, HDD_IPA_UC_WLAN_TX_HDR_LEN);
		memcpy(uc_tx_hdr->eth.h_source, mac_addr, ETH_ALEN);
		uc_tx_hdr->ipa_hd.vdev_id = iface_context->adapter->sessionId;
		HDD_IPA_LOG(CDF_TRACE_LEVEL_DEBUG,
			    "ifname=%s, vdev_id=%d",
			    ifname, uc_tx_hdr->ipa_hd.vdev_id);
		/* Header is named "<ifname>_ipv4" for the IPv4 flow */
		snprintf(ipa_hdr->hdr[0].name, IPA_RESOURCE_NAME_MAX, "%s%s",
			 ifname, HDD_IPA_IPV4_NAME_EXT);
		ipa_hdr->hdr[0].hdr_len = HDD_IPA_UC_WLAN_TX_HDR_LEN;
		ipa_hdr->hdr[0].type = IPA_HDR_L2_ETHERNET_II;
		/* Partial header: IPA completes remaining fields per packet */
		ipa_hdr->hdr[0].is_partial = 1;
		ipa_hdr->hdr[0].hdr_hdl = 0;
		ipa_hdr->hdr[0].is_eth2_ofst_valid = 1;
		ipa_hdr->hdr[0].eth2_ofst = HDD_IPA_UC_WLAN_HDR_DES_MAC_OFFSET;

		ret = ipa_add_hdr(ipa_hdr);
	} else {
		/* Non-uC path: CLD header template with LLC/SNAP */
		tx_hdr = (struct hdd_ipa_tx_hdr *)ipa_hdr->hdr[0].hdr;

		/* Set the Source MAC */
		memcpy(tx_hdr, &ipa_tx_hdr, HDD_IPA_WLAN_TX_HDR_LEN);
		memcpy(tx_hdr->eth.h_source, mac_addr, ETH_ALEN);

		snprintf(ipa_hdr->hdr[0].name, IPA_RESOURCE_NAME_MAX, "%s%s",
			 ifname, HDD_IPA_IPV4_NAME_EXT);
		ipa_hdr->hdr[0].hdr_len = HDD_IPA_WLAN_TX_HDR_LEN;
		ipa_hdr->hdr[0].is_partial = 1;
		ipa_hdr->hdr[0].hdr_hdl = 0;
		ipa_hdr->hdr[0].is_eth2_ofst_valid = 1;
		ipa_hdr->hdr[0].eth2_ofst = HDD_IPA_WLAN_HDR_DES_MAC_OFFSET;

		/* Set the type to IPV4 in the header */
		tx_hdr->llc_snap.eth_type = cpu_to_be16(ETH_P_IP);

		ret = ipa_add_hdr(ipa_hdr);
	}
	if (ret) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "%s IPv4 add hdr failed: %d",
			    ifname, ret);
		goto end;
	}

	HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "%s: IPv4 hdr_hdl: 0x%x",
		    ipa_hdr->hdr[0].name, ipa_hdr->hdr[0].hdr_hdl);

	if (hdd_ipa_is_ipv6_enabled(hdd_ipa->hdd_ctx)) {
		/* Reuse the same request: rename to "<ifname>_ipv6" and flip
		 * the EtherType, then register the second header */
		snprintf(ipa_hdr->hdr[0].name, IPA_RESOURCE_NAME_MAX, "%s%s",
			 ifname, HDD_IPA_IPV6_NAME_EXT);

		if (hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
			uc_tx_hdr =
				(struct hdd_ipa_uc_tx_hdr *)ipa_hdr->hdr[0].hdr;
			uc_tx_hdr->eth.h_proto = cpu_to_be16(ETH_P_IPV6);
		} else {
			/* Set the type to IPV6 in the header */
			tx_hdr = (struct hdd_ipa_tx_hdr *)ipa_hdr->hdr[0].hdr;
			tx_hdr->llc_snap.eth_type = cpu_to_be16(ETH_P_IPV6);
		}

		ret = ipa_add_hdr(ipa_hdr);
		if (ret) {
			HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
				    "%s: IPv6 add hdr failed: %d", ifname, ret);
			goto clean_ipv4_hdr;
		}

		HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "%s: IPv6 hdr_hdl: 0x%x",
			    ipa_hdr->hdr[0].name, ipa_hdr->hdr[0].hdr_hdl);
	}

	cdf_mem_free(ipa_hdr);

	return ret;

clean_ipv4_hdr:
	/* IPv6 add failed: remove the IPv4 header installed above */
	snprintf(ipa_hdr->hdr[0].name, IPA_RESOURCE_NAME_MAX, "%s%s",
		 ifname, HDD_IPA_IPV4_NAME_EXT);
	hdd_ipa_remove_header(ipa_hdr->hdr[0].name);
end:
	if (ipa_hdr)
		cdf_mem_free(ipa_hdr);

	return ret;
}
3330
3331/**
3332 * hdd_ipa_clean_hdr() - Cleanup IPA on a given adapter
3333 * @adapter: Adapter upon which IPA was previously configured
3334 *
3335 * Return: None
3336 */
3337static void hdd_ipa_clean_hdr(hdd_adapter_t *adapter)
3338{
3339 struct hdd_ipa_priv *hdd_ipa = ghdd_ipa;
3340 int ret;
3341 char name_ipa[IPA_RESOURCE_NAME_MAX];
3342
3343 /* Remove the headers */
3344 snprintf(name_ipa, IPA_RESOURCE_NAME_MAX, "%s%s",
3345 adapter->dev->name, HDD_IPA_IPV4_NAME_EXT);
3346 hdd_ipa_remove_header(name_ipa);
3347
3348 if (hdd_ipa_is_ipv6_enabled(hdd_ipa->hdd_ctx)) {
3349 snprintf(name_ipa, IPA_RESOURCE_NAME_MAX, "%s%s",
3350 adapter->dev->name, HDD_IPA_IPV6_NAME_EXT);
3351 hdd_ipa_remove_header(name_ipa);
3352 }
3353 /* unregister the interface with IPA */
3354 ret = ipa_deregister_intf(adapter->dev->name);
3355 if (ret)
3356 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
3357 "%s: ipa_deregister_intf fail: %d",
3358 adapter->dev->name, ret);
3359}
3360
3361/**
3362 * hdd_ipa_cleanup_iface() - Cleanup IPA on a given interface
3363 * @iface_context: interface-specific IPA context
3364 *
3365 * Return: None
3366 */
3367static void hdd_ipa_cleanup_iface(struct hdd_ipa_iface_context *iface_context)
3368{
3369 if (iface_context == NULL)
3370 return;
3371
3372 hdd_ipa_clean_hdr(iface_context->adapter);
3373
3374 cdf_spin_lock_bh(&iface_context->interface_lock);
3375 iface_context->adapter->ipa_context = NULL;
3376 iface_context->adapter = NULL;
3377 iface_context->tl_context = NULL;
3378 cdf_spin_unlock_bh(&iface_context->interface_lock);
3379 iface_context->ifa_address = 0;
3380 if (!iface_context->hdd_ipa->num_iface) {
3381 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
3382 "NUM INTF 0, Invalid");
3383 CDF_ASSERT(0);
3384 }
3385 iface_context->hdd_ipa->num_iface--;
3386}
3387
3388/**
3389 * hdd_ipa_setup_iface() - Setup IPA on a given interface
3390 * @hdd_ipa: HDD IPA global context
3391 * @adapter: Interface upon which IPA is being setup
3392 * @sta_id: Station ID of the API instance
3393 *
3394 * Return: 0 on success, negative errno value on error
3395 */
static int hdd_ipa_setup_iface(struct hdd_ipa_priv *hdd_ipa,
			       hdd_adapter_t *adapter, uint8_t sta_id)
{
	struct hdd_ipa_iface_context *iface_context = NULL;
	void *tl_context = NULL;
	int i, ret = 0;

	/* Lower layer may send multiple START_BSS_EVENT in DFS mode or during
	 * channel change indication. Since these indications are sent by lower
	 * layer as SAP updates and IPA doesn't have to do anything for these
	 * updates so ignoring!
	 */
	if (WLAN_HDD_SOFTAP == adapter->device_mode && adapter->ipa_context)
		return 0;

	/* Claim the first free per-interface IPA context slot */
	for (i = 0; i < HDD_IPA_MAX_IFACE; i++) {
		if (hdd_ipa->iface_context[i].adapter == NULL) {
			iface_context = &(hdd_ipa->iface_context[i]);
			break;
		}
	}

	if (iface_context == NULL) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
			    "All the IPA interfaces are in use");
		ret = -ENOMEM;
		goto end;
	}

	adapter->ipa_context = iface_context;
	iface_context->adapter = adapter;
	iface_context->sta_id = sta_id;
	tl_context = ol_txrx_get_vdev_by_sta_id(sta_id);

	if (tl_context == NULL) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
			    "Not able to get TL context sta_id: %d", sta_id);
		ret = -EINVAL;
		goto end;
	}

	iface_context->tl_context = tl_context;

	/* Install the partial TX headers (IPv4 and, if enabled, IPv6) */
	ret = hdd_ipa_add_header_info(hdd_ipa, iface_context,
				      adapter->dev->dev_addr);

	if (ret)
		goto end;

	/* Configure the TX and RX pipes filter rules */
	ret = hdd_ipa_register_interface(hdd_ipa, iface_context);
	if (ret)
		goto cleanup_header;

	hdd_ipa->num_iface++;
	return ret;

cleanup_header:

	hdd_ipa_clean_hdr(adapter);
end:
	/*
	 * NOTE(review): hdd_ipa_cleanup_iface() unconditionally decrements
	 * num_iface, but these error paths run before num_iface was
	 * incremented for the slot claimed above — confirm the counter
	 * cannot underflow here.
	 */
	if (iface_context)
		hdd_ipa_cleanup_iface(iface_context);
	return ret;
}
3461
3462/**
3463 * hdd_ipa_msg_free_fn() - Free an IPA message
3464 * @buff: pointer to the IPA message
3465 * @len: length of the IPA message
3466 * @type: type of IPA message
3467 *
3468 * Return: None
3469 */
3470static void hdd_ipa_msg_free_fn(void *buff, uint32_t len, uint32_t type)
3471{
3472 hddLog(LOG1, "msg type:%d, len:%d", type, len);
3473 ghdd_ipa->stats.num_free_msg++;
3474 cdf_mem_free(buff);
3475}
3476
3477/**
 * hdd_ipa_send_mcc_scc_msg() - send IPA WLAN_SWITCH_TO_MCC/SCC message
 * @pHddCtx: Global HDD context
 * @mcc_mode: 0=MCC/1=SCC
3480 *
3481 * Return: 0 on success, negative errno value on error
3482 */
3483int hdd_ipa_send_mcc_scc_msg(hdd_context_t *pHddCtx, bool mcc_mode)
3484{
3485 hdd_adapter_list_node_t *adapter_node = NULL, *next = NULL;
3486 CDF_STATUS status;
3487 hdd_adapter_t *pAdapter;
3488 struct ipa_msg_meta meta;
3489 struct ipa_wlan_msg *msg;
3490 int ret;
3491
3492 if (!hdd_ipa_uc_sta_is_enabled(pHddCtx))
3493 return -EINVAL;
3494
3495 if (!pHddCtx->mcc_mode) {
3496 /* Flush TxRx queue for each adapter before switch to SCC */
3497 status = hdd_get_front_adapter(pHddCtx, &adapter_node);
3498 while (NULL != adapter_node && CDF_STATUS_SUCCESS == status) {
3499 pAdapter = adapter_node->pAdapter;
3500 if (pAdapter->device_mode == WLAN_HDD_INFRA_STATION ||
3501 pAdapter->device_mode == WLAN_HDD_SOFTAP) {
3502 hddLog(CDF_TRACE_LEVEL_INFO,
3503 "MCC->SCC: Flush TxRx queue(d_mode=%d)",
3504 pAdapter->device_mode);
3505 hdd_deinit_tx_rx(pAdapter);
3506 }
3507 status = hdd_get_next_adapter(
3508 pHddCtx, adapter_node, &next);
3509 adapter_node = next;
3510 }
3511 }
3512
3513 /* Send SCC/MCC Switching event to IPA */
3514 meta.msg_len = sizeof(*msg);
3515 msg = cdf_mem_malloc(meta.msg_len);
3516 if (msg == NULL) {
3517 hddLog(LOGE, "msg allocation failed");
3518 return -ENOMEM;
3519 }
3520
3521 meta.msg_type = mcc_mode ?
3522 WLAN_SWITCH_TO_MCC : WLAN_SWITCH_TO_SCC;
3523 hddLog(LOG1, "ipa_send_msg(Evt:%d)", meta.msg_type);
3524
3525 ret = ipa_send_msg(&meta, msg, hdd_ipa_msg_free_fn);
3526
3527 if (ret) {
3528 hddLog(LOGE, "ipa_send_msg(Evt:%d) - fail=%d",
3529 meta.msg_type, ret);
3530 cdf_mem_free(msg);
3531 }
3532
3533 return ret;
3534}
3535
3536/**
3537 * hdd_ipa_wlan_event_to_str() - convert IPA WLAN event to string
3538 * @event: IPA WLAN event to be converted to a string
3539 *
3540 * Return: ASCII string representing the IPA WLAN event
3541 */
3542static inline char *hdd_ipa_wlan_event_to_str(enum ipa_wlan_event event)
3543{
3544 switch (event) {
3545 case WLAN_CLIENT_CONNECT:
3546 return "WLAN_CLIENT_CONNECT";
3547 case WLAN_CLIENT_DISCONNECT:
3548 return "WLAN_CLIENT_DISCONNECT";
3549 case WLAN_CLIENT_POWER_SAVE_MODE:
3550 return "WLAN_CLIENT_POWER_SAVE_MODE";
3551 case WLAN_CLIENT_NORMAL_MODE:
3552 return "WLAN_CLIENT_NORMAL_MODE";
3553 case SW_ROUTING_ENABLE:
3554 return "SW_ROUTING_ENABLE";
3555 case SW_ROUTING_DISABLE:
3556 return "SW_ROUTING_DISABLE";
3557 case WLAN_AP_CONNECT:
3558 return "WLAN_AP_CONNECT";
3559 case WLAN_AP_DISCONNECT:
3560 return "WLAN_AP_DISCONNECT";
3561 case WLAN_STA_CONNECT:
3562 return "WLAN_STA_CONNECT";
3563 case WLAN_STA_DISCONNECT:
3564 return "WLAN_STA_DISCONNECT";
3565 case WLAN_CLIENT_CONNECT_EX:
3566 return "WLAN_CLIENT_CONNECT_EX";
3567
3568 case IPA_WLAN_EVENT_MAX:
3569 default:
3570 return "UNKNOWN";
3571 }
3572}
3573
3574/**
3575 * hdd_ipa_wlan_evt() - IPA event handler
3576 * @adapter: adapter upon which the event was received
3577 * @sta_id: station id for the event
3578 * @type: the event type
3579 * @mac_address: MAC address associated with the event
3580 *
3581 * Return: 0 on success, negative errno value on error
3582 */
3583int hdd_ipa_wlan_evt(hdd_adapter_t *adapter, uint8_t sta_id,
3584 enum ipa_wlan_event type, uint8_t *mac_addr)
3585{
3586 struct hdd_ipa_priv *hdd_ipa = ghdd_ipa;
3587 struct ipa_msg_meta meta;
3588 struct ipa_wlan_msg *msg;
3589 struct ipa_wlan_msg_ex *msg_ex = NULL;
3590 int ret;
3591
3592 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "%s: %s evt, MAC: %pM sta_id: %d",
3593 adapter->dev->name, hdd_ipa_wlan_event_to_str(type),
3594 mac_addr, sta_id);
3595
3596 if (type >= IPA_WLAN_EVENT_MAX)
3597 return -EINVAL;
3598
3599 if (WARN_ON(is_zero_ether_addr(mac_addr)))
3600 return -EINVAL;
3601
3602 if (!hdd_ipa || !hdd_ipa_is_enabled(hdd_ipa->hdd_ctx)) {
3603 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "IPA OFFLOAD NOT ENABLED");
3604 return -EINVAL;
3605 }
3606
3607 if (hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx) &&
3608 !hdd_ipa_uc_sta_is_enabled(hdd_ipa->hdd_ctx) &&
3609 (WLAN_HDD_SOFTAP != adapter->device_mode)) {
3610 return 0;
3611 }
3612
3613 /*
3614 * During IPA UC resource loading/unloading new events can be issued.
3615 * Store the events separately and handle them later.
3616 */
3617 if (hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx) &&
3618 ((hdd_ipa->resource_loading) ||
3619 (hdd_ipa->resource_unloading))) {
Yun Parkf19e07d2015-11-20 11:34:27 -08003620 unsigned int pending_event_count;
3621 struct ipa_uc_pending_event *pending_event = NULL;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003622
Yun Parkf19e07d2015-11-20 11:34:27 -08003623 hdd_err("IPA resource %s inprogress",
3624 hdd_ipa->resource_loading ? "load":"unload");
3625
3626 cdf_mutex_acquire(&hdd_ipa->event_lock);
3627
3628 cdf_list_size(&hdd_ipa->pending_event, &pending_event_count);
3629 if (pending_event_count >= HDD_IPA_MAX_PENDING_EVENT_COUNT) {
3630 hdd_notice("Reached max pending event count");
3631 cdf_list_remove_front(&hdd_ipa->pending_event,
3632 (cdf_list_node_t **)&pending_event);
3633 } else {
3634 pending_event =
3635 (struct ipa_uc_pending_event *)cdf_mem_malloc(
3636 sizeof(struct ipa_uc_pending_event));
3637 }
3638
3639 if (!pending_event) {
3640 hdd_err("Pending event memory alloc fail");
3641 cdf_mutex_release(&hdd_ipa->event_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003642 return -ENOMEM;
3643 }
Yun Parkf19e07d2015-11-20 11:34:27 -08003644
3645 pending_event->adapter = adapter;
3646 pending_event->sta_id = sta_id;
3647 pending_event->type = type;
3648 cdf_mem_copy(pending_event->mac_addr,
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003649 mac_addr,
3650 CDF_MAC_ADDR_SIZE);
3651 cdf_list_insert_back(&hdd_ipa->pending_event,
Yun Parkf19e07d2015-11-20 11:34:27 -08003652 &pending_event->node);
3653
3654 cdf_mutex_release(&hdd_ipa->event_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003655 return 0;
3656 }
3657
3658 hdd_ipa->stats.event[type]++;
3659
Leo Chang3bc8fed2015-11-13 10:59:47 -08003660 meta.msg_type = type;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003661 switch (type) {
3662 case WLAN_STA_CONNECT:
3663 /* STA already connected and without disconnect, connect again
3664 * This is Roaming scenario
3665 */
3666 if (hdd_ipa->sta_connected)
3667 hdd_ipa_cleanup_iface(adapter->ipa_context);
3668
3669 if ((hdd_ipa_uc_sta_is_enabled(hdd_ipa->hdd_ctx)) &&
3670 (!hdd_ipa->sta_connected))
3671 hdd_ipa_uc_offload_enable_disable(adapter,
3672 SIR_STA_RX_DATA_OFFLOAD, 1);
3673
3674 cdf_mutex_acquire(&hdd_ipa->event_lock);
3675
3676 if (!hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
3677 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
3678 "%s: Evt: %d, IPA UC OFFLOAD NOT ENABLED",
3679 msg_ex->name, meta.msg_type);
3680 } else if ((!hdd_ipa->sap_num_connected_sta) &&
3681 (!hdd_ipa->sta_connected)) {
3682 /* Enable IPA UC TX PIPE when STA connected */
3683 ret = hdd_ipa_uc_handle_first_con(hdd_ipa);
Yun Park4cab6ee2015-10-27 11:43:40 -07003684 if (ret) {
3685 cdf_mutex_release(&hdd_ipa->event_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003686 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
3687 "handle 1st con ret %d", ret);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003688 hdd_ipa_uc_offload_enable_disable(adapter,
3689 SIR_STA_RX_DATA_OFFLOAD, 0);
3690 goto end;
3691 }
3692 }
3693 ret = hdd_ipa_setup_iface(hdd_ipa, adapter, sta_id);
3694 if (ret) {
3695 cdf_mutex_release(&hdd_ipa->event_lock);
3696 hdd_ipa_uc_offload_enable_disable(adapter,
3697 SIR_STA_RX_DATA_OFFLOAD, 0);
3698 goto end;
3699
3700#ifdef IPA_UC_OFFLOAD
3701 vdev_to_iface[adapter->sessionId] =
3702 ((struct hdd_ipa_iface_context *)
3703 (adapter->ipa_context))->iface_id;
3704#endif /* IPA_UC_OFFLOAD */
3705 }
3706
3707 cdf_mutex_release(&hdd_ipa->event_lock);
3708
3709 hdd_ipa->sta_connected = 1;
3710 break;
3711
3712 case WLAN_AP_CONNECT:
3713 /* For DFS channel we get two start_bss event (before and after
3714 * CAC). Also when ACS range includes both DFS and non DFS
3715 * channels, we could possibly change channel many times due to
3716 * RADAR detection and chosen channel may not be a DFS channels.
3717 * So dont return error here. Just discard the event.
3718 */
3719 if (adapter->ipa_context)
3720 return 0;
3721
3722 if (hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
3723 hdd_ipa_uc_offload_enable_disable(adapter,
3724 SIR_AP_RX_DATA_OFFLOAD, 1);
3725 }
3726 cdf_mutex_acquire(&hdd_ipa->event_lock);
3727 ret = hdd_ipa_setup_iface(hdd_ipa, adapter, sta_id);
3728 if (ret) {
3729 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
3730 "%s: Evt: %d, Interface setup failed",
3731 msg_ex->name, meta.msg_type);
3732 cdf_mutex_release(&hdd_ipa->event_lock);
3733 goto end;
3734
3735#ifdef IPA_UC_OFFLOAD
3736 vdev_to_iface[adapter->sessionId] =
3737 ((struct hdd_ipa_iface_context *)
3738 (adapter->ipa_context))->iface_id;
3739#endif /* IPA_UC_OFFLOAD */
3740 }
3741 cdf_mutex_release(&hdd_ipa->event_lock);
3742 break;
3743
3744 case WLAN_STA_DISCONNECT:
3745 cdf_mutex_acquire(&hdd_ipa->event_lock);
3746 hdd_ipa_cleanup_iface(adapter->ipa_context);
3747
3748 if (!hdd_ipa->sta_connected) {
3749 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
3750 "%s: Evt: %d, STA already disconnected",
3751 msg_ex->name, meta.msg_type);
3752 cdf_mutex_release(&hdd_ipa->event_lock);
3753 return -EINVAL;
3754 }
3755 hdd_ipa->sta_connected = 0;
3756 if (!hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
3757 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
3758 "%s: IPA UC OFFLOAD NOT ENABLED",
3759 msg_ex->name);
3760 } else {
3761 /* Disable IPA UC TX PIPE when STA disconnected */
3762 if ((!hdd_ipa->sap_num_connected_sta) ||
3763 ((!hdd_ipa->num_iface) &&
3764 (HDD_IPA_UC_NUM_WDI_PIPE ==
3765 hdd_ipa->activated_fw_pipe))) {
3766 hdd_ipa_uc_handle_last_discon(hdd_ipa);
3767 }
3768 }
3769
3770 if (hdd_ipa_uc_sta_is_enabled(hdd_ipa->hdd_ctx)) {
3771 hdd_ipa_uc_offload_enable_disable(adapter,
3772 SIR_STA_RX_DATA_OFFLOAD, 0);
3773 vdev_to_iface[adapter->sessionId] = HDD_IPA_MAX_IFACE;
3774 }
3775
3776 cdf_mutex_release(&hdd_ipa->event_lock);
3777 break;
3778
3779 case WLAN_AP_DISCONNECT:
3780 if (!adapter->ipa_context) {
3781 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
3782 "%s: Evt: %d, SAP already disconnected",
3783 msg_ex->name, meta.msg_type);
3784 return -EINVAL;
3785 }
3786
3787 cdf_mutex_acquire(&hdd_ipa->event_lock);
3788 hdd_ipa_cleanup_iface(adapter->ipa_context);
3789 if ((!hdd_ipa->num_iface) &&
3790 (HDD_IPA_UC_NUM_WDI_PIPE ==
3791 hdd_ipa->activated_fw_pipe)) {
Prashanth Bhatta9e143052015-12-04 11:56:47 -08003792 if (cds_is_driver_unloading()) {
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003793 /*
3794 * We disable WDI pipes directly here since
3795 * IPA_OPCODE_TX/RX_SUSPEND message will not be
3796 * processed when unloading WLAN driver is in
3797 * progress
3798 */
3799 hdd_ipa_uc_disable_pipes(hdd_ipa);
3800 } else {
3801 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
3802 "NO INTF left but still pipe clean up");
3803 hdd_ipa_uc_handle_last_discon(hdd_ipa);
3804 }
3805 }
3806
3807 if (hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
3808 hdd_ipa_uc_offload_enable_disable(adapter,
3809 SIR_AP_RX_DATA_OFFLOAD, 0);
3810 vdev_to_iface[adapter->sessionId] = HDD_IPA_MAX_IFACE;
3811 }
3812 cdf_mutex_release(&hdd_ipa->event_lock);
3813 break;
3814
3815 case WLAN_CLIENT_CONNECT_EX:
3816 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "%d %d",
3817 adapter->dev->ifindex, sta_id);
3818
3819 if (!hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
3820 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
3821 "%s: Evt: %d, IPA UC OFFLOAD NOT ENABLED",
3822 adapter->dev->name, meta.msg_type);
3823 return 0;
3824 }
3825
3826 cdf_mutex_acquire(&hdd_ipa->event_lock);
3827 if (hdd_ipa_uc_find_add_assoc_sta(hdd_ipa,
3828 true, sta_id)) {
3829 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
3830 "%s: STA ID %d found, not valid",
3831 adapter->dev->name, sta_id);
3832 cdf_mutex_release(&hdd_ipa->event_lock);
3833 return 0;
3834 }
Yun Park312f71a2015-12-08 10:22:42 -08003835
3836 /* Enable IPA UC Data PIPEs when first STA connected */
3837 if ((0 == hdd_ipa->sap_num_connected_sta) &&
3838 (!hdd_ipa_uc_sta_is_enabled(hdd_ipa->hdd_ctx) ||
3839 !hdd_ipa->sta_connected)) {
3840 ret = hdd_ipa_uc_handle_first_con(hdd_ipa);
3841 if (ret) {
3842 cdf_mutex_release(&hdd_ipa->event_lock);
3843 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
3844 "%s: handle 1st con ret %d",
3845 adapter->dev->name, ret);
3846 return ret;
3847 }
3848 }
3849
3850 hdd_ipa->sap_num_connected_sta++;
Yun Park312f71a2015-12-08 10:22:42 -08003851
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003852 cdf_mutex_release(&hdd_ipa->event_lock);
3853
3854 meta.msg_type = type;
3855 meta.msg_len = (sizeof(struct ipa_wlan_msg_ex) +
3856 sizeof(struct ipa_wlan_hdr_attrib_val));
3857 msg_ex = cdf_mem_malloc(meta.msg_len);
3858
3859 if (msg_ex == NULL) {
3860 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
3861 "msg_ex allocation failed");
3862 return -ENOMEM;
3863 }
3864 strlcpy(msg_ex->name, adapter->dev->name,
3865 IPA_RESOURCE_NAME_MAX);
3866 msg_ex->num_of_attribs = 1;
3867 msg_ex->attribs[0].attrib_type = WLAN_HDR_ATTRIB_MAC_ADDR;
3868 if (hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
3869 msg_ex->attribs[0].offset =
3870 HDD_IPA_UC_WLAN_HDR_DES_MAC_OFFSET;
3871 } else {
3872 msg_ex->attribs[0].offset =
3873 HDD_IPA_WLAN_HDR_DES_MAC_OFFSET;
3874 }
3875 memcpy(msg_ex->attribs[0].u.mac_addr, mac_addr,
3876 IPA_MAC_ADDR_SIZE);
3877
3878 ret = ipa_send_msg(&meta, msg_ex, hdd_ipa_msg_free_fn);
3879
3880 if (ret) {
3881 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "%s: Evt: %d : %d",
3882 msg_ex->name, meta.msg_type, ret);
3883 cdf_mem_free(msg_ex);
3884 return ret;
3885 }
3886 hdd_ipa->stats.num_send_msg++;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003887 return ret;
3888
3889 case WLAN_CLIENT_DISCONNECT:
3890 if (!hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
3891 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
3892 "%s: IPA UC OFFLOAD NOT ENABLED",
3893 msg_ex->name);
3894 return 0;
3895 }
3896
3897 cdf_mutex_acquire(&hdd_ipa->event_lock);
3898 if (!hdd_ipa_uc_find_add_assoc_sta(hdd_ipa, false, sta_id)) {
3899 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
3900 "%s: STA ID %d NOT found, not valid",
3901 msg_ex->name, sta_id);
3902 cdf_mutex_release(&hdd_ipa->event_lock);
3903 return 0;
3904 }
3905 hdd_ipa->sap_num_connected_sta--;
3906 /* Disable IPA UC TX PIPE when last STA disconnected */
3907 if (!hdd_ipa->sap_num_connected_sta
3908 && (!hdd_ipa_uc_sta_is_enabled(hdd_ipa->hdd_ctx) ||
3909 !hdd_ipa->sta_connected)
3910 && (false == hdd_ipa->resource_unloading)
3911 && (HDD_IPA_UC_NUM_WDI_PIPE ==
3912 hdd_ipa->activated_fw_pipe))
3913 hdd_ipa_uc_handle_last_discon(hdd_ipa);
3914 cdf_mutex_release(&hdd_ipa->event_lock);
3915 break;
3916
3917 default:
3918 return 0;
3919 }
3920
3921 meta.msg_len = sizeof(struct ipa_wlan_msg);
3922 msg = cdf_mem_malloc(meta.msg_len);
3923 if (msg == NULL) {
3924 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "msg allocation failed");
3925 return -ENOMEM;
3926 }
3927
3928 meta.msg_type = type;
3929 strlcpy(msg->name, adapter->dev->name, IPA_RESOURCE_NAME_MAX);
3930 memcpy(msg->mac_addr, mac_addr, ETH_ALEN);
3931
3932 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "%s: Evt: %d",
3933 msg->name, meta.msg_type);
3934
3935 ret = ipa_send_msg(&meta, msg, hdd_ipa_msg_free_fn);
3936
3937 if (ret) {
3938 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "%s: Evt: %d fail:%d",
3939 msg->name, meta.msg_type, ret);
3940 cdf_mem_free(msg);
3941 return ret;
3942 }
3943
3944 hdd_ipa->stats.num_send_msg++;
3945
3946end:
3947 return ret;
3948}
3949
3950/**
3951 * hdd_ipa_rm_state_to_str() - Convert IPA RM state to string
3952 * @state: IPA RM state value
3953 *
3954 * Return: ASCII string representing the IPA RM state
3955 */
3956static inline char *hdd_ipa_rm_state_to_str(enum hdd_ipa_rm_state state)
3957{
3958 switch (state) {
3959 case HDD_IPA_RM_RELEASED:
3960 return "RELEASED";
3961 case HDD_IPA_RM_GRANT_PENDING:
3962 return "GRANT_PENDING";
3963 case HDD_IPA_RM_GRANTED:
3964 return "GRANTED";
3965 }
3966
3967 return "UNKNOWN";
3968}
3969
3970/**
3971 * hdd_ipa_init() - IPA initialization function
3972 * @hdd_ctx: HDD global context
3973 *
3974 * Allocate hdd_ipa resources, ipa pipe resource and register
3975 * wlan interface with IPA module.
3976 *
3977 * Return: CDF_STATUS enumeration
3978 */
3979CDF_STATUS hdd_ipa_init(hdd_context_t *hdd_ctx)
3980{
3981 struct hdd_ipa_priv *hdd_ipa = NULL;
3982 int ret, i;
3983 struct hdd_ipa_iface_context *iface_context = NULL;
3984
3985 if (!hdd_ipa_is_enabled(hdd_ctx))
3986 return CDF_STATUS_SUCCESS;
3987
3988 hdd_ipa = cdf_mem_malloc(sizeof(*hdd_ipa));
3989 if (!hdd_ipa) {
3990 HDD_IPA_LOG(CDF_TRACE_LEVEL_FATAL, "hdd_ipa allocation failed");
Leo Chang3bc8fed2015-11-13 10:59:47 -08003991 goto fail_return;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003992 }
3993
3994 hdd_ctx->hdd_ipa = hdd_ipa;
3995 ghdd_ipa = hdd_ipa;
3996 hdd_ipa->hdd_ctx = hdd_ctx;
3997 hdd_ipa->num_iface = 0;
Leo Chang3bc8fed2015-11-13 10:59:47 -08003998 ol_txrx_ipa_uc_get_resource(cds_get_context(CDF_MODULE_ID_TXRX),
3999 &hdd_ipa->ce_sr_base_paddr,
4000 &hdd_ipa->ce_sr_ring_size,
4001 &hdd_ipa->ce_reg_paddr,
4002 &hdd_ipa->tx_comp_ring_base_paddr,
4003 &hdd_ipa->tx_comp_ring_size,
4004 &hdd_ipa->tx_num_alloc_buffer,
4005 &hdd_ipa->rx_rdy_ring_base_paddr,
4006 &hdd_ipa->rx_rdy_ring_size,
4007 &hdd_ipa->rx_proc_done_idx_paddr,
4008 &hdd_ipa->rx_proc_done_idx_vaddr,
4009 &hdd_ipa->rx2_rdy_ring_base_paddr,
4010 &hdd_ipa->rx2_rdy_ring_size,
4011 &hdd_ipa->rx2_proc_done_idx_paddr,
4012 &hdd_ipa->rx2_proc_done_idx_vaddr);
4013 if ((0 == hdd_ipa->ce_sr_base_paddr) ||
4014 (0 == hdd_ipa->tx_comp_ring_base_paddr) ||
4015 (0 == hdd_ipa->rx_rdy_ring_base_paddr) ||
4016 (0 == hdd_ipa->rx2_rdy_ring_base_paddr)) {
4017 HDD_IPA_LOG(CDF_TRACE_LEVEL_FATAL,
4018 "IPA UC resource alloc fail");
4019 goto fail_get_resource;
4020 }
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004021
4022 /* Create the interface context */
4023 for (i = 0; i < HDD_IPA_MAX_IFACE; i++) {
4024 iface_context = &hdd_ipa->iface_context[i];
4025 iface_context->hdd_ipa = hdd_ipa;
4026 iface_context->cons_client =
4027 hdd_ipa_adapter_2_client[i].cons_client;
4028 iface_context->prod_client =
4029 hdd_ipa_adapter_2_client[i].prod_client;
4030 iface_context->iface_id = i;
4031 iface_context->adapter = NULL;
4032 cdf_spinlock_init(&iface_context->interface_lock);
4033 }
4034
4035#ifdef CONFIG_CNSS
4036 cnss_init_work(&hdd_ipa->pm_work, hdd_ipa_pm_send_pkt_to_tl);
4037#else
4038 INIT_WORK(&hdd_ipa->pm_work, hdd_ipa_pm_send_pkt_to_tl);
4039#endif
4040 cdf_spinlock_init(&hdd_ipa->pm_lock);
4041 cdf_nbuf_queue_init(&hdd_ipa->pm_queue_head);
4042
4043 ret = hdd_ipa_setup_rm(hdd_ipa);
4044 if (ret)
4045 goto fail_setup_rm;
4046
4047 if (hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
4048 hdd_ipa_uc_rt_debug_init(hdd_ctx);
4049 cdf_mem_zero(&hdd_ipa->stats, sizeof(hdd_ipa->stats));
4050 hdd_ipa->sap_num_connected_sta = 0;
4051 hdd_ipa->ipa_tx_packets_diff = 0;
4052 hdd_ipa->ipa_rx_packets_diff = 0;
4053 hdd_ipa->ipa_p_tx_packets = 0;
4054 hdd_ipa->ipa_p_rx_packets = 0;
4055 hdd_ipa->resource_loading = false;
4056 hdd_ipa->resource_unloading = false;
4057 hdd_ipa->sta_connected = 0;
Leo Change3e49442015-10-26 20:07:13 -07004058 hdd_ipa->ipa_pipes_down = true;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004059 /* Setup IPA sys_pipe for MCC */
4060 if (hdd_ipa_uc_sta_is_enabled(hdd_ipa->hdd_ctx)) {
4061 ret = hdd_ipa_setup_sys_pipe(hdd_ipa);
4062 if (ret)
4063 goto fail_create_sys_pipe;
4064 }
4065 hdd_ipa_uc_ol_init(hdd_ctx);
4066 } else {
4067 ret = hdd_ipa_setup_sys_pipe(hdd_ipa);
4068 if (ret)
4069 goto fail_create_sys_pipe;
4070 }
4071
4072 return CDF_STATUS_SUCCESS;
4073
4074fail_create_sys_pipe:
4075 hdd_ipa_destroy_rm_resource(hdd_ipa);
4076fail_setup_rm:
Leo Chang3bc8fed2015-11-13 10:59:47 -08004077 cdf_spinlock_destroy(&hdd_ipa->pm_lock);
4078fail_get_resource:
4079 cdf_mem_free(hdd_ipa);
4080 hdd_ctx->hdd_ipa = NULL;
4081 ghdd_ipa = NULL;
4082fail_return:
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08004083 return CDF_STATUS_E_FAILURE;
4084}
4085
4086/**
Yun Parkf19e07d2015-11-20 11:34:27 -08004087 * hdd_ipa_cleanup_pending_event() - Cleanup IPA pending event list
4088 * @hdd_ipa: pointer to HDD IPA struct
4089 *
4090 * Return: none
4091 */
4092void hdd_ipa_cleanup_pending_event(struct hdd_ipa_priv *hdd_ipa)
4093{
4094 struct ipa_uc_pending_event *pending_event = NULL;
4095
4096 while (cdf_list_remove_front(&hdd_ipa->pending_event,
4097 (cdf_list_node_t **)&pending_event) == CDF_STATUS_SUCCESS) {
4098 cdf_mem_free(pending_event);
4099 }
4100
4101 cdf_list_destroy(&hdd_ipa->pending_event);
4102}
4103
/**
 * hdd_ipa_cleanup - IPA cleanup function
 * @hdd_ctx: HDD global context
 *
 * Releases what hdd_ipa_init() acquired: sys pipes and the IPv4 notifier
 * (non-UC mode), the MCC sys pipe (UC-STA mode), RM resources, the
 * deferred-TX queue and its lock, the per-interface locks and, in UC
 * mode, the WDI pipes plus UC bookkeeping. Finally frees hdd_ipa itself.
 *
 * Return: CDF_STATUS enumeration
 */
CDF_STATUS hdd_ipa_cleanup(hdd_context_t *hdd_ctx)
{
	struct hdd_ipa_priv *hdd_ipa = hdd_ctx->hdd_ipa;
	int i;
	struct hdd_ipa_iface_context *iface_context = NULL;
	cdf_nbuf_t skb;
	struct hdd_ipa_pm_tx_cb *pm_tx_cb = NULL;

	/* Nothing was set up when IPA offload is disabled */
	if (!hdd_ipa_is_enabled(hdd_ctx))
		return CDF_STATUS_SUCCESS;

	if (!hdd_ipa_uc_is_enabled(hdd_ctx)) {
		unregister_inetaddr_notifier(&hdd_ipa->ipv4_notifier);
		hdd_ipa_teardown_sys_pipe(hdd_ipa);
	}

	/* Teardown IPA sys_pipe for MCC */
	if (hdd_ipa_uc_sta_is_enabled(hdd_ipa->hdd_ctx))
		hdd_ipa_teardown_sys_pipe(hdd_ipa);

	hdd_ipa_destroy_rm_resource(hdd_ipa);

#ifdef WLAN_OPEN_SOURCE
	/* Make sure no deferred-TX work is still running before draining */
	cancel_work_sync(&hdd_ipa->pm_work);
#endif

	cdf_spin_lock_bh(&hdd_ipa->pm_lock);

	/*
	 * Drain the deferred-TX queue. The lock is dropped around
	 * ipa_free_skb() — presumably because it is unsafe to call with
	 * the spinlock held (NOTE(review): confirm against IPA driver docs).
	 */
	while (((skb = cdf_nbuf_queue_remove(&hdd_ipa->pm_queue_head)) != NULL)) {
		cdf_spin_unlock_bh(&hdd_ipa->pm_lock);

		pm_tx_cb = (struct hdd_ipa_pm_tx_cb *)skb->cb;
		ipa_free_skb(pm_tx_cb->ipa_tx_desc);

		cdf_spin_lock_bh(&hdd_ipa->pm_lock);
	}
	cdf_spin_unlock_bh(&hdd_ipa->pm_lock);

	cdf_spinlock_destroy(&hdd_ipa->pm_lock);

	/* destroy the interface lock */
	for (i = 0; i < HDD_IPA_MAX_IFACE; i++) {
		iface_context = &hdd_ipa->iface_context[i];
		cdf_spinlock_destroy(&iface_context->interface_lock);
	}

	/* This should never hit but still make sure that there are no pending
	 * descriptor in IPA hardware
	 */
	if (hdd_ipa->pending_hw_desc_cnt != 0) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
			    "IPA Pending write done: %d Waiting!",
			    hdd_ipa->pending_hw_desc_cnt);

		/* Bounded wait: at most 10 sleeps of ~100us each */
		for (i = 0; hdd_ipa->pending_hw_desc_cnt != 0 && i < 10; i++) {
			usleep_range(100, 100);
		}

		/* Report whether HW completed or descriptors leaked */
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
			    "IPA Pending write done: desc: %d %s(%d)!",
			    hdd_ipa->pending_hw_desc_cnt,
			    hdd_ipa->pending_hw_desc_cnt == 0 ? "completed"
			    : "leak", i);
	}
	if (hdd_ipa_uc_is_enabled(hdd_ctx)) {
		hdd_ipa_uc_rt_debug_deinit(hdd_ctx);
		HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
			    "%s: Disconnect TX PIPE", __func__);
		ipa_disconnect_wdi_pipe(hdd_ipa->tx_pipe_handle);
		HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
			    "%s: Disconnect RX PIPE", __func__);
		ipa_disconnect_wdi_pipe(hdd_ipa->rx_pipe_handle);
		cdf_mutex_destroy(&hdd_ipa->event_lock);
		cdf_mutex_destroy(&hdd_ipa->ipa_lock);
		hdd_ipa_cleanup_pending_event(hdd_ipa);

#ifdef WLAN_OPEN_SOURCE
		/* Flush queued uC opcode work before the context is freed */
		for (i = 0; i < HDD_IPA_UC_OPCODE_MAX; i++) {
			cancel_work_sync(&hdd_ipa->uc_op_work[i].work);
			hdd_ipa->uc_op_work[i].msg = NULL;
		}
#endif
	}

	cdf_mem_free(hdd_ipa);
	hdd_ctx->hdd_ipa = NULL;

	return CDF_STATUS_SUCCESS;
}
4199#endif /* IPA_OFFLOAD */