blob: 10d1acb16bdee6caecd1bc8ed635972bcb8278e6 [file] [log] [blame]
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001/*
2 * Copyright (c) 2013-2015 The Linux Foundation. All rights reserved.
3 *
4 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5 *
6 *
7 * Permission to use, copy, modify, and/or distribute this software for
8 * any purpose with or without fee is hereby granted, provided that the
9 * above copyright notice and this permission notice appear in all
10 * copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19 * PERFORMANCE OF THIS SOFTWARE.
20 */
21
22/*
23 * This file was originally distributed by Qualcomm Atheros, Inc.
24 * under proprietary terms before Copyright ownership was assigned
25 * to the Linux Foundation.
26 */
27
28/**
29 * DOC: wlan_hdd_ipa.c
30 *
31 * WLAN HDD and ipa interface implementation
32 * Originally written by Qualcomm Atheros, Inc
33 */
34
35#ifdef IPA_OFFLOAD
36
37/* Include Files */
38#include <wlan_hdd_includes.h>
39#include <wlan_hdd_ipa.h>
40
41#include <linux/etherdevice.h>
42#include <linux/atomic.h>
43#include <linux/netdevice.h>
44#include <linux/skbuff.h>
45#include <linux/list.h>
46#include <linux/debugfs.h>
47#include <linux/inetdevice.h>
48#include <linux/ip.h>
49#include <wlan_hdd_softap_tx_rx.h>
50#include <ol_txrx_osif_api.h>
51
52#include "cds_sched.h"
53
54#include "wma.h"
55#include "wma_api.h"
56
57#define HDD_IPA_DESC_BUFFER_RATIO 4
58#define HDD_IPA_IPV4_NAME_EXT "_ipv4"
59#define HDD_IPA_IPV6_NAME_EXT "_ipv6"
60
61#define HDD_IPA_RX_INACTIVITY_MSEC_DELAY 1000
62#define HDD_IPA_UC_WLAN_HDR_DES_MAC_OFFSET 12
63#define HDD_IPA_UC_WLAN_8023_HDR_SIZE 14
64/* WDI TX and RX PIPE */
65#define HDD_IPA_UC_NUM_WDI_PIPE 2
66#define HDD_IPA_UC_MAX_PENDING_EVENT 33
67
68#define HDD_IPA_UC_DEBUG_DUMMY_MEM_SIZE 32000
69#define HDD_IPA_UC_RT_DEBUG_PERIOD 300
70#define HDD_IPA_UC_RT_DEBUG_BUF_COUNT 30
71#define HDD_IPA_UC_RT_DEBUG_FILL_INTERVAL 10000
72
73#define HDD_IPA_WLAN_HDR_DES_MAC_OFFSET 0
74#define HDD_IPA_MAX_IFACE 3
75#define HDD_IPA_MAX_SYSBAM_PIPE 4
76#define HDD_IPA_RX_PIPE HDD_IPA_MAX_IFACE
77#define HDD_IPA_ENABLE_MASK BIT(0)
78#define HDD_IPA_PRE_FILTER_ENABLE_MASK BIT(1)
79#define HDD_IPA_IPV6_ENABLE_MASK BIT(2)
80#define HDD_IPA_RM_ENABLE_MASK BIT(3)
81#define HDD_IPA_CLK_SCALING_ENABLE_MASK BIT(4)
82#define HDD_IPA_UC_ENABLE_MASK BIT(5)
83#define HDD_IPA_UC_STA_ENABLE_MASK BIT(6)
84#define HDD_IPA_REAL_TIME_DEBUGGING BIT(8)
85
/**
 * hdd_ipa_uc_op_code - operation codes carried in struct op_msg_type
 *
 * Used to index uc_op_work[] in struct hdd_ipa_priv; op_string[] holds the
 * matching human-readable names.
 */
typedef enum {
	HDD_IPA_UC_OPCODE_TX_SUSPEND = 0,
	HDD_IPA_UC_OPCODE_TX_RESUME = 1,
	HDD_IPA_UC_OPCODE_RX_SUSPEND = 2,
	HDD_IPA_UC_OPCODE_RX_RESUME = 3,
	HDD_IPA_UC_OPCODE_STATS = 4,
	/* keep this last */
	HDD_IPA_UC_OPCODE_MAX
} hdd_ipa_uc_op_code;

/**
 * enum - Reason codes for stat query
 *
 * @HDD_IPA_UC_STAT_REASON_NONE: Initial value
 * @HDD_IPA_UC_STAT_REASON_DEBUG: For debug/info
 * @HDD_IPA_UC_STAT_REASON_BW_CAL: For bandwidth calibration
 */
enum {
	HDD_IPA_UC_STAT_REASON_NONE,
	HDD_IPA_UC_STAT_REASON_DEBUG,
	HDD_IPA_UC_STAT_REASON_BW_CAL
};

/**
 * enum hdd_ipa_rm_state - IPA resource manager state
 * @HDD_IPA_RM_RELEASED: PROD pipe resource released
 * @HDD_IPA_RM_GRANT_PENDING: PROD pipe resource requested but not granted yet
 * @HDD_IPA_RM_GRANTED: PROD pipe resource granted
 */
enum hdd_ipa_rm_state {
	HDD_IPA_RM_RELEASED,
	HDD_IPA_RM_GRANT_PENDING,
	HDD_IPA_RM_GRANTED,
};
120
/* 802.2 LLC/SNAP header; eth_type is filled in per-protocol (see ipa_tx_hdr) */
struct llc_snap_hdr {
	uint8_t dsap;
	uint8_t ssap;
	uint8_t resv[4];
	__be16 eth_type;
} __packed;

/* Tx header template for the SW path: 802.3 Ethernet + LLC/SNAP */
struct hdd_ipa_tx_hdr {
	struct ethhdr eth;
	struct llc_snap_hdr llc_snap;
} __packed;

/* First uC Tx descriptor word pair; only the 16-bit length is meaningful */
struct frag_header {
	uint32_t
		length:16,	/* length field is LSB of the FRAG DESC */
		reserved16:16;
	uint32_t reserved32;
} __packed;

/* Second uC Tx descriptor word; carries the destination vdev id */
struct ipa_header {
	uint32_t
		vdev_id:8,	/* vdev_id field is LSB of IPA DESC */
		reserved:24;
} __packed;

/* Tx header template for the uC offload path: descriptors + Ethernet-II */
struct hdd_ipa_uc_tx_hdr {
	struct frag_header frag_hd;
	struct ipa_header ipa_hd;
	struct ethhdr eth;
} __packed;
151
#define HDD_IPA_WLAN_FRAG_HEADER sizeof(struct frag_header)
/*
 * Fix: was defined as sizeof(struct frag_header) — an apparent copy/paste of
 * the line above. struct ipa_header (4 bytes) differs in size from
 * struct frag_header (8 bytes); verify no caller depends on the old value.
 */
#define HDD_IPA_WLAN_IPA_HEADER sizeof(struct ipa_header)
154
/**
 * struct hdd_ipa_cld_hdr - IPA CLD Header
 * @reserved: reserved fields
 * @iface_id: interface ID
 * @sta_id: Station ID
 *
 * Packed 32-bit structure
 * +----------+----------+--------------+--------+
 * | Reserved | QCMAP ID | interface id | STA ID |
 * +----------+----------+--------------+--------+
 */
struct hdd_ipa_cld_hdr {
	uint8_t reserved[2];
	uint8_t iface_id;
	uint8_t sta_id;
} __packed;

/* Rx header for the SW path: CLD header followed by the Ethernet header */
struct hdd_ipa_rx_hdr {
	struct hdd_ipa_cld_hdr cld_hdr;
	struct ethhdr eth;
} __packed;

/* Per-packet context for the PM (power-management) Tx queue — presumably
 * attached to frames queued in pm_queue_head while suspended; confirm against
 * the Tx callback code. */
struct hdd_ipa_pm_tx_cb {
	struct hdd_ipa_iface_context *iface_context;
	struct ipa_rx_data *ipa_tx_desc;
};

/* Rx header for the uC offload path: plain Ethernet header only */
struct hdd_ipa_uc_rx_hdr {
	struct ethhdr eth;
} __packed;
185
/* One IPA system/BAM pipe connection and the parameters used to set it up */
struct hdd_ipa_sys_pipe {
	uint32_t conn_hdl;
	uint8_t conn_hdl_valid;	/* non-zero once conn_hdl holds a live handle */
	struct ipa_sys_connect_params ipa_sys_params;
};

/* Per-interface packet/error counters */
struct hdd_ipa_iface_stats {
	uint64_t num_tx;
	uint64_t num_tx_drop;
	uint64_t num_tx_err;
	uint64_t num_tx_cac_drop;
	uint64_t num_rx_prefilter;
	uint64_t num_rx_ipa_excep;
	uint64_t num_rx_recv;
	uint64_t num_rx_recv_mul;
	uint64_t num_rx_send_desc_err;
	uint64_t max_rx_mul;
};

struct hdd_ipa_priv;

/* Per-WLAN-interface IPA state; one of the HDD_IPA_MAX_IFACE slots in
 * hdd_ipa_priv.iface_context[] */
struct hdd_ipa_iface_context {
	struct hdd_ipa_priv *hdd_ipa;	/* back-pointer to the global context */
	hdd_adapter_t *adapter;
	void *tl_context;

	enum ipa_client_type cons_client;
	enum ipa_client_type prod_client;

	uint8_t iface_id;	/* This iface ID */
	uint8_t sta_id;		/* This iface station ID */
	cdf_spinlock_t interface_lock;
	uint32_t ifa_address;
	struct hdd_ipa_iface_stats stats;
};
221
/* Global (non-per-interface) WLAN-IPA counters */
struct hdd_ipa_stats {
	uint32_t event[IPA_WLAN_EVENT_MAX];	/* count per IPA WLAN event */
	uint64_t num_send_msg;
	uint64_t num_free_msg;

	/* resource-manager activity */
	uint64_t num_rm_grant;
	uint64_t num_rm_release;
	uint64_t num_rm_grant_imm;
	uint64_t num_cons_perf_req;
	uint64_t num_prod_perf_req;

	/* Rx path */
	uint64_t num_rx_drop;
	uint64_t num_rx_ipa_tx_dp;
	uint64_t num_rx_ipa_splice;
	uint64_t num_rx_ipa_loop;
	uint64_t num_rx_ipa_tx_dp_err;
	uint64_t num_rx_ipa_write_done;
	uint64_t num_max_ipa_tx_mul;
	uint64_t num_rx_ipa_hw_maxed_out;
	uint64_t max_pend_q_cnt;

	/* Tx path */
	uint64_t num_tx_comp_cnt;
	uint64_t num_tx_queued;
	uint64_t num_tx_dequeued;
	uint64_t num_max_pm_queue;

	uint64_t num_freeq_empty;
	uint64_t num_pri_freeq_empty;
	uint64_t num_rx_excep;
	uint64_t num_tx_bcmc;
	uint64_t num_tx_bcmc_err;
};

/* One slot of the associated-station map (see assoc_stas_map[]) */
struct ipa_uc_stas_map {
	bool is_reserved;	/* slot in use */
	uint8_t sta_id;		/* station ID occupying the slot */
};

/* Header of an OP message delivered from the uC firmware */
struct op_msg_type {
	uint8_t msg_t;
	uint8_t rsvd;
	uint16_t op_code;	/* one of hdd_ipa_uc_op_code */
	uint16_t len;
	uint16_t rsvd_snd;
};
266
/* Statistics snapshot reported by the uC firmware (WDI Tx-completion and
 * Rx-indication ring state plus packet counters) */
struct ipa_uc_fw_stats {
	uint32_t tx_comp_ring_base;
	uint32_t tx_comp_ring_size;
	uint32_t tx_comp_ring_dbell_addr;
	uint32_t tx_comp_ring_dbell_ind_val;
	uint32_t tx_comp_ring_dbell_cached_val;
	uint32_t tx_pkts_enqueued;
	uint32_t tx_pkts_completed;
	uint32_t tx_is_suspend;
	uint32_t tx_reserved;
	uint32_t rx_ind_ring_base;
	uint32_t rx_ind_ring_size;
	uint32_t rx_ind_ring_dbell_addr;
	uint32_t rx_ind_ring_dbell_ind_val;
	uint32_t rx_ind_ring_dbell_ind_cached_val;
	uint32_t rx_ind_ring_rdidx_addr;
	uint32_t rx_ind_ring_rd_idx_cached_val;
	uint32_t rx_refill_idx;
	uint32_t rx_num_pkts_indicated;
	uint32_t rx_buf_refilled;
	uint32_t rx_num_ind_drop_no_space;
	uint32_t rx_num_ind_drop_no_buf;
	uint32_t rx_is_suspend;
	uint32_t rx_reserved;
};

/* A WLAN event queued (on pending_event) for later delivery to IPA */
struct ipa_uc_pending_event {
	cdf_list_node_t node;	/* linkage into hdd_ipa_priv.pending_event */
	hdd_adapter_t *adapter;
	enum ipa_wlan_event type;
	uint8_t sta_id;
	uint8_t mac_addr[CDF_MAC_ADDR_SIZE];
};
300
/**
 * struct uc_rm_work_struct
 * @work: uC RM work
 * @event: IPA RM event
 */
struct uc_rm_work_struct {
	struct work_struct work;
	enum ipa_rm_event event;
};

/**
 * struct uc_op_work_struct
 * @work: uC OP work
 * @msg: OP message
 */
struct uc_op_work_struct {
	struct work_struct work;
	struct op_msg_type *msg;
};

/* Per-vdev interface index lookup — presumably maps a firmware vdev id to an
 * iface_context[] slot; confirm against the code that populates it. */
static uint8_t vdev_to_iface[CSR_ROAM_SESSION_MAX];

/**
 * struct uc_rt_debug_info
 * @time: system time
 * @ipa_excep_count: IPA exception packet count
 * @rx_drop_count: IPA Rx drop packet count
 * @net_sent_count: IPA Rx packet sent to network stack count
 * @rx_discard_count: IPA Rx discard packet count
 * @rx_mcbc_count: IPA Rx BCMC packet count
 * @tx_mcbc_count: IPA Tx BCMC packet count
 * @tx_fwd_count: IPA Tx forward packet count
 * @rx_destructor_call: IPA Rx packet destructor count
 */
struct uc_rt_debug_info {
	v_TIME_t time;
	uint64_t ipa_excep_count;
	uint64_t rx_drop_count;
	uint64_t net_sent_count;
	uint64_t rx_discard_count;
	uint64_t rx_mcbc_count;
	uint64_t tx_mcbc_count;
	uint64_t tx_fwd_count;
	uint64_t rx_destructor_call;
};
345
/* Global WLAN-IPA state; a single instance is pointed to by ghdd_ipa and
 * hdd_ctx->hdd_ipa */
struct hdd_ipa_priv {
	struct hdd_ipa_sys_pipe sys_pipe[HDD_IPA_MAX_SYSBAM_PIPE];
	struct hdd_ipa_iface_context iface_context[HDD_IPA_MAX_IFACE];
	uint8_t num_iface;
	enum hdd_ipa_rm_state rm_state;
	/*
	 * IPA driver can send RM notifications with IRQ disabled so using cdf
	 * APIs as it is taken care gracefully. Without this, kernel would throw
	 * an warning if spin_lock_bh is used while IRQ is disabled
	 */
	cdf_spinlock_t rm_lock;
	struct uc_rm_work_struct uc_rm_work;
	struct uc_op_work_struct uc_op_work[HDD_IPA_UC_OPCODE_MAX];
	cdf_wake_lock_t wake_lock;
	struct delayed_work wake_lock_work;
	bool wake_lock_released;

	enum ipa_client_type prod_client;

	/* power-management (suspend) Tx queue */
	atomic_t tx_ref_cnt;
	cdf_nbuf_queue_t pm_queue_head;
	struct work_struct pm_work;
	cdf_spinlock_t pm_lock;
	bool suspended;

	/* Tx descriptor free/pending lists, protected by q_lock */
	uint32_t pending_hw_desc_cnt;
	uint32_t hw_desc_cnt;
	spinlock_t q_lock;
	uint32_t freeq_cnt;
	struct list_head free_desc_head;

	uint32_t pend_q_cnt;
	struct list_head pend_desc_head;

	hdd_context_t *hdd_ctx;

	struct dentry *debugfs_dir;
	struct hdd_ipa_stats stats;

	struct notifier_block ipv4_notifier;
	uint32_t curr_prod_bw;
	uint32_t curr_cons_bw;

	/* uC offload (WDI) pipe state */
	uint8_t activated_fw_pipe;
	uint8_t sap_num_connected_sta;
	uint8_t sta_connected;
	uint32_t tx_pipe_handle;
	uint32_t rx_pipe_handle;
	bool resource_loading;
	bool resource_unloading;
	bool pending_cons_req;
	struct ipa_uc_stas_map assoc_stas_map[WLAN_MAX_STA_COUNT];
	cdf_list_t pending_event;	/* queued ipa_uc_pending_event nodes */
	cdf_mutex_t event_lock;
	bool ipa_pipes_down;
	uint32_t ipa_tx_packets_diff;
	uint32_t ipa_rx_packets_diff;
	uint32_t ipa_p_tx_packets;
	uint32_t ipa_p_rx_packets;
	uint32_t stat_req_reason;	/* one of HDD_IPA_UC_STAT_REASON_* */
	uint64_t ipa_tx_forward;
	uint64_t ipa_rx_discard;
	uint64_t ipa_rx_net_send_count;
	uint64_t ipa_rx_internel_drop_count;
	uint64_t ipa_rx_destructor_count;
	/* real-time debug circular buffer, protected by rt_debug_lock */
	cdf_mc_timer_t rt_debug_timer;
	struct uc_rt_debug_info rt_bug_buffer[HDD_IPA_UC_RT_DEBUG_BUF_COUNT];
	unsigned int rt_buf_fill_index;
	cdf_mc_timer_t rt_debug_fill_timer;
	cdf_mutex_t rt_debug_lock;
	cdf_mutex_t ipa_lock;
};
418
419#define HDD_IPA_WLAN_CLD_HDR_LEN sizeof(struct hdd_ipa_cld_hdr)
420#define HDD_IPA_UC_WLAN_CLD_HDR_LEN 0
421#define HDD_IPA_WLAN_TX_HDR_LEN sizeof(struct hdd_ipa_tx_hdr)
422#define HDD_IPA_UC_WLAN_TX_HDR_LEN sizeof(struct hdd_ipa_uc_tx_hdr)
423#define HDD_IPA_WLAN_RX_HDR_LEN sizeof(struct hdd_ipa_rx_hdr)
424#define HDD_IPA_UC_WLAN_RX_HDR_LEN sizeof(struct hdd_ipa_uc_rx_hdr)
425
426#define HDD_IPA_GET_IFACE_ID(_data) \
427 (((struct hdd_ipa_cld_hdr *) (_data))->iface_id)
428
429#define HDD_IPA_LOG(LVL, fmt, args ...) \
430 CDF_TRACE(CDF_MODULE_ID_HDD, LVL, \
431 "%s:%d: "fmt, __func__, __LINE__, ## args)
432
433#define HDD_IPA_DBG_DUMP(_lvl, _prefix, _buf, _len) \
434 do { \
435 CDF_TRACE(CDF_MODULE_ID_HDD, _lvl, "%s:", _prefix); \
436 CDF_TRACE_HEX_DUMP(CDF_MODULE_ID_HDD, _lvl, _buf, _len); \
437 } while (0)
438
439#define HDD_IPA_IS_CONFIG_ENABLED(_hdd_ctx, _mask) \
440 (((_hdd_ctx)->config->IpaConfig & (_mask)) == (_mask))
441
442#define HDD_IPA_INCREASE_INTERNAL_DROP_COUNT(hdd_ipa) \
443 do { \
444 hdd_ipa->ipa_rx_internel_drop_count++; \
445 } while (0)
446#define HDD_IPA_INCREASE_NET_SEND_COUNT(hdd_ipa) \
447 do { \
448 hdd_ipa->ipa_rx_net_send_count++; \
449 } while (0)
450#define HDD_BW_GET_DIFF(_x, _y) (unsigned long)((ULONG_MAX - (_y)) + (_x) + 1)
451
/* Per-interface IPA client assignment: each of the HDD_IPA_MAX_IFACE slots
 * gets its own WLANx_CONS client while all share the WLAN1_PROD client. */
static struct hdd_ipa_adapter_2_client {
	enum ipa_client_type cons_client;
	enum ipa_client_type prod_client;
} hdd_ipa_adapter_2_client[HDD_IPA_MAX_IFACE] = {
	{
		IPA_CLIENT_WLAN2_CONS, IPA_CLIENT_WLAN1_PROD
	}, {
		IPA_CLIENT_WLAN3_CONS, IPA_CLIENT_WLAN1_PROD
	}, {
		IPA_CLIENT_WLAN4_CONS, IPA_CLIENT_WLAN1_PROD
	},
};
464
465/* For Tx pipes, use Ethernet-II Header format */
466struct hdd_ipa_uc_tx_hdr ipa_uc_tx_hdr = {
467 {
468 0x00000000,
469 0x00000000
470 },
471 {
472 0x00000000
473 },
474 {
475 {0x00, 0x03, 0x7f, 0xaa, 0xbb, 0xcc},
476 {0x00, 0x03, 0x7f, 0xdd, 0xee, 0xff},
477 0x0008
478 }
479};
480
481/* For Tx pipes, use 802.3 Header format */
482static struct hdd_ipa_tx_hdr ipa_tx_hdr = {
483 {
484 {0xDE, 0xAD, 0xBE, 0xEF, 0xFF, 0xFF},
485 {0xDE, 0xAD, 0xBE, 0xEF, 0xFF, 0xFF},
486 0x00 /* length can be zero */
487 },
488 {
489 /* LLC SNAP header 8 bytes */
490 0xaa, 0xaa,
491 {0x03, 0x00, 0x00, 0x00},
492 0x0008 /* type value(2 bytes) ,filled by wlan */
493 /* 0x0800 - IPV4, 0x86dd - IPV6 */
494 }
495};
496
/* Names for hdd_ipa_uc_op_code values, indexed by opcode */
static const char *op_string[] = {
	"TX_SUSPEND",
	"TX_RESUME",
	"RX_SUSPEND",
	"RX_RESUME",
	"STATS",
};

/* Singleton WLAN-IPA context; used where no context pointer is available
 * (e.g. hdd_ipa_uc_rt_debug_destructor) */
static struct hdd_ipa_priv *ghdd_ipa;
506
507/* Local Function Prototypes */
508static void hdd_ipa_i2w_cb(void *priv, enum ipa_dp_evt_type evt,
509 unsigned long data);
510static void hdd_ipa_w2i_cb(void *priv, enum ipa_dp_evt_type evt,
511 unsigned long data);
512
513static void hdd_ipa_cleanup_iface(struct hdd_ipa_iface_context *iface_context);
514
515/**
516 * hdd_ipa_is_enabled() - Is IPA enabled?
517 * @hdd_ctx: Global HDD context
518 *
519 * Return: true if IPA is enabled, false otherwise
520 */
521bool hdd_ipa_is_enabled(hdd_context_t *hdd_ctx)
522{
523 return HDD_IPA_IS_CONFIG_ENABLED(hdd_ctx, HDD_IPA_ENABLE_MASK);
524}
525
526/**
527 * hdd_ipa_uc_is_enabled() - Is IPA uC offload enabled?
528 * @hdd_ctx: Global HDD context
529 *
530 * Return: true if IPA uC offload is enabled, false otherwise
531 */
532bool hdd_ipa_uc_is_enabled(hdd_context_t *hdd_ctx)
533{
534 return HDD_IPA_IS_CONFIG_ENABLED(hdd_ctx, HDD_IPA_UC_ENABLE_MASK);
535}
536
537/**
538 * hdd_ipa_uc_sta_is_enabled() - Is STA mode IPA uC offload enabled?
539 * @hdd_ctx: Global HDD context
540 *
541 * Return: true if STA mode IPA uC offload is enabled, false otherwise
542 */
543static inline bool hdd_ipa_uc_sta_is_enabled(hdd_context_t *hdd_ctx)
544{
545 return HDD_IPA_IS_CONFIG_ENABLED(hdd_ctx, HDD_IPA_UC_STA_ENABLE_MASK);
546}
547
548/**
549 * hdd_ipa_is_pre_filter_enabled() - Is IPA pre-filter enabled?
550 * @hdd_ipa: Global HDD IPA context
551 *
552 * Return: true if pre-filter is enabled, otherwise false
553 */
554static inline bool hdd_ipa_is_pre_filter_enabled(hdd_context_t *hdd_ctx)
555{
556 return HDD_IPA_IS_CONFIG_ENABLED(hdd_ctx,
557 HDD_IPA_PRE_FILTER_ENABLE_MASK);
558}
559
560/**
561 * hdd_ipa_is_ipv6_enabled() - Is IPA IPv6 enabled?
562 * @hdd_ipa: Global HDD IPA context
563 *
564 * Return: true if IPv6 is enabled, otherwise false
565 */
566static inline bool hdd_ipa_is_ipv6_enabled(hdd_context_t *hdd_ctx)
567{
568 return HDD_IPA_IS_CONFIG_ENABLED(hdd_ctx, HDD_IPA_IPV6_ENABLE_MASK);
569}
570
571/**
572 * hdd_ipa_is_rm_enabled() - Is IPA resource manager enabled?
573 * @hdd_ipa: Global HDD IPA context
574 *
575 * Return: true if resource manager is enabled, otherwise false
576 */
577static inline bool hdd_ipa_is_rm_enabled(hdd_context_t *hdd_ctx)
578{
579 return HDD_IPA_IS_CONFIG_ENABLED(hdd_ctx, HDD_IPA_RM_ENABLE_MASK);
580}
581
582/**
583 * hdd_ipa_is_rt_debugging_enabled() - Is IPA real-time debug enabled?
584 * @hdd_ipa: Global HDD IPA context
585 *
586 * Return: true if resource manager is enabled, otherwise false
587 */
588static inline bool hdd_ipa_is_rt_debugging_enabled(hdd_context_t *hdd_ctx)
589{
590 return HDD_IPA_IS_CONFIG_ENABLED(hdd_ctx, HDD_IPA_REAL_TIME_DEBUGGING);
591}
592
593/**
594 * hdd_ipa_is_clk_scaling_enabled() - Is IPA clock scaling enabled?
595 * @hdd_ipa: Global HDD IPA context
596 *
597 * Return: true if clock scaling is enabled, otherwise false
598 */
599static inline bool hdd_ipa_is_clk_scaling_enabled(hdd_context_t *hdd_ctx)
600{
601 return HDD_IPA_IS_CONFIG_ENABLED(hdd_ctx,
602 HDD_IPA_CLK_SCALING_ENABLE_MASK |
603 HDD_IPA_RM_ENABLE_MASK);
604}
605
606/**
607 * hdd_ipa_uc_rt_debug_host_fill - fill rt debug buffer
608 * @ctext: pointer to hdd context.
609 *
610 * If rt debug enabled, periodically called, and fill debug buffer
611 *
612 * Return: none
613 */
614static void hdd_ipa_uc_rt_debug_host_fill(void *ctext)
615{
616 hdd_context_t *hdd_ctx = (hdd_context_t *)ctext;
617 struct hdd_ipa_priv *hdd_ipa;
618 struct uc_rt_debug_info *dump_info = NULL;
619
620 if (wlan_hdd_validate_context(hdd_ctx))
621 return;
622
623 if (!hdd_ctx->hdd_ipa || !hdd_ipa_uc_is_enabled(hdd_ctx)) {
624 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
625 "%s: IPA UC is not enabled", __func__);
626 return;
627 }
628
629 hdd_ipa = (struct hdd_ipa_priv *)hdd_ctx->hdd_ipa;
630
631 cdf_mutex_acquire(&hdd_ipa->rt_debug_lock);
632 dump_info = &hdd_ipa->rt_bug_buffer[
633 hdd_ipa->rt_buf_fill_index % HDD_IPA_UC_RT_DEBUG_BUF_COUNT];
634
635 dump_info->time = cdf_mc_timer_get_system_time();
636 dump_info->ipa_excep_count = hdd_ipa->stats.num_rx_excep;
637 dump_info->rx_drop_count = hdd_ipa->ipa_rx_internel_drop_count;
638 dump_info->net_sent_count = hdd_ipa->ipa_rx_net_send_count;
639 dump_info->rx_discard_count = hdd_ipa->ipa_rx_discard;
640 dump_info->tx_mcbc_count = hdd_ipa->stats.num_tx_bcmc;
641 dump_info->tx_fwd_count = hdd_ipa->ipa_tx_forward;
642 dump_info->rx_destructor_call = hdd_ipa->ipa_rx_destructor_count;
643 hdd_ipa->rt_buf_fill_index++;
644 cdf_mutex_release(&hdd_ipa->rt_debug_lock);
645
646 cdf_mc_timer_start(&hdd_ipa->rt_debug_fill_timer,
647 HDD_IPA_UC_RT_DEBUG_FILL_INTERVAL);
648}
649
650/**
651 * hdd_ipa_uc_rt_debug_host_dump - dump rt debug buffer
652 * @hdd_ctx: pointer to hdd context.
653 *
654 * If rt debug enabled, dump debug buffer contents based on requirement
655 *
656 * Return: none
657 */
658void hdd_ipa_uc_rt_debug_host_dump(hdd_context_t *hdd_ctx)
659{
660 struct hdd_ipa_priv *hdd_ipa;
661 unsigned int dump_count;
662 unsigned int dump_index;
663 struct uc_rt_debug_info *dump_info = NULL;
664
665 if (wlan_hdd_validate_context(hdd_ctx))
666 return;
667
668 hdd_ipa = hdd_ctx->hdd_ipa;
669 if (!hdd_ipa || !hdd_ipa_uc_is_enabled(hdd_ctx)) {
670 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
671 "%s: IPA UC is not enabled", __func__);
672 return;
673 }
674
675 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
676 "========= WLAN-IPA DEBUG BUF DUMP ==========\n");
677 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
678 " TM : EXEP : DROP : NETS : MCBC : TXFD : DSTR : DSCD\n");
679
680 cdf_mutex_acquire(&hdd_ipa->rt_debug_lock);
681 for (dump_count = 0;
682 dump_count < HDD_IPA_UC_RT_DEBUG_BUF_COUNT;
683 dump_count++) {
684 dump_index = (hdd_ipa->rt_buf_fill_index + dump_count) %
685 HDD_IPA_UC_RT_DEBUG_BUF_COUNT;
686 dump_info = &hdd_ipa->rt_bug_buffer[dump_index];
687 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
688 "%12lu:%10llu:%10llu:%10llu:%10llu:%10llu:%10llu:%10llu\n",
689 dump_info->time, dump_info->ipa_excep_count,
690 dump_info->rx_drop_count, dump_info->net_sent_count,
691 dump_info->tx_mcbc_count, dump_info->tx_fwd_count,
692 dump_info->rx_destructor_call,
693 dump_info->rx_discard_count);
694 }
695 cdf_mutex_release(&hdd_ipa->rt_debug_lock);
696 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
697 "======= WLAN-IPA DEBUG BUF DUMP END ========\n");
698}
699
700/**
701 * hdd_ipa_uc_rt_debug_handler - periodic memory health monitor handler
702 * @ctext: pointer to hdd context.
703 *
704 * periodically called by timer expire
705 * will try to alloc dummy memory and detect out of memory condition
706 * if out of memory detected, dump wlan-ipa stats
707 *
708 * Return: none
709 */
710static void hdd_ipa_uc_rt_debug_handler(void *ctext)
711{
712 hdd_context_t *hdd_ctx = (hdd_context_t *)ctext;
713 struct hdd_ipa_priv *hdd_ipa = (struct hdd_ipa_priv *)hdd_ctx->hdd_ipa;
714 void *dummy_ptr = NULL;
715
716 if (wlan_hdd_validate_context(hdd_ctx))
717 return;
718
719 if (!hdd_ipa_is_rt_debugging_enabled(hdd_ctx)) {
720 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
721 "%s: IPA RT debug is not enabled", __func__);
722 return;
723 }
724
725 /* Allocate dummy buffer periodically and free immediately. this will
726 * proactively detect OOM and if allocation fails dump ipa stats
727 */
728 dummy_ptr = kmalloc(HDD_IPA_UC_DEBUG_DUMMY_MEM_SIZE,
729 GFP_KERNEL | GFP_ATOMIC);
730 if (!dummy_ptr) {
731 HDD_IPA_LOG(CDF_TRACE_LEVEL_FATAL,
732 "%s: Dummy alloc fail", __func__);
733 hdd_ipa_uc_rt_debug_host_dump(hdd_ctx);
734 hdd_ipa_uc_stat_request(
735 hdd_get_adapter(hdd_ctx, WLAN_HDD_SOFTAP), 1);
736 } else {
737 kfree(dummy_ptr);
738 }
739
740 cdf_mc_timer_start(&hdd_ipa->rt_debug_timer,
741 HDD_IPA_UC_RT_DEBUG_PERIOD);
742}
743
744/**
745 * hdd_ipa_uc_rt_debug_destructor - called by data packet free
746 * @skb: packet pinter
747 *
748 * when free data packet, will be invoked by wlan client and will increase
749 * free counter
750 *
751 * Return: none
752 */
753void hdd_ipa_uc_rt_debug_destructor(struct sk_buff *skb)
754{
755 if (!ghdd_ipa) {
756 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
757 "%s: invalid hdd context", __func__);
758 return;
759 }
760
761 ghdd_ipa->ipa_rx_destructor_count++;
762}
763
764/**
765 * hdd_ipa_uc_rt_debug_deinit - remove resources to handle rt debugging
766 * @hdd_ctx: hdd main context
767 *
768 * free all rt debugging resources
769 *
770 * Return: none
771 */
772static void hdd_ipa_uc_rt_debug_deinit(hdd_context_t *hdd_ctx)
773{
774 struct hdd_ipa_priv *hdd_ipa = (struct hdd_ipa_priv *)hdd_ctx->hdd_ipa;
775
776 if (CDF_TIMER_STATE_STOPPED !=
777 cdf_mc_timer_get_current_state(&hdd_ipa->rt_debug_fill_timer)) {
778 cdf_mc_timer_stop(&hdd_ipa->rt_debug_fill_timer);
779 }
780 cdf_mc_timer_destroy(&hdd_ipa->rt_debug_fill_timer);
781 cdf_mutex_destroy(&hdd_ipa->rt_debug_lock);
782
783 if (!hdd_ipa_is_rt_debugging_enabled(hdd_ctx)) {
784 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
785 "%s: IPA RT debug is not enabled", __func__);
786 return;
787 }
788
789 if (CDF_TIMER_STATE_STOPPED !=
790 cdf_mc_timer_get_current_state(&hdd_ipa->rt_debug_timer)) {
791 cdf_mc_timer_stop(&hdd_ipa->rt_debug_timer);
792 }
793 cdf_mc_timer_destroy(&hdd_ipa->rt_debug_timer);
794}
795
796/**
797 * hdd_ipa_uc_rt_debug_init - intialize resources to handle rt debugging
798 * @hdd_ctx: hdd main context
799 *
800 * alloc and initialize all rt debugging resources
801 *
802 * Return: none
803 */
804static void hdd_ipa_uc_rt_debug_init(hdd_context_t *hdd_ctx)
805{
806 struct hdd_ipa_priv *hdd_ipa = (struct hdd_ipa_priv *)hdd_ctx->hdd_ipa;
807
808 cdf_mutex_init(&hdd_ipa->rt_debug_lock);
809 cdf_mc_timer_init(&hdd_ipa->rt_debug_fill_timer, CDF_TIMER_TYPE_SW,
810 hdd_ipa_uc_rt_debug_host_fill, (void *)hdd_ctx);
811 hdd_ipa->rt_buf_fill_index = 0;
812 cdf_mem_zero(hdd_ipa->rt_bug_buffer,
813 sizeof(struct uc_rt_debug_info) *
814 HDD_IPA_UC_RT_DEBUG_BUF_COUNT);
815 hdd_ipa->ipa_tx_forward = 0;
816 hdd_ipa->ipa_rx_discard = 0;
817 hdd_ipa->ipa_rx_net_send_count = 0;
818 hdd_ipa->ipa_rx_internel_drop_count = 0;
819 hdd_ipa->ipa_rx_destructor_count = 0;
820
821 cdf_mc_timer_start(&hdd_ipa->rt_debug_fill_timer,
822 HDD_IPA_UC_RT_DEBUG_FILL_INTERVAL);
823
824 /* Reatime debug enable on feature enable */
825 if (!hdd_ipa_is_rt_debugging_enabled(hdd_ctx)) {
826 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
827 "%s: IPA RT debug is not enabled", __func__);
828 return;
829 }
830 cdf_mc_timer_init(&hdd_ipa->rt_debug_timer, CDF_TIMER_TYPE_SW,
831 hdd_ipa_uc_rt_debug_handler, (void *)hdd_ctx);
832 cdf_mc_timer_start(&hdd_ipa->rt_debug_timer,
833 HDD_IPA_UC_RT_DEBUG_PERIOD);
834
835}
836
837/**
838 * hdd_ipa_uc_stat_query() - Query the IPA stats
839 * @hdd_ctx: Global HDD context
840 * @ipa_tx_diff: tx packet count diff from previous
841 * tx packet count
842 * @ipa_rx_diff: rx packet count diff from previous
843 * rx packet count
844 *
845 * Return: true if IPA is enabled, false otherwise
846 */
847void hdd_ipa_uc_stat_query(hdd_context_t *pHddCtx,
848 uint32_t *ipa_tx_diff, uint32_t *ipa_rx_diff)
849{
850 struct hdd_ipa_priv *hdd_ipa;
851
852 hdd_ipa = (struct hdd_ipa_priv *)pHddCtx->hdd_ipa;
853 *ipa_tx_diff = 0;
854 *ipa_rx_diff = 0;
855
856 if (!hdd_ipa_is_enabled(pHddCtx) ||
857 !(hdd_ipa_uc_is_enabled(pHddCtx))) {
858 return;
859 }
860
Yun Parke59b3912015-11-09 13:19:06 -0800861 cdf_mutex_acquire(&hdd_ipa->ipa_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800862 if ((HDD_IPA_UC_NUM_WDI_PIPE == hdd_ipa->activated_fw_pipe) &&
863 (false == hdd_ipa->resource_loading)) {
864 *ipa_tx_diff = hdd_ipa->ipa_tx_packets_diff;
865 *ipa_rx_diff = hdd_ipa->ipa_rx_packets_diff;
866 HDD_IPA_LOG(LOG1, "STAT Query TX DIFF %d, RX DIFF %d",
867 *ipa_tx_diff, *ipa_rx_diff);
868 }
Yun Parke59b3912015-11-09 13:19:06 -0800869 cdf_mutex_release(&hdd_ipa->ipa_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800870 return;
871}
872
873/**
874 * hdd_ipa_uc_stat_request() - Get IPA stats from IPA.
875 * @adapter: network adapter
876 * @reason: STAT REQ Reason
877 *
878 * Return: None
879 */
880void hdd_ipa_uc_stat_request(hdd_adapter_t *adapter, uint8_t reason)
881{
882 hdd_context_t *pHddCtx;
883 struct hdd_ipa_priv *hdd_ipa;
884
885 if (!adapter) {
886 return;
887 }
888
889 pHddCtx = (hdd_context_t *)adapter->pHddCtx;
890 hdd_ipa = (struct hdd_ipa_priv *)pHddCtx->hdd_ipa;
891 if (!hdd_ipa_is_enabled(pHddCtx) ||
892 !(hdd_ipa_uc_is_enabled(pHddCtx))) {
893 return;
894 }
895
896 HDD_IPA_LOG(LOG1, "STAT REQ Reason %d", reason);
Yun Parke59b3912015-11-09 13:19:06 -0800897 cdf_mutex_acquire(&hdd_ipa->ipa_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800898 if ((HDD_IPA_UC_NUM_WDI_PIPE == hdd_ipa->activated_fw_pipe) &&
899 (false == hdd_ipa->resource_loading)) {
900 hdd_ipa->stat_req_reason = reason;
901 wma_cli_set_command(
902 (int)adapter->sessionId,
903 (int)WMA_VDEV_TXRX_GET_IPA_UC_FW_STATS_CMDID,
904 0, VDEV_CMD);
905 }
Yun Parke59b3912015-11-09 13:19:06 -0800906 cdf_mutex_release(&hdd_ipa->ipa_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800907}
908
909/**
910 * hdd_ipa_uc_find_add_assoc_sta() - Find associated station
911 * @hdd_ipa: Global HDD IPA context
912 * @sta_add: Should station be added
913 * @sta_id: ID of the station being queried
914 *
915 * Return: true if the station was found
916 */
917static bool hdd_ipa_uc_find_add_assoc_sta(struct hdd_ipa_priv *hdd_ipa,
918 bool sta_add, uint8_t sta_id)
919{
920 bool sta_found = false;
921 uint8_t idx;
922 for (idx = 0; idx < WLAN_MAX_STA_COUNT; idx++) {
923 if ((hdd_ipa->assoc_stas_map[idx].is_reserved) &&
924 (hdd_ipa->assoc_stas_map[idx].sta_id == sta_id)) {
925 sta_found = true;
926 break;
927 }
928 }
929 if (sta_add && sta_found) {
930 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
931 "%s: STA ID %d already exist, cannot add",
932 __func__, sta_id);
933 return sta_found;
934 }
935 if (sta_add) {
936 for (idx = 0; idx < WLAN_MAX_STA_COUNT; idx++) {
937 if (!hdd_ipa->assoc_stas_map[idx].is_reserved) {
938 hdd_ipa->assoc_stas_map[idx].is_reserved = true;
939 hdd_ipa->assoc_stas_map[idx].sta_id = sta_id;
940 return sta_found;
941 }
942 }
943 }
944 if (!sta_add && !sta_found) {
945 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
946 "%s: STA ID %d does not exist, cannot delete",
947 __func__, sta_id);
948 return sta_found;
949 }
950 if (!sta_add) {
951 for (idx = 0; idx < WLAN_MAX_STA_COUNT; idx++) {
952 if ((hdd_ipa->assoc_stas_map[idx].is_reserved) &&
953 (hdd_ipa->assoc_stas_map[idx].sta_id == sta_id)) {
954 hdd_ipa->assoc_stas_map[idx].is_reserved =
955 false;
956 hdd_ipa->assoc_stas_map[idx].sta_id = 0xFF;
957 return sta_found;
958 }
959 }
960 }
961 return sta_found;
962}
963
964/**
965 * hdd_ipa_uc_enable_pipes() - Enable IPA uC pipes
966 * @hdd_ipa: Global HDD IPA context
967 *
968 * Return: 0 on success, negative errno if error
969 */
970static int hdd_ipa_uc_enable_pipes(struct hdd_ipa_priv *hdd_ipa)
971{
972 int result;
973 p_cds_contextType cds_ctx = hdd_ipa->hdd_ctx->pcds_context;
974
975 /* ACTIVATE TX PIPE */
976 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "%s: Enable TX PIPE", __func__);
977 result = ipa_enable_wdi_pipe(hdd_ipa->tx_pipe_handle);
978 if (result) {
979 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
980 "%s: Enable TX PIPE fail, code %d",
981 __func__, result);
982 return result;
983 }
984 result = ipa_resume_wdi_pipe(hdd_ipa->tx_pipe_handle);
985 if (result) {
986 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
987 "%s: Resume TX PIPE fail, code %d",
988 __func__, result);
989 return result;
990 }
991 ol_txrx_ipa_uc_set_active(cds_ctx->pdev_txrx_ctx, true, true);
992
993 /* ACTIVATE RX PIPE */
994 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "%s: Enable RX PIPE", __func__);
995 result = ipa_enable_wdi_pipe(hdd_ipa->rx_pipe_handle);
996 if (result) {
997 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
998 "%s: Enable RX PIPE fail, code %d",
999 __func__, result);
1000 return result;
1001 }
1002 result = ipa_resume_wdi_pipe(hdd_ipa->rx_pipe_handle);
1003 if (result) {
1004 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
1005 "%s: Resume RX PIPE fail, code %d",
1006 __func__, result);
1007 return result;
1008 }
1009 ol_txrx_ipa_uc_set_active(cds_ctx->pdev_txrx_ctx, true, false);
Leo Change3e49442015-10-26 20:07:13 -07001010 hdd_ipa->ipa_pipes_down = false;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001011 return 0;
1012}
1013
1014/**
1015 * hdd_ipa_uc_disable_pipes() - Disable IPA uC pipes
1016 * @hdd_ipa: Global HDD IPA context
1017 *
1018 * Return: 0 on success, negative errno if error
1019 */
1020static int hdd_ipa_uc_disable_pipes(struct hdd_ipa_priv *hdd_ipa)
1021{
1022 int result;
1023
Leo Change3e49442015-10-26 20:07:13 -07001024 hdd_ipa->ipa_pipes_down = true;
1025
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001026 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "%s: Disable RX PIPE", __func__);
1027 result = ipa_suspend_wdi_pipe(hdd_ipa->rx_pipe_handle);
1028 if (result) {
1029 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
1030 "%s: Suspend RX PIPE fail, code %d",
1031 __func__, result);
1032 return result;
1033 }
1034 result = ipa_disable_wdi_pipe(hdd_ipa->rx_pipe_handle);
1035 if (result) {
1036 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
1037 "%s: Disable RX PIPE fail, code %d",
1038 __func__, result);
1039 return result;
1040 }
1041
1042 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "%s: Disable TX PIPE", __func__);
1043 result = ipa_suspend_wdi_pipe(hdd_ipa->tx_pipe_handle);
1044 if (result) {
1045 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
1046 "%s: Suspend TX PIPE fail, code %d",
1047 __func__, result);
1048 return result;
1049 }
1050 result = ipa_disable_wdi_pipe(hdd_ipa->tx_pipe_handle);
1051 if (result) {
1052 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
1053 "%s: Disable TX PIPE fail, code %d",
1054 __func__, result);
1055 return result;
1056 }
1057
1058 return 0;
1059}
1060
1061/**
1062 * hdd_ipa_uc_handle_first_con() - Handle first uC IPA connection
1063 * @hdd_ipa: Global HDD IPA context
1064 *
1065 * Return: 0 on success, negative errno if error
1066 */
1067static int hdd_ipa_uc_handle_first_con(struct hdd_ipa_priv *hdd_ipa)
1068{
1069 hdd_ipa->activated_fw_pipe = 0;
1070 hdd_ipa->resource_loading = true;
1071 /* If RM feature enabled
1072 * Request PROD Resource first
1073 * PROD resource may return sync or async manners */
1074 if ((hdd_ipa_is_rm_enabled(hdd_ipa->hdd_ctx)) &&
1075 (!ipa_rm_request_resource(IPA_RM_RESOURCE_WLAN_PROD))) {
1076 /* RM PROD request sync return
1077 * enable pipe immediately */
1078 if (hdd_ipa_uc_enable_pipes(hdd_ipa)) {
1079 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
1080 "%s: IPA WDI Pipes activate fail",
1081 __func__);
1082 hdd_ipa->resource_loading = false;
1083 return -EBUSY;
1084 }
1085 } else {
1086 /* RM Disabled
1087 * Just enabled all the PIPEs */
1088 if (hdd_ipa_uc_enable_pipes(hdd_ipa)) {
1089 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
1090 "%s: IPA WDI Pipes activate fail",
1091 __func__);
1092 hdd_ipa->resource_loading = false;
1093 return -EBUSY;
1094 }
1095 hdd_ipa->resource_loading = false;
1096 }
1097 return 0;
1098}
1099
1100/**
1101 * hdd_ipa_uc_handle_last_discon() - Handle last uC IPA disconnection
1102 * @hdd_ipa: Global HDD IPA context
1103 *
1104 * Return: None
1105 */
1106static void hdd_ipa_uc_handle_last_discon(struct hdd_ipa_priv *hdd_ipa)
1107{
1108 p_cds_contextType cds_ctx = hdd_ipa->hdd_ctx->pcds_context;
1109
1110 hdd_ipa->resource_unloading = true;
1111 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "%s: Disable FW RX PIPE", __func__);
1112 ol_txrx_ipa_uc_set_active(cds_ctx->pdev_txrx_ctx, false, false);
1113 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "%s: Disable FW TX PIPE", __func__);
1114 ol_txrx_ipa_uc_set_active(cds_ctx->pdev_txrx_ctx, false, true);
1115}
1116
1117/**
1118 * hdd_ipa_uc_rm_notify_handler() - IPA uC resource notification handler
1119 * @context: User context registered with TL (the IPA Global context is
1120 * registered
1121 * @rxpkt: Packet containing the notification
1122 * @staid: ID of the station associated with the packet
1123 *
1124 * Return: None
1125 */
1126static void
1127hdd_ipa_uc_rm_notify_handler(void *context, enum ipa_rm_event event)
1128{
1129 struct hdd_ipa_priv *hdd_ipa = context;
1130 CDF_STATUS status = CDF_STATUS_SUCCESS;
1131
1132 /*
1133 * When SSR is going on or driver is unloading, just return.
1134 */
1135 status = wlan_hdd_validate_context(hdd_ipa->hdd_ctx);
1136 if (0 != status) {
1137 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "HDD context is not valid");
1138 return;
1139 }
1140
1141 if (!hdd_ipa_is_rm_enabled(hdd_ipa->hdd_ctx))
1142 return;
1143
1144 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "%s, event code %d",
1145 __func__, event);
1146
1147 switch (event) {
1148 case IPA_RM_RESOURCE_GRANTED:
1149 /* Differed RM Granted */
1150 hdd_ipa_uc_enable_pipes(hdd_ipa);
Yun Parke59b3912015-11-09 13:19:06 -08001151 cdf_mutex_acquire(&hdd_ipa->ipa_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001152 if ((false == hdd_ipa->resource_unloading) &&
1153 (!hdd_ipa->activated_fw_pipe)) {
1154 hdd_ipa_uc_enable_pipes(hdd_ipa);
1155 }
Yun Parke59b3912015-11-09 13:19:06 -08001156 cdf_mutex_release(&hdd_ipa->ipa_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001157 if (hdd_ipa->pending_cons_req) {
1158 ipa_rm_notify_completion(IPA_RM_RESOURCE_GRANTED,
1159 IPA_RM_RESOURCE_WLAN_CONS);
1160 }
1161 hdd_ipa->pending_cons_req = false;
1162 break;
1163
1164 case IPA_RM_RESOURCE_RELEASED:
1165 /* Differed RM Released */
1166 hdd_ipa->resource_unloading = false;
1167 if (hdd_ipa->pending_cons_req) {
1168 ipa_rm_notify_completion(IPA_RM_RESOURCE_RELEASED,
1169 IPA_RM_RESOURCE_WLAN_CONS);
1170 }
1171 hdd_ipa->pending_cons_req = false;
1172 break;
1173
1174 default:
1175 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
1176 "%s, invalid event code %d", __func__, event);
1177 break;
1178 }
1179}
1180
1181/**
1182 * hdd_ipa_uc_rm_notify_defer() - Defer IPA uC notification
1183 * @hdd_ipa: Global HDD IPA context
1184 * @event: IPA resource manager event to be deferred
1185 *
1186 * This function is called when a resource manager event is received
1187 * from firmware in interrupt context. This function will defer the
1188 * handling to the OL RX thread
1189 *
1190 * Return: None
1191 */
1192static void hdd_ipa_uc_rm_notify_defer(struct work_struct *work)
1193{
1194 enum ipa_rm_event event;
1195 struct uc_rm_work_struct *uc_rm_work = container_of(work,
1196 struct uc_rm_work_struct, work);
1197 struct hdd_ipa_priv *hdd_ipa = container_of(uc_rm_work,
1198 struct hdd_ipa_priv, uc_rm_work);
1199
1200 cds_ssr_protect(__func__);
1201 event = uc_rm_work->event;
1202 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO_HIGH,
1203 "%s, posted event %d", __func__, event);
1204
1205 hdd_ipa_uc_rm_notify_handler(hdd_ipa, event);
1206 cds_ssr_unprotect(__func__);
1207
1208 return;
1209}
1210
1211/**
1212 * hdd_ipa_uc_proc_pending_event() - Process IPA uC pending events
1213 * @hdd_ipa: Global HDD IPA context
1214 *
1215 * Return: None
1216 */
1217static void hdd_ipa_uc_proc_pending_event(struct hdd_ipa_priv *hdd_ipa)
1218{
1219 unsigned int pending_event_count;
1220 struct ipa_uc_pending_event *pending_event = NULL;
1221
1222 cdf_list_size(&hdd_ipa->pending_event, &pending_event_count);
1223 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
1224 "%s, Pending Event Count %d", __func__, pending_event_count);
1225 if (!pending_event_count) {
1226 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
1227 "%s, No Pending Event", __func__);
1228 return;
1229 }
1230
1231 cdf_list_remove_front(&hdd_ipa->pending_event,
1232 (cdf_list_node_t **)&pending_event);
1233 while (pending_event != NULL) {
1234 hdd_ipa_wlan_evt(pending_event->adapter,
1235 pending_event->type,
1236 pending_event->sta_id,
1237 pending_event->mac_addr);
1238 cdf_mem_free(pending_event);
1239 pending_event = NULL;
1240 cdf_list_remove_front(&hdd_ipa->pending_event,
1241 (cdf_list_node_t **)&pending_event);
1242 }
1243}
1244
1245/**
1246 * hdd_ipa_uc_op_cb() - IPA uC operation callback
1247 * @op_msg: operation message received from firmware
1248 * @usr_ctxt: user context registered with TL (we register the HDD Global
1249 * context)
1250 *
1251 * Return: None
1252 */
1253static void hdd_ipa_uc_op_cb(struct op_msg_type *op_msg, void *usr_ctxt)
1254{
1255 struct op_msg_type *msg = op_msg;
1256 struct ipa_uc_fw_stats *uc_fw_stat;
1257 struct IpaHwStatsWDIInfoData_t ipa_stat;
1258 struct hdd_ipa_priv *hdd_ipa;
1259 hdd_context_t *hdd_ctx;
1260 CDF_STATUS status = CDF_STATUS_SUCCESS;
1261
1262 if (!op_msg || !usr_ctxt) {
1263 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "%s, INVALID ARG", __func__);
1264 return;
1265 }
1266
1267 if (HDD_IPA_UC_OPCODE_MAX <= msg->op_code) {
1268 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
1269 "%s, INVALID OPCODE %d", __func__, msg->op_code);
1270 return;
1271 }
1272
1273 hdd_ctx = (hdd_context_t *) usr_ctxt;
1274
1275 /*
1276 * When SSR is going on or driver is unloading, just return.
1277 */
1278 status = wlan_hdd_validate_context(hdd_ctx);
1279 if (0 != status) {
1280 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "HDD context is not valid");
1281 cdf_mem_free(op_msg);
1282 return;
1283 }
1284
1285 hdd_ipa = (struct hdd_ipa_priv *)hdd_ctx->hdd_ipa;
1286
1287 HDD_IPA_LOG(CDF_TRACE_LEVEL_DEBUG,
1288 "%s, OPCODE %s", __func__, op_string[msg->op_code]);
1289
1290 if ((HDD_IPA_UC_OPCODE_TX_RESUME == msg->op_code) ||
1291 (HDD_IPA_UC_OPCODE_RX_RESUME == msg->op_code)) {
Yun Parke59b3912015-11-09 13:19:06 -08001292 cdf_mutex_acquire(&hdd_ipa->ipa_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001293 hdd_ipa->activated_fw_pipe++;
1294 if (HDD_IPA_UC_NUM_WDI_PIPE == hdd_ipa->activated_fw_pipe) {
1295 hdd_ipa->resource_loading = false;
1296 hdd_ipa_uc_proc_pending_event(hdd_ipa);
1297 }
Yun Parke59b3912015-11-09 13:19:06 -08001298 cdf_mutex_release(&hdd_ipa->ipa_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001299 }
1300
1301 if ((HDD_IPA_UC_OPCODE_TX_SUSPEND == msg->op_code) ||
1302 (HDD_IPA_UC_OPCODE_RX_SUSPEND == msg->op_code)) {
Yun Parke59b3912015-11-09 13:19:06 -08001303 cdf_mutex_acquire(&hdd_ipa->ipa_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001304 hdd_ipa->activated_fw_pipe--;
1305 if (!hdd_ipa->activated_fw_pipe) {
1306 hdd_ipa_uc_disable_pipes(hdd_ipa);
1307 if ((hdd_ipa_is_rm_enabled(hdd_ipa->hdd_ctx)) &&
1308 (!ipa_rm_release_resource(IPA_RM_RESOURCE_WLAN_PROD))) {
1309 /* Sync return success from IPA
1310 * Enable/resume all the PIPEs */
1311 hdd_ipa->resource_unloading = false;
1312 hdd_ipa_uc_proc_pending_event(hdd_ipa);
1313 } else {
1314 hdd_ipa->resource_unloading = false;
1315 hdd_ipa_uc_proc_pending_event(hdd_ipa);
1316 }
1317 }
Yun Parke59b3912015-11-09 13:19:06 -08001318 cdf_mutex_release(&hdd_ipa->ipa_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001319 }
1320
1321 if ((HDD_IPA_UC_OPCODE_STATS == msg->op_code) &&
1322 (HDD_IPA_UC_STAT_REASON_DEBUG == hdd_ipa->stat_req_reason)) {
1323
1324 /* STATs from host */
1325 CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
1326 "==== IPA_UC WLAN_HOST CE ====\n"
1327 "CE RING BASE: 0x%x\n"
1328 "CE RING SIZE: %d\n"
1329 "CE REG ADDR : 0x%llx",
1330 hdd_ctx->ce_sr_base_paddr,
1331 hdd_ctx->ce_sr_ring_size,
1332 (uint64_t) hdd_ctx->ce_reg_paddr);
1333 CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
1334 "==== IPA_UC WLAN_HOST TX ====\n"
1335 "COMP RING BASE: 0x%x\n"
1336 "COMP RING SIZE: %d\n"
1337 "NUM ALLOC BUF: %d\n"
1338 "COMP RING DBELL : 0x%x",
1339 hdd_ctx->tx_comp_ring_base_paddr,
1340 hdd_ctx->tx_comp_ring_size,
1341 hdd_ctx->tx_num_alloc_buffer,
1342 hdd_ctx->tx_comp_doorbell_paddr);
1343 CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
1344 "==== IPA_UC WLAN_HOST RX ====\n"
1345 "IND RING BASE: 0x%x\n"
1346 "IND RING SIZE: %d\n"
1347 "IND RING DBELL : 0x%x\n"
1348 "PROC DONE IND ADDR : 0x%x\n"
1349 "NUM EXCP PKT : %llu\n"
1350 "NUM TX BCMC : %llu\n"
1351 "NUM TX BCMC ERR : %llu",
1352 hdd_ctx->rx_rdy_ring_base_paddr,
1353 hdd_ctx->rx_rdy_ring_size,
1354 hdd_ctx->rx_ready_doorbell_paddr,
1355 hdd_ctx->rx_proc_done_idx_paddr,
1356 hdd_ipa->stats.num_rx_excep,
1357 hdd_ipa->stats.num_tx_bcmc,
1358 hdd_ipa->stats.num_tx_bcmc_err);
1359 CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
1360 "==== IPA_UC WLAN_HOST CONTROL ====\n"
1361 "SAP NUM STAs: %d\n"
1362 "STA CONNECTED: %d\n"
1363 "TX PIPE HDL: %d\n"
1364 "RX PIPE HDL : %d\n"
1365 "RSC LOADING : %d\n"
1366 "RSC UNLOADING : %d\n"
1367 "PNDNG CNS RQT : %d",
1368 hdd_ipa->sap_num_connected_sta,
1369 hdd_ipa->sta_connected,
1370 hdd_ipa->tx_pipe_handle,
1371 hdd_ipa->rx_pipe_handle,
1372 (unsigned int)hdd_ipa->resource_loading,
1373 (unsigned int)hdd_ipa->resource_unloading,
1374 (unsigned int)hdd_ipa->pending_cons_req);
1375
1376 /* STATs from FW */
1377 uc_fw_stat = (struct ipa_uc_fw_stats *)
1378 ((uint8_t *)op_msg + sizeof(struct op_msg_type));
1379 CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
1380 "==== IPA_UC WLAN_FW TX ====\n"
1381 "COMP RING BASE: 0x%x\n"
1382 "COMP RING SIZE: %d\n"
1383 "COMP RING DBELL : 0x%x\n"
1384 "COMP RING DBELL IND VAL : %d\n"
1385 "COMP RING DBELL CACHED VAL : %d\n"
1386 "COMP RING DBELL CACHED VAL : %d\n"
1387 "PKTS ENQ : %d\n"
1388 "PKTS COMP : %d\n"
1389 "IS SUSPEND : %d\n"
1390 "RSVD : 0x%x",
1391 uc_fw_stat->tx_comp_ring_base,
1392 uc_fw_stat->tx_comp_ring_size,
1393 uc_fw_stat->tx_comp_ring_dbell_addr,
1394 uc_fw_stat->tx_comp_ring_dbell_ind_val,
1395 uc_fw_stat->tx_comp_ring_dbell_cached_val,
1396 uc_fw_stat->tx_comp_ring_dbell_cached_val,
1397 uc_fw_stat->tx_pkts_enqueued,
1398 uc_fw_stat->tx_pkts_completed,
1399 uc_fw_stat->tx_is_suspend, uc_fw_stat->tx_reserved);
1400 CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
1401 "==== IPA_UC WLAN_FW RX ====\n"
1402 "IND RING BASE: 0x%x\n"
1403 "IND RING SIZE: %d\n"
1404 "IND RING DBELL : 0x%x\n"
1405 "IND RING DBELL IND VAL : %d\n"
1406 "IND RING DBELL CACHED VAL : %d\n"
1407 "RDY IND ADDR : 0x%x\n"
1408 "RDY IND CACHE VAL : %d\n"
1409 "RFIL IND : %d\n"
1410 "NUM PKT INDICAT : %d\n"
1411 "BUF REFIL : %d\n"
1412 "NUM DROP NO SPC : %d\n"
1413 "NUM DROP NO BUF : %d\n"
1414 "IS SUSPND : %d\n"
1415 "RSVD : 0x%x\n",
1416 uc_fw_stat->rx_ind_ring_base,
1417 uc_fw_stat->rx_ind_ring_size,
1418 uc_fw_stat->rx_ind_ring_dbell_addr,
1419 uc_fw_stat->rx_ind_ring_dbell_ind_val,
1420 uc_fw_stat->rx_ind_ring_dbell_ind_cached_val,
1421 uc_fw_stat->rx_ind_ring_rdidx_addr,
1422 uc_fw_stat->rx_ind_ring_rd_idx_cached_val,
1423 uc_fw_stat->rx_refill_idx,
1424 uc_fw_stat->rx_num_pkts_indicated,
1425 uc_fw_stat->rx_buf_refilled,
1426 uc_fw_stat->rx_num_ind_drop_no_space,
1427 uc_fw_stat->rx_num_ind_drop_no_buf,
1428 uc_fw_stat->rx_is_suspend, uc_fw_stat->rx_reserved);
1429 /* STATs from IPA */
1430 ipa_get_wdi_stats(&ipa_stat);
1431 CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
1432 "==== IPA_UC IPA TX ====\n"
1433 "NUM PROCD : %d\n"
1434 "CE DBELL : 0x%x\n"
1435 "NUM DBELL FIRED : %d\n"
1436 "COMP RNG FULL : %d\n"
1437 "COMP RNG EMPT : %d\n"
1438 "COMP RNG USE HGH : %d\n"
1439 "COMP RNG USE LOW : %d\n"
1440 "BAM FIFO FULL : %d\n"
1441 "BAM FIFO EMPT : %d\n"
1442 "BAM FIFO USE HGH : %d\n"
1443 "BAM FIFO USE LOW : %d\n"
1444 "NUM DBELL : %d\n"
1445 "NUM UNEXP DBELL : %d\n"
1446 "NUM BAM INT HDL : 0x%x\n"
1447 "NUM BAM INT NON-RUN : 0x%x\n"
1448 "NUM QMB INT HDL : 0x%x",
1449 ipa_stat.tx_ch_stats.num_pkts_processed,
1450 ipa_stat.tx_ch_stats.copy_engine_doorbell_value,
1451 ipa_stat.tx_ch_stats.num_db_fired,
1452 ipa_stat.tx_ch_stats.tx_comp_ring_stats.ringFull,
1453 ipa_stat.tx_ch_stats.tx_comp_ring_stats.ringEmpty,
1454 ipa_stat.tx_ch_stats.tx_comp_ring_stats.ringUsageHigh,
1455 ipa_stat.tx_ch_stats.tx_comp_ring_stats.ringUsageLow,
1456 ipa_stat.tx_ch_stats.bam_stats.bamFifoFull,
1457 ipa_stat.tx_ch_stats.bam_stats.bamFifoEmpty,
1458 ipa_stat.tx_ch_stats.bam_stats.bamFifoUsageHigh,
1459 ipa_stat.tx_ch_stats.bam_stats.bamFifoUsageLow,
1460 ipa_stat.tx_ch_stats.num_db,
1461 ipa_stat.tx_ch_stats.num_unexpected_db,
1462 ipa_stat.tx_ch_stats.num_bam_int_handled,
1463 ipa_stat.tx_ch_stats.
1464 num_bam_int_in_non_runnning_state,
1465 ipa_stat.tx_ch_stats.num_qmb_int_handled);
1466
1467 CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
1468 "==== IPA_UC IPA RX ====\n"
1469 "MAX OST PKT : %d\n"
1470 "NUM PKT PRCSD : %d\n"
1471 "RNG RP : 0x%x\n"
1472 "COMP RNG FULL : %d\n"
1473 "COMP RNG EMPT : %d\n"
1474 "COMP RNG USE HGH : %d\n"
1475 "COMP RNG USE LOW : %d\n"
1476 "BAM FIFO FULL : %d\n"
1477 "BAM FIFO EMPT : %d\n"
1478 "BAM FIFO USE HGH : %d\n"
1479 "BAM FIFO USE LOW : %d\n"
1480 "NUM DB : %d\n"
1481 "NUM UNEXP DB : %d\n"
1482 "NUM BAM INT HNDL : 0x%x\n",
1483 ipa_stat.rx_ch_stats.max_outstanding_pkts,
1484 ipa_stat.rx_ch_stats.num_pkts_processed,
1485 ipa_stat.rx_ch_stats.rx_ring_rp_value,
1486 ipa_stat.rx_ch_stats.rx_ind_ring_stats.ringFull,
1487 ipa_stat.rx_ch_stats.rx_ind_ring_stats.ringEmpty,
1488 ipa_stat.rx_ch_stats.rx_ind_ring_stats.ringUsageHigh,
1489 ipa_stat.rx_ch_stats.rx_ind_ring_stats.ringUsageLow,
1490 ipa_stat.rx_ch_stats.bam_stats.bamFifoFull,
1491 ipa_stat.rx_ch_stats.bam_stats.bamFifoEmpty,
1492 ipa_stat.rx_ch_stats.bam_stats.bamFifoUsageHigh,
1493 ipa_stat.rx_ch_stats.bam_stats.bamFifoUsageLow,
1494 ipa_stat.rx_ch_stats.num_db,
1495 ipa_stat.rx_ch_stats.num_unexpected_db,
1496 ipa_stat.rx_ch_stats.num_bam_int_handled);
1497 } else if ((HDD_IPA_UC_OPCODE_STATS == msg->op_code) &&
1498 (HDD_IPA_UC_STAT_REASON_BW_CAL == hdd_ipa->stat_req_reason)) {
1499 /* STATs from FW */
1500 uc_fw_stat = (struct ipa_uc_fw_stats *)
1501 ((uint8_t *)op_msg + sizeof(struct op_msg_type));
Yun Parke59b3912015-11-09 13:19:06 -08001502 cdf_mutex_acquire(&hdd_ipa->ipa_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001503 hdd_ipa->ipa_tx_packets_diff = HDD_BW_GET_DIFF(
1504 uc_fw_stat->tx_pkts_completed,
1505 hdd_ipa->ipa_p_tx_packets);
1506 hdd_ipa->ipa_rx_packets_diff = HDD_BW_GET_DIFF(
1507 (uc_fw_stat->rx_num_ind_drop_no_space +
1508 uc_fw_stat->rx_num_ind_drop_no_buf +
1509 uc_fw_stat->rx_num_pkts_indicated),
1510 hdd_ipa->ipa_p_rx_packets);
1511
1512 hdd_ipa->ipa_p_tx_packets = uc_fw_stat->tx_pkts_completed;
1513 hdd_ipa->ipa_p_rx_packets =
1514 (uc_fw_stat->rx_num_ind_drop_no_space +
1515 uc_fw_stat->rx_num_ind_drop_no_buf +
1516 uc_fw_stat->rx_num_pkts_indicated);
Yun Parke59b3912015-11-09 13:19:06 -08001517 cdf_mutex_release(&hdd_ipa->ipa_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001518 } else {
1519 HDD_IPA_LOG(LOGE, "INVALID REASON %d",
1520 hdd_ipa->stat_req_reason);
1521 }
1522 cdf_mem_free(op_msg);
1523}
1524
1525
1526/**
1527 * hdd_ipa_uc_offload_enable_disable() - wdi enable/disable notify to fw
1528 * @adapter: device adapter instance
1529 * @offload_type: MCC or SCC
1530 * @enable: TX offload enable or disable
1531 *
1532 * Return: none
1533 */
1534static void hdd_ipa_uc_offload_enable_disable(hdd_adapter_t *adapter,
1535 uint32_t offload_type, uint32_t enable)
1536{
1537 struct sir_ipa_offload_enable_disable ipa_offload_enable_disable;
1538
1539 /* Lower layer may send multiple START_BSS_EVENT in DFS mode or during
1540 * channel change indication. Since these indications are sent by lower
1541 * layer as SAP updates and IPA doesn't have to do anything for these
1542 * updates so ignoring!
1543 */
1544 if (WLAN_HDD_SOFTAP == adapter->device_mode && adapter->ipa_context)
1545 return;
1546
1547 /* Lower layer may send multiple START_BSS_EVENT in DFS mode or during
1548 * channel change indication. Since these indications are sent by lower
1549 * layer as SAP updates and IPA doesn't have to do anything for these
1550 * updates so ignoring!
1551 */
1552 if (adapter->ipa_context)
1553 return;
1554
1555 cdf_mem_zero(&ipa_offload_enable_disable,
1556 sizeof(ipa_offload_enable_disable));
1557 ipa_offload_enable_disable.offload_type = offload_type;
1558 ipa_offload_enable_disable.vdev_id = adapter->sessionId;
1559 ipa_offload_enable_disable.enable = enable;
1560
1561 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
1562 "%s: offload_type=%d, vdev_id=%d, enable=%d", __func__,
1563 ipa_offload_enable_disable.offload_type,
1564 ipa_offload_enable_disable.vdev_id,
1565 ipa_offload_enable_disable.enable);
1566
1567 if (CDF_STATUS_SUCCESS !=
1568 sme_ipa_offload_enable_disable(WLAN_HDD_GET_HAL_CTX(adapter),
1569 adapter->sessionId, &ipa_offload_enable_disable)) {
1570 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
1571 "%s: Failure to enable IPA offload \
1572 (offload_type=%d, vdev_id=%d, enable=%d)", __func__,
1573 ipa_offload_enable_disable.offload_type,
1574 ipa_offload_enable_disable.vdev_id,
1575 ipa_offload_enable_disable.enable);
1576 }
1577}
1578
1579/**
1580 * hdd_ipa_uc_fw_op_event_handler - IPA uC FW OPvent handler
1581 * @work: uC OP work
1582 *
1583 * Return: None
1584 */
1585static void hdd_ipa_uc_fw_op_event_handler(struct work_struct *work)
1586{
1587 struct op_msg_type *msg;
1588 struct uc_op_work_struct *uc_op_work = container_of(work,
1589 struct uc_op_work_struct, work);
1590 struct hdd_ipa_priv *hdd_ipa = ghdd_ipa;
1591
1592 cds_ssr_protect(__func__);
1593
1594 msg = uc_op_work->msg;
1595 uc_op_work->msg = NULL;
1596 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO_HIGH,
1597 "%s, posted msg %d", __func__, msg->op_code);
1598
1599 hdd_ipa_uc_op_cb(msg, hdd_ipa->hdd_ctx);
1600
1601 cds_ssr_unprotect(__func__);
1602
1603 return;
1604}
1605
1606/**
1607 * hdd_ipa_uc_op_event_handler() - Adapter lookup
1608 * hdd_ipa_uc_fw_op_event_handler - IPA uC FW OPvent handler
1609 * @op_msg: operation message received from firmware
1610 * @hdd_ctx: Global HDD context
1611 *
1612 * Return: None
1613 */
1614static void hdd_ipa_uc_op_event_handler(uint8_t *op_msg, void *hdd_ctx)
1615{
1616 struct hdd_ipa_priv *hdd_ipa;
1617 struct op_msg_type *msg;
1618 struct uc_op_work_struct *uc_op_work;
1619 CDF_STATUS status = CDF_STATUS_SUCCESS;
1620
1621 status = wlan_hdd_validate_context(hdd_ctx);
1622 if (0 != status) {
1623 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "HDD context is not valid");
1624 goto end;
1625 }
1626
1627 msg = (struct op_msg_type *)op_msg;
1628 hdd_ipa = ((hdd_context_t *)hdd_ctx)->hdd_ipa;
1629
1630 if (unlikely(!hdd_ipa))
1631 goto end;
1632
1633 if (HDD_IPA_UC_OPCODE_MAX <= msg->op_code) {
1634 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "%s: Invalid OP Code (%d)",
1635 __func__, msg->op_code);
1636 goto end;
1637 }
1638
1639 uc_op_work = &hdd_ipa->uc_op_work[msg->op_code];
1640 if (uc_op_work->msg)
1641 /* When the same uC OPCODE is already pended, just return */
1642 goto end;
1643
1644 uc_op_work->msg = msg;
1645 schedule_work(&uc_op_work->work);
1646 return;
1647
1648end:
1649 cdf_mem_free(op_msg);
1650}
1651
1652/**
1653 * hdd_ipa_uc_ol_init() - Initialize IPA uC offload
1654 * @hdd_ctx: Global HDD context
1655 *
1656 * Return: CDF_STATUS
1657 */
1658static CDF_STATUS hdd_ipa_uc_ol_init(hdd_context_t *hdd_ctx)
1659{
1660 struct ipa_wdi_in_params pipe_in;
1661 struct ipa_wdi_out_params pipe_out;
1662 struct hdd_ipa_priv *ipa_ctxt = (struct hdd_ipa_priv *)hdd_ctx->hdd_ipa;
1663 p_cds_contextType cds_ctx = hdd_ctx->pcds_context;
1664 uint8_t i;
1665
1666 cdf_mem_zero(&pipe_in, sizeof(struct ipa_wdi_in_params));
1667 cdf_mem_zero(&pipe_out, sizeof(struct ipa_wdi_out_params));
1668
1669 cdf_list_init(&ipa_ctxt->pending_event, 1000);
1670 cdf_mutex_init(&ipa_ctxt->event_lock);
Yun Parke59b3912015-11-09 13:19:06 -08001671 cdf_mutex_init(&ipa_ctxt->ipa_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001672
1673 /* TX PIPE */
1674 pipe_in.sys.ipa_ep_cfg.nat.nat_en = IPA_BYPASS_NAT;
1675 pipe_in.sys.ipa_ep_cfg.hdr.hdr_len = HDD_IPA_UC_WLAN_TX_HDR_LEN;
1676 pipe_in.sys.ipa_ep_cfg.hdr.hdr_ofst_pkt_size_valid = 1;
1677 pipe_in.sys.ipa_ep_cfg.hdr.hdr_ofst_pkt_size = 0;
1678 pipe_in.sys.ipa_ep_cfg.hdr.hdr_additional_const_len =
1679 HDD_IPA_UC_WLAN_8023_HDR_SIZE;
1680 pipe_in.sys.ipa_ep_cfg.mode.mode = IPA_BASIC;
1681 pipe_in.sys.client = IPA_CLIENT_WLAN1_CONS;
1682 pipe_in.sys.desc_fifo_sz = hdd_ctx->config->IpaDescSize;
1683 pipe_in.sys.priv = hdd_ctx->hdd_ipa;
1684 pipe_in.sys.ipa_ep_cfg.hdr_ext.hdr_little_endian = true;
1685 pipe_in.sys.notify = hdd_ipa_i2w_cb;
1686 if (!hdd_ipa_is_rm_enabled(hdd_ctx)) {
1687 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
1688 "%s: IPA RM DISABLED, IPA AWAKE", __func__);
1689 pipe_in.sys.keep_ipa_awake = true;
1690 }
1691
1692 pipe_in.u.dl.comp_ring_base_pa = hdd_ctx->tx_comp_ring_base_paddr;
1693 pipe_in.u.dl.comp_ring_size = hdd_ctx->tx_comp_ring_size * 4;
1694 pipe_in.u.dl.ce_ring_base_pa = hdd_ctx->ce_sr_base_paddr;
1695 pipe_in.u.dl.ce_door_bell_pa = hdd_ctx->ce_reg_paddr;
1696 pipe_in.u.dl.ce_ring_size = hdd_ctx->ce_sr_ring_size * 8;
1697 pipe_in.u.dl.num_tx_buffers = hdd_ctx->tx_num_alloc_buffer;
1698
1699 /* Connect WDI IPA PIPE */
1700 ipa_connect_wdi_pipe(&pipe_in, &pipe_out);
1701 /* Micro Controller Doorbell register */
1702 hdd_ctx->tx_comp_doorbell_paddr = (uint32_t) pipe_out.uc_door_bell_pa;
1703 /* WLAN TX PIPE Handle */
1704 ipa_ctxt->tx_pipe_handle = pipe_out.clnt_hdl;
1705 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO_HIGH,
1706 "TX : CRBPA 0x%x, CRS %d, CERBPA 0x%x, CEDPA 0x%x,"
1707 " CERZ %d, NB %d, CDBPAD 0x%x",
1708 (unsigned int)pipe_in.u.dl.comp_ring_base_pa,
1709 pipe_in.u.dl.comp_ring_size,
1710 (unsigned int)pipe_in.u.dl.ce_ring_base_pa,
1711 (unsigned int)pipe_in.u.dl.ce_door_bell_pa,
1712 pipe_in.u.dl.ce_ring_size,
1713 pipe_in.u.dl.num_tx_buffers,
1714 (unsigned int)hdd_ctx->tx_comp_doorbell_paddr);
1715
1716 /* RX PIPE */
1717 pipe_in.sys.ipa_ep_cfg.nat.nat_en = IPA_BYPASS_NAT;
1718 pipe_in.sys.ipa_ep_cfg.hdr.hdr_len = HDD_IPA_UC_WLAN_RX_HDR_LEN;
1719 pipe_in.sys.ipa_ep_cfg.hdr.hdr_ofst_metadata_valid = 0;
1720 pipe_in.sys.ipa_ep_cfg.hdr.hdr_metadata_reg_valid = 1;
1721 pipe_in.sys.ipa_ep_cfg.mode.mode = IPA_BASIC;
1722 pipe_in.sys.client = IPA_CLIENT_WLAN1_PROD;
1723 pipe_in.sys.desc_fifo_sz = hdd_ctx->config->IpaDescSize +
1724 sizeof(struct sps_iovec);
1725 pipe_in.sys.notify = hdd_ipa_w2i_cb;
1726 if (!hdd_ipa_is_rm_enabled(hdd_ctx)) {
1727 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
1728 "%s: IPA RM DISABLED, IPA AWAKE", __func__);
1729 pipe_in.sys.keep_ipa_awake = true;
1730 }
1731
1732 pipe_in.u.ul.rdy_ring_base_pa = hdd_ctx->rx_rdy_ring_base_paddr;
1733 pipe_in.u.ul.rdy_ring_size = hdd_ctx->rx_rdy_ring_size;
1734 pipe_in.u.ul.rdy_ring_rp_pa = hdd_ctx->rx_proc_done_idx_paddr;
1735
1736 ipa_connect_wdi_pipe(&pipe_in, &pipe_out);
1737 hdd_ctx->rx_ready_doorbell_paddr = pipe_out.uc_door_bell_pa;
1738 ipa_ctxt->rx_pipe_handle = pipe_out.clnt_hdl;
1739 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO_HIGH,
1740 "RX : RRBPA 0x%x, RRS %d, PDIPA 0x%x, RDY_DB_PAD 0x%x",
1741 (unsigned int)pipe_in.u.ul.rdy_ring_base_pa,
1742 pipe_in.u.ul.rdy_ring_size,
1743 (unsigned int)pipe_in.u.ul.rdy_ring_rp_pa,
1744 (unsigned int)hdd_ctx->rx_ready_doorbell_paddr);
1745
1746 ol_txrx_ipa_uc_set_doorbell_paddr(cds_ctx->pdev_txrx_ctx,
1747 (uint32_t) hdd_ctx->tx_comp_doorbell_paddr,
1748 (uint32_t) hdd_ctx->rx_ready_doorbell_paddr);
1749
1750 ol_txrx_ipa_uc_register_op_cb(cds_ctx->pdev_txrx_ctx,
1751 hdd_ipa_uc_op_event_handler, (void *)hdd_ctx);
1752
1753 for (i = 0; i < HDD_IPA_UC_OPCODE_MAX; i++) {
1754 cnss_init_work(&ipa_ctxt->uc_op_work[i].work,
1755 hdd_ipa_uc_fw_op_event_handler);
1756 ipa_ctxt->uc_op_work[i].msg = NULL;
1757 }
1758
1759 return CDF_STATUS_SUCCESS;
1760}
1761
Leo Change3e49442015-10-26 20:07:13 -07001762/**
1763 * hdd_ipa_uc_force_pipe_shutdown() - Force shutdown IPA pipe
1764 * @hdd_ctx: hdd main context
1765 *
1766 * Force shutdown IPA pipe
1767 * Independent of FW pipe status, IPA pipe shutdonw progress
1768 * in case, any STA does not leave properly, IPA HW pipe should cleaned up
1769 * independent from FW pipe status
1770 *
1771 * Return: NONE
1772 */
1773void hdd_ipa_uc_force_pipe_shutdown(hdd_context_t *hdd_ctx)
1774{
1775 struct hdd_ipa_priv *hdd_ipa;
1776
1777 if (!hdd_ipa_is_enabled(hdd_ctx) || !hdd_ctx->hdd_ipa)
1778 return;
1779
1780 hdd_ipa = (struct hdd_ipa_priv *)hdd_ctx->hdd_ipa;
1781 if (false == hdd_ipa->ipa_pipes_down) {
1782 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
1783 "IPA pipes are not down yet, force shutdown");
1784 hdd_ipa_uc_disable_pipes(hdd_ipa);
1785 } else {
1786 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
1787 "IPA pipes are down, do nothing");
1788 }
1789
1790 return;
1791}
1792
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001793/**
1794 * hdd_ipa_uc_ssr_deinit() - handle ipa deinit for SSR
1795 *
1796 * Deinit basic IPA UC host side to be in sync reloaded FW during
1797 * SSR
1798 *
1799 * Return: 0 - Success
1800 */
Leo Change3e49442015-10-26 20:07:13 -07001801#ifdef IPA_UC_OFFLOAD
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001802int hdd_ipa_uc_ssr_deinit(void)
1803{
1804 struct hdd_ipa_priv *hdd_ipa = ghdd_ipa;
1805 int idx;
1806 struct hdd_ipa_iface_context *iface_context;
1807
1808 if (!hdd_ipa_uc_is_enabled(hdd_ipa))
1809 return 0;
1810
1811 /* Clean up HDD IPA interfaces */
1812 for (idx = 0; (hdd_ipa->num_iface > 0) &&
1813 (idx < HDD_IPA_MAX_IFACE); idx++) {
1814 iface_context = &hdd_ipa->iface_context[idx];
1815 if (iface_context && iface_context->adapter)
1816 hdd_ipa_cleanup_iface(iface_context);
1817 }
1818
1819 /* After SSR, wlan driver reloads FW again. But we need to protect
1820 * IPA submodule during SSR transient state. So deinit basic IPA
1821 * UC host side to be in sync with reloaded FW during SSR
1822 */
1823 hdd_ipa_uc_disable_pipes(hdd_ipa);
1824
Yun Parke59b3912015-11-09 13:19:06 -08001825 cdf_wake_lock_acquire(&hdd_ipa->ipa_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001826 for (idx = 0; idx < WLAN_MAX_STA_COUNT; idx++) {
1827 hdd_ipa->assoc_stas_map[idx].is_reserved = false;
1828 hdd_ipa->assoc_stas_map[idx].sta_id = 0xFF;
1829 }
Yun Parke59b3912015-11-09 13:19:06 -08001830 cdf_wake_lock_release(&hdd_ipa->ipa_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001831
1832 /* Full IPA driver cleanup not required since wlan driver is now
1833 * unloaded and reloaded after SSR.
1834 */
1835 return 0;
1836}
Leo Change3e49442015-10-26 20:07:13 -07001837#else
1838int hdd_ipa_uc_ssr_deinit(void)
1839{
1840 return 0;
1841}
1842#endif
1843
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001844
1845/**
1846 * hdd_ipa_uc_ssr_reinit() - handle ipa reinit after SSR
1847 *
1848 * Init basic IPA UC host side to be in sync with reloaded FW after
1849 * SSR to resume IPA UC operations
1850 *
1851 * Return: 0 - Success
1852 */
Leo Change3e49442015-10-26 20:07:13 -07001853#ifdef IPA_UC_OFFLOAD
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001854int hdd_ipa_uc_ssr_reinit(void)
1855{
1856 struct hdd_ipa_priv *hdd_ipa = ghdd_ipa;
1857
1858 if (!hdd_ipa_uc_is_enabled(hdd_ipa))
1859 return 0;
1860
1861 /* After SSR is complete, IPA UC can resume operation. But now wlan
1862 * driver will be unloaded and reloaded, which takes care of IPA cleanup
1863 * and initialization. This is a placeholder func if IPA has to resume
1864 * operations without driver reload.
1865 */
1866 return 0;
1867}
Leo Change3e49442015-10-26 20:07:13 -07001868#else
1869int hdd_ipa_uc_ssr_reinit(void)
1870{
1871 return 0;
1872}
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001873#endif
1874
1875/**
1876 * hdd_ipa_wake_lock_timer_func() - Wake lock work handler
1877 * @work: scheduled work
1878 *
1879 * When IPA resources are released in hdd_ipa_rm_try_release() we do
1880 * not want to immediately release the wake lock since the system
1881 * would then potentially try to suspend when there is a healthy data
1882 * rate. Deferred work is scheduled and this function handles the
1883 * work. When this function is called, if the IPA resource is still
1884 * released then we release the wake lock.
1885 *
1886 * Return: None
1887 */
1888static void hdd_ipa_wake_lock_timer_func(struct work_struct *work)
1889{
1890 struct hdd_ipa_priv *hdd_ipa = container_of(to_delayed_work(work),
1891 struct hdd_ipa_priv,
1892 wake_lock_work);
1893
1894 cdf_spin_lock_bh(&hdd_ipa->rm_lock);
1895
1896 if (hdd_ipa->rm_state != HDD_IPA_RM_RELEASED)
1897 goto end;
1898
1899 hdd_ipa->wake_lock_released = true;
1900 cdf_wake_lock_release(&hdd_ipa->wake_lock,
1901 WIFI_POWER_EVENT_WAKELOCK_IPA);
1902
1903end:
1904 cdf_spin_unlock_bh(&hdd_ipa->rm_lock);
1905}
1906
1907/**
1908 * hdd_ipa_rm_request() - Request resource from IPA
1909 * @hdd_ipa: Global HDD IPA context
1910 *
1911 * Return: 0 on success, negative errno on error
1912 */
1913static int hdd_ipa_rm_request(struct hdd_ipa_priv *hdd_ipa)
1914{
1915 int ret = 0;
1916
1917 if (!hdd_ipa_is_rm_enabled(hdd_ipa->hdd_ctx))
1918 return 0;
1919
1920 cdf_spin_lock_bh(&hdd_ipa->rm_lock);
1921
1922 switch (hdd_ipa->rm_state) {
1923 case HDD_IPA_RM_GRANTED:
1924 cdf_spin_unlock_bh(&hdd_ipa->rm_lock);
1925 return 0;
1926 case HDD_IPA_RM_GRANT_PENDING:
1927 cdf_spin_unlock_bh(&hdd_ipa->rm_lock);
1928 return -EINPROGRESS;
1929 case HDD_IPA_RM_RELEASED:
1930 hdd_ipa->rm_state = HDD_IPA_RM_GRANT_PENDING;
1931 break;
1932 }
1933
1934 cdf_spin_unlock_bh(&hdd_ipa->rm_lock);
1935
1936 ret = ipa_rm_inactivity_timer_request_resource(
1937 IPA_RM_RESOURCE_WLAN_PROD);
1938
1939 cdf_spin_lock_bh(&hdd_ipa->rm_lock);
1940 if (ret == 0) {
1941 hdd_ipa->rm_state = HDD_IPA_RM_GRANTED;
1942 hdd_ipa->stats.num_rm_grant_imm++;
1943 }
1944
1945 cancel_delayed_work(&hdd_ipa->wake_lock_work);
1946 if (hdd_ipa->wake_lock_released) {
1947 cdf_wake_lock_acquire(&hdd_ipa->wake_lock,
1948 WIFI_POWER_EVENT_WAKELOCK_IPA);
1949 hdd_ipa->wake_lock_released = false;
1950 }
1951 cdf_spin_unlock_bh(&hdd_ipa->rm_lock);
1952
1953 return ret;
1954}
1955
1956/**
1957 * hdd_ipa_rm_try_release() - Attempt to release IPA resource
1958 * @hdd_ipa: Global HDD IPA context
1959 *
1960 * Return: 0 if resources released, negative errno otherwise
1961 */
1962static int hdd_ipa_rm_try_release(struct hdd_ipa_priv *hdd_ipa)
1963{
1964 int ret = 0;
1965
1966 if (!hdd_ipa_is_rm_enabled(hdd_ipa->hdd_ctx))
1967 return 0;
1968
1969 if (atomic_read(&hdd_ipa->tx_ref_cnt))
1970 return -EAGAIN;
1971
1972 spin_lock_bh(&hdd_ipa->q_lock);
1973 if (!hdd_ipa_uc_sta_is_enabled(hdd_ipa->hdd_ctx) &&
1974 (hdd_ipa->pending_hw_desc_cnt || hdd_ipa->pend_q_cnt)) {
1975 spin_unlock_bh(&hdd_ipa->q_lock);
1976 return -EAGAIN;
1977 }
1978 spin_unlock_bh(&hdd_ipa->q_lock);
1979
1980 cdf_spin_lock_bh(&hdd_ipa->pm_lock);
1981
1982 if (!cdf_nbuf_is_queue_empty(&hdd_ipa->pm_queue_head)) {
1983 cdf_spin_unlock_bh(&hdd_ipa->pm_lock);
1984 return -EAGAIN;
1985 }
1986 cdf_spin_unlock_bh(&hdd_ipa->pm_lock);
1987
1988 cdf_spin_lock_bh(&hdd_ipa->rm_lock);
1989 switch (hdd_ipa->rm_state) {
1990 case HDD_IPA_RM_GRANTED:
1991 break;
1992 case HDD_IPA_RM_GRANT_PENDING:
1993 cdf_spin_unlock_bh(&hdd_ipa->rm_lock);
1994 return -EINPROGRESS;
1995 case HDD_IPA_RM_RELEASED:
1996 cdf_spin_unlock_bh(&hdd_ipa->rm_lock);
1997 return 0;
1998 }
1999
2000 /* IPA driver returns immediately so set the state here to avoid any
2001 * race condition.
2002 */
2003 hdd_ipa->rm_state = HDD_IPA_RM_RELEASED;
2004 hdd_ipa->stats.num_rm_release++;
2005 cdf_spin_unlock_bh(&hdd_ipa->rm_lock);
2006
2007 ret =
2008 ipa_rm_inactivity_timer_release_resource(IPA_RM_RESOURCE_WLAN_PROD);
2009
2010 cdf_spin_lock_bh(&hdd_ipa->rm_lock);
2011 if (unlikely(ret != 0)) {
2012 hdd_ipa->rm_state = HDD_IPA_RM_GRANTED;
2013 WARN_ON(1);
2014 }
2015
2016 /*
2017 * If wake_lock is released immediately, kernel would try to suspend
2018 * immediately as well, Just avoid ping-pong between suspend-resume
2019 * while there is healthy amount of data transfer going on by
2020 * releasing the wake_lock after some delay.
2021 */
2022 schedule_delayed_work(&hdd_ipa->wake_lock_work,
2023 msecs_to_jiffies
2024 (HDD_IPA_RX_INACTIVITY_MSEC_DELAY));
2025
2026 cdf_spin_unlock_bh(&hdd_ipa->rm_lock);
2027
2028 return ret;
2029}
2030
/**
 * hdd_ipa_rm_notify() - IPA resource manager notifier callback
 * @user_data: user data registered with IPA (the global HDD IPA context)
 * @event: the IPA resource manager event that occurred
 * @data: the data associated with the event
 *
 * Called by the IPA driver on resource grant/release transitions for
 * the WLAN producer resource.  May run in ISR context, so in uC mode
 * the grant handling is deferred to a work queue.
 *
 * Return: None
 */
static void hdd_ipa_rm_notify(void *user_data, enum ipa_rm_event event,
			      unsigned long data)
{
	struct hdd_ipa_priv *hdd_ipa = user_data;

	if (unlikely(!hdd_ipa))
		return;

	if (!hdd_ipa_is_rm_enabled(hdd_ipa->hdd_ctx))
		return;

	HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "Evt: %d", event);

	switch (event) {
	case IPA_RM_RESOURCE_GRANTED:
		if (hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
			/* RM Notification comes with ISR context
			 * it should be serialized into work queue to avoid
			 * ISR sleep problem
			 */
			hdd_ipa->uc_rm_work.event = event;
			schedule_work(&hdd_ipa->uc_rm_work.work);
			break;
		}
		/* Non-uC mode: complete the grant inline */
		cdf_spin_lock_bh(&hdd_ipa->rm_lock);
		hdd_ipa->rm_state = HDD_IPA_RM_GRANTED;
		cdf_spin_unlock_bh(&hdd_ipa->rm_lock);
		hdd_ipa->stats.num_rm_grant++;
		break;

	case IPA_RM_RESOURCE_RELEASED:
		HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "RM Release");
		hdd_ipa->resource_unloading = false;
		break;

	default:
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "Unknown RM Evt: %d", event);
		break;
	}
}
2079
/**
 * hdd_ipa_rm_cons_release() - WLAN consumer resource release handler
 *
 * Callback function registered with IPA that is called when IPA wants
 * to release the WLAN consumer resource.  The release is always granted
 * immediately; no WLAN-side bookkeeping is needed here.
 *
 * Return: 0 if the request is granted, negative errno otherwise
 */
static int hdd_ipa_rm_cons_release(void)
{
	return 0;
}
2092
2093/**
2094 * hdd_ipa_rm_cons_request() - WLAN consumer resource request handler
2095 *
2096 * Callback function registered with IPA that is called when IPA wants
2097 * to access the WLAN consumer resource
2098 *
2099 * Return: 0 if the request is granted, negative errno otherwise
2100 */
2101static int hdd_ipa_rm_cons_request(void)
2102{
2103 if ((ghdd_ipa->resource_loading) || (ghdd_ipa->resource_unloading)) {
2104 HDD_IPA_LOG(CDF_TRACE_LEVEL_FATAL,
2105 "%s: ipa resource loading/unloading in progress",
2106 __func__);
2107 ghdd_ipa->pending_cons_req = true;
2108 return -EPERM;
2109 }
2110 return 0;
2111}
2112
2113/**
2114 * hdd_ipa_set_perf_level() - Set IPA performance level
2115 * @hdd_ctx: Global HDD context
2116 * @tx_packets: Number of packets transmitted in the last sample period
2117 * @rx_packets: Number of packets received in the last sample period
2118 *
2119 * Return: 0 on success, negative errno on error
2120 */
2121int hdd_ipa_set_perf_level(hdd_context_t *hdd_ctx, uint64_t tx_packets,
2122 uint64_t rx_packets)
2123{
2124 uint32_t next_cons_bw, next_prod_bw;
2125 struct hdd_ipa_priv *hdd_ipa = hdd_ctx->hdd_ipa;
2126 struct ipa_rm_perf_profile profile;
2127 int ret;
2128
2129 if ((!hdd_ipa_is_enabled(hdd_ctx)) ||
2130 (!hdd_ipa_is_clk_scaling_enabled(hdd_ctx)))
2131 return 0;
2132
2133 memset(&profile, 0, sizeof(profile));
2134
2135 if (tx_packets > (hdd_ctx->config->busBandwidthHighThreshold / 2))
2136 next_cons_bw = hdd_ctx->config->IpaHighBandwidthMbps;
2137 else if (tx_packets >
2138 (hdd_ctx->config->busBandwidthMediumThreshold / 2))
2139 next_cons_bw = hdd_ctx->config->IpaMediumBandwidthMbps;
2140 else
2141 next_cons_bw = hdd_ctx->config->IpaLowBandwidthMbps;
2142
2143 if (rx_packets > (hdd_ctx->config->busBandwidthHighThreshold / 2))
2144 next_prod_bw = hdd_ctx->config->IpaHighBandwidthMbps;
2145 else if (rx_packets >
2146 (hdd_ctx->config->busBandwidthMediumThreshold / 2))
2147 next_prod_bw = hdd_ctx->config->IpaMediumBandwidthMbps;
2148 else
2149 next_prod_bw = hdd_ctx->config->IpaLowBandwidthMbps;
2150
2151 HDD_IPA_LOG(LOG1,
2152 "CONS perf curr: %d, next: %d",
2153 hdd_ipa->curr_cons_bw, next_cons_bw);
2154 HDD_IPA_LOG(LOG1,
2155 "PROD perf curr: %d, next: %d",
2156 hdd_ipa->curr_prod_bw, next_prod_bw);
2157
2158 if (hdd_ipa->curr_cons_bw != next_cons_bw) {
2159 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
2160 "Requesting CONS perf curr: %d, next: %d",
2161 hdd_ipa->curr_cons_bw, next_cons_bw);
2162 profile.max_supported_bandwidth_mbps = next_cons_bw;
2163 ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_WLAN_CONS,
2164 &profile);
2165 if (ret) {
2166 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
2167 "RM CONS set perf profile failed: %d", ret);
2168
2169 return ret;
2170 }
2171 hdd_ipa->curr_cons_bw = next_cons_bw;
2172 hdd_ipa->stats.num_cons_perf_req++;
2173 }
2174
2175 if (hdd_ipa->curr_prod_bw != next_prod_bw) {
2176 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
2177 "Requesting PROD perf curr: %d, next: %d",
2178 hdd_ipa->curr_prod_bw, next_prod_bw);
2179 profile.max_supported_bandwidth_mbps = next_prod_bw;
2180 ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_WLAN_PROD,
2181 &profile);
2182 if (ret) {
2183 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
2184 "RM PROD set perf profile failed: %d", ret);
2185 return ret;
2186 }
2187 hdd_ipa->curr_prod_bw = next_prod_bw;
2188 hdd_ipa->stats.num_prod_perf_req++;
2189 }
2190
2191 return 0;
2192}
2193
/**
 * hdd_ipa_setup_rm() - Setup IPA resource management
 * @hdd_ipa: Global HDD IPA context
 *
 * Creates the WLAN PROD and CONS resources with the IPA resource
 * manager, adds the PROD->APPS_CONS dependency, initializes the PROD
 * inactivity timer, programs the lowest bandwidth profile, and sets up
 * the wake lock plus the initial (RELEASED) RM state.
 *
 * Return: 0 on success, negative errno on error
 */
static int hdd_ipa_setup_rm(struct hdd_ipa_priv *hdd_ipa)
{
	struct ipa_rm_create_params create_params = { 0 };
	int ret;

	if (!hdd_ipa_is_rm_enabled(hdd_ipa->hdd_ctx))
		return 0;

	/* Work item that defers uC RM notifications out of ISR context.
	 * NOTE(review): cnss_init_work is called without a CONFIG_CNSS
	 * guard while the delayed work below is guarded -- confirm this
	 * builds on non-CNSS configurations.
	 */
	cnss_init_work(&hdd_ipa->uc_rm_work.work, hdd_ipa_uc_rm_notify_defer);
	memset(&create_params, 0, sizeof(create_params));
	create_params.name = IPA_RM_RESOURCE_WLAN_PROD;
	create_params.reg_params.user_data = hdd_ipa;
	create_params.reg_params.notify_cb = hdd_ipa_rm_notify;
	create_params.floor_voltage = IPA_VOLTAGE_SVS;

	ret = ipa_rm_create_resource(&create_params);
	if (ret) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
			    "Create RM resource failed: %d", ret);
		goto setup_rm_fail;
	}

	/* create_params is reused for the CONS resource */
	memset(&create_params, 0, sizeof(create_params));
	create_params.name = IPA_RM_RESOURCE_WLAN_CONS;
	create_params.request_resource = hdd_ipa_rm_cons_request;
	create_params.release_resource = hdd_ipa_rm_cons_release;
	create_params.floor_voltage = IPA_VOLTAGE_SVS;

	ret = ipa_rm_create_resource(&create_params);
	if (ret) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
			    "Create RM CONS resource failed: %d", ret);
		goto delete_prod;
	}

	ipa_rm_add_dependency(IPA_RM_RESOURCE_WLAN_PROD,
			      IPA_RM_RESOURCE_APPS_CONS);

	ret = ipa_rm_inactivity_timer_init(IPA_RM_RESOURCE_WLAN_PROD,
					   HDD_IPA_RX_INACTIVITY_MSEC_DELAY);
	if (ret) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "Timer init failed: %d",
			    ret);
		goto timer_init_failed;
	}

	/* Set the lowest bandwidth to start with */
	ret = hdd_ipa_set_perf_level(hdd_ipa->hdd_ctx, 0, 0);

	if (ret) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
			    "Set perf level failed: %d", ret);
		goto set_perf_failed;
	}

	cdf_wake_lock_init(&hdd_ipa->wake_lock, "wlan_ipa");
#ifdef CONFIG_CNSS
	cnss_init_delayed_work(&hdd_ipa->wake_lock_work,
			       hdd_ipa_wake_lock_timer_func);
#else
	INIT_DELAYED_WORK(&hdd_ipa->wake_lock_work,
			  hdd_ipa_wake_lock_timer_func);
#endif
	cdf_spinlock_init(&hdd_ipa->rm_lock);
	hdd_ipa->rm_state = HDD_IPA_RM_RELEASED;
	hdd_ipa->wake_lock_released = true;
	atomic_set(&hdd_ipa->tx_ref_cnt, 0);

	return ret;

	/* Error unwinding: each label undoes the step that preceded the
	 * failure and falls through to the labels below it.
	 */
set_perf_failed:
	ipa_rm_inactivity_timer_destroy(IPA_RM_RESOURCE_WLAN_PROD);

timer_init_failed:
	ipa_rm_delete_resource(IPA_RM_RESOURCE_WLAN_CONS);

delete_prod:
	ipa_rm_delete_resource(IPA_RM_RESOURCE_WLAN_PROD);

setup_rm_fail:
	return ret;
}
2282
2283/**
2284 * hdd_ipa_destroy_rm_resource() - Destroy IPA resources
2285 * @hdd_ipa: Global HDD IPA context
2286 *
2287 * Destroys all resources associated with the IPA resource manager
2288 *
2289 * Return: None
2290 */
2291static void hdd_ipa_destroy_rm_resource(struct hdd_ipa_priv *hdd_ipa)
2292{
2293 int ret;
2294
2295 if (!hdd_ipa_is_rm_enabled(hdd_ipa->hdd_ctx))
2296 return;
2297
2298 cancel_delayed_work_sync(&hdd_ipa->wake_lock_work);
2299 cdf_wake_lock_destroy(&hdd_ipa->wake_lock);
2300
2301#ifdef WLAN_OPEN_SOURCE
2302 cancel_work_sync(&hdd_ipa->uc_rm_work.work);
2303#endif
2304 cdf_spinlock_destroy(&hdd_ipa->rm_lock);
2305
2306 ipa_rm_inactivity_timer_destroy(IPA_RM_RESOURCE_WLAN_PROD);
2307
2308 ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_WLAN_PROD);
2309 if (ret)
2310 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
2311 "RM PROD resource delete failed %d", ret);
2312
2313 ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_WLAN_CONS);
2314 if (ret)
2315 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
2316 "RM CONS resource delete failed %d", ret);
2317}
2318
2319/**
2320 * hdd_ipa_send_skb_to_network() - Send skb to kernel
2321 * @skb: network buffer
2322 * @adapter: network adapter
2323 *
2324 * Called when a network buffer is received which should not be routed
2325 * to the IPA module.
2326 *
2327 * Return: None
2328 */
2329static void hdd_ipa_send_skb_to_network(cdf_nbuf_t skb,
2330 hdd_adapter_t *adapter)
2331{
2332 struct hdd_ipa_priv *hdd_ipa = ghdd_ipa;
2333 unsigned int cpu_index;
2334
2335 if (!adapter || adapter->magic != WLAN_HDD_ADAPTER_MAGIC) {
2336 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO_LOW, "Invalid adapter: 0x%p",
2337 adapter);
2338 HDD_IPA_INCREASE_INTERNAL_DROP_COUNT(hdd_ipa);
2339 cdf_nbuf_free(skb);
2340 return;
2341 }
2342
2343 if (hdd_ipa->hdd_ctx->isUnloadInProgress) {
2344 HDD_IPA_INCREASE_INTERNAL_DROP_COUNT(hdd_ipa);
2345 cdf_nbuf_free(skb);
2346 return;
2347 }
2348
2349 skb->destructor = hdd_ipa_uc_rt_debug_destructor;
2350 skb->dev = adapter->dev;
2351 skb->protocol = eth_type_trans(skb, skb->dev);
2352 skb->ip_summed = CHECKSUM_NONE;
2353
2354 cpu_index = wlan_hdd_get_cpu();
2355
2356 ++adapter->hdd_stats.hddTxRxStats.rxPackets[cpu_index];
2357 if (netif_rx_ni(skb) == NET_RX_SUCCESS)
2358 ++adapter->hdd_stats.hddTxRxStats.rxDelivered[cpu_index];
2359 else
2360 ++adapter->hdd_stats.hddTxRxStats.rxRefused[cpu_index];
2361
2362 HDD_IPA_INCREASE_NET_SEND_COUNT(hdd_ipa);
2363 adapter->dev->last_rx = jiffies;
2364}
2365
2366#define FW_RX_DESC_DISCARD_M 0x1
2367#define FW_RX_DESC_FORWARD_M 0x2
2368
/**
 * hdd_ipa_w2i_cb() - WLAN to IPA callback handler
 * @priv: pointer to private data registered with IPA (we register a
 *	pointer to the global IPA context)
 * @evt: the IPA event which triggered the callback
 * @data: data associated with the event
 *
 * Handles IPA_RECEIVE exception-path packets: resolves the destination
 * interface, strips the CLD header, optionally forwards intra-BSS
 * traffic back to Tx per the firmware descriptor bits, and delivers
 * the remainder to the kernel network stack.
 *
 * Return: None
 */
static void hdd_ipa_w2i_cb(void *priv, enum ipa_dp_evt_type evt,
			   unsigned long data)
{
	struct hdd_ipa_priv *hdd_ipa = NULL;
	hdd_adapter_t *adapter = NULL;
	cdf_nbuf_t skb;
	uint8_t iface_id;
	uint8_t session_id;
	struct hdd_ipa_iface_context *iface_context;
	cdf_nbuf_t copy;
	uint8_t fw_desc;
	int ret;

	hdd_ipa = (struct hdd_ipa_priv *)priv;

	switch (evt) {
	case IPA_RECEIVE:
		skb = (cdf_nbuf_t) data;
		if (hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
			/* uC mode: cb[0] carries the firmware session id */
			session_id = (uint8_t)skb->cb[0];
			iface_id = vdev_to_iface[session_id];
			HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO_HIGH,
				"IPA_RECEIVE: session_id=%u, iface_id=%u",
				session_id, iface_id);
		} else {
			iface_id = HDD_IPA_GET_IFACE_ID(skb->data);
		}

		if (iface_id >= HDD_IPA_MAX_IFACE) {
			HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
				    "IPA_RECEIVE: Invalid iface_id: %u",
				    iface_id);
			HDD_IPA_DBG_DUMP(CDF_TRACE_LEVEL_INFO_HIGH,
				"w2i -- skb", skb->data, 8);
			HDD_IPA_INCREASE_INTERNAL_DROP_COUNT(hdd_ipa);
			cdf_nbuf_free(skb);
			return;
		}

		iface_context = &hdd_ipa->iface_context[iface_id];
		adapter = iface_context->adapter;

		HDD_IPA_DBG_DUMP(CDF_TRACE_LEVEL_DEBUG,
				 "w2i -- skb", skb->data, 8);
		/* Strip the CLD header (uC and non-uC headers differ) */
		if (hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
			hdd_ipa->stats.num_rx_excep++;
			skb_pull(skb, HDD_IPA_UC_WLAN_CLD_HDR_LEN);
		} else {
			skb_pull(skb, HDD_IPA_WLAN_CLD_HDR_LEN);
		}

		iface_context->stats.num_rx_ipa_excep++;

		/* Disable to forward Intra-BSS Rx packets when
		 * ap_isolate=1 in hostapd.conf
		 */
		if (adapter->sessionCtx.ap.apDisableIntraBssFwd) {
			/*
			 * When INTRA_BSS_FWD_OFFLOAD is enabled, FW will send
			 * all Rx packets to IPA uC, which need to be forwarded
			 * to other interface.
			 * And, IPA driver will send back to WLAN host driver
			 * through exception pipe with fw_desc field set by FW.
			 * Here we are checking fw_desc field for FORWARD bit
			 * set, and forward to Tx. Then copy to kernel stack
			 * only when DISCARD bit is not set.
			 *
			 * NOTE(review): forwarding runs when
			 * apDisableIntraBssFwd is TRUE, which reads inverted
			 * relative to ap_isolate semantics -- confirm the
			 * intended polarity of this condition.
			 */
			fw_desc = (uint8_t)skb->cb[1];

			if (fw_desc & FW_RX_DESC_FORWARD_M) {
				HDD_IPA_LOG(
					CDF_TRACE_LEVEL_DEBUG,
					"Forward packet to Tx (fw_desc=%d)",
					fw_desc);
				/* Forward a copy so the original can still
				 * be delivered to the kernel stack below
				 */
				copy = cdf_nbuf_copy(skb);
				if (copy) {
					hdd_ipa->ipa_tx_forward++;
					ret = hdd_softap_hard_start_xmit(
						(struct sk_buff *)copy,
						adapter->dev);
					if (ret) {
						HDD_IPA_LOG(
							CDF_TRACE_LEVEL_DEBUG,
							"Forward packet tx fail");
						hdd_ipa->stats.
							num_tx_bcmc_err++;
					} else {
						hdd_ipa->stats.num_tx_bcmc++;
					}
				}
			}

			if (fw_desc & FW_RX_DESC_DISCARD_M) {
				/* FW asked us not to pass this one up */
				HDD_IPA_INCREASE_INTERNAL_DROP_COUNT(hdd_ipa);
				hdd_ipa->ipa_rx_discard++;
				cdf_nbuf_free(skb);
				break;
			}

		} else {
			HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO_HIGH,
				"Intra-BSS FWD is disabled-skip forward to Tx");
		}

		hdd_ipa_send_skb_to_network(skb, adapter);
		break;

	default:
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
			    "w2i cb wrong event: 0x%x", evt);
		return;
	}
}
2492
2493/**
2494 * hdd_ipa_nbuf_cb() - IPA TX complete callback
2495 * @skb: packet buffer which was transmitted
2496 *
2497 * Return: None
2498 */
2499static void hdd_ipa_nbuf_cb(cdf_nbuf_t skb)
2500{
2501 struct hdd_ipa_priv *hdd_ipa = ghdd_ipa;
2502
2503 HDD_IPA_LOG(CDF_TRACE_LEVEL_DEBUG, "%lx", NBUF_OWNER_PRIV_DATA(skb));
2504 ipa_free_skb((struct ipa_rx_data *)NBUF_OWNER_PRIV_DATA(skb));
2505
2506 hdd_ipa->stats.num_tx_comp_cnt++;
2507
2508 atomic_dec(&hdd_ipa->tx_ref_cnt);
2509
2510 hdd_ipa_rm_try_release(hdd_ipa);
2511}
2512
/**
 * hdd_ipa_send_pkt_to_tl() - Send an IPA packet to TL
 * @iface_context: interface-specific IPA context
 * @ipa_tx_desc: packet data descriptor
 *
 * Hands an IPA-delivered tx packet to the TL layer.  On every drop
 * path the descriptor is returned via ipa_free_skb() and an RM release
 * is attempted, keeping descriptor ownership balanced; on success the
 * descriptor is freed later by hdd_ipa_nbuf_cb() at tx completion.
 *
 * Return: None
 */
static void hdd_ipa_send_pkt_to_tl(
		struct hdd_ipa_iface_context *iface_context,
		struct ipa_rx_data *ipa_tx_desc)
{
	struct hdd_ipa_priv *hdd_ipa = iface_context->hdd_ipa;
	uint8_t interface_id;
	hdd_adapter_t *adapter = NULL;
	cdf_nbuf_t skb;

	/* interface_lock guards iface_context->adapter against the
	 * interface going down while it is used here
	 */
	cdf_spin_lock_bh(&iface_context->interface_lock);
	adapter = iface_context->adapter;
	if (!adapter) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_WARN, "Interface Down");
		ipa_free_skb(ipa_tx_desc);
		iface_context->stats.num_tx_drop++;
		cdf_spin_unlock_bh(&iface_context->interface_lock);
		hdd_ipa_rm_try_release(hdd_ipa);
		return;
	}

	/*
	 * During CAC period, data packets shouldn't be sent over the air so
	 * drop all the packets here
	 */
	if (WLAN_HDD_GET_AP_CTX_PTR(adapter)->dfs_cac_block_tx) {
		ipa_free_skb(ipa_tx_desc);
		cdf_spin_unlock_bh(&iface_context->interface_lock);
		iface_context->stats.num_tx_cac_drop++;
		hdd_ipa_rm_try_release(hdd_ipa);
		return;
	}

	/* NOTE(review): interface_id is assigned but never read below */
	interface_id = adapter->sessionId;
	++adapter->stats.tx_packets;

	cdf_spin_unlock_bh(&iface_context->interface_lock);

	skb = ipa_tx_desc->skb;

	/* Stash owner info and the descriptor in the nbuf so that
	 * hdd_ipa_nbuf_cb() can free it on tx completion
	 */
	cdf_mem_set(skb->cb, sizeof(skb->cb), 0);
	NBUF_OWNER_ID(skb) = IPA_NBUF_OWNER_ID;
	NBUF_CALLBACK_FN(skb) = hdd_ipa_nbuf_cb;
	if (hdd_ipa_uc_sta_is_enabled(hdd_ipa->hdd_ctx)) {
		/* Skip the frag + IPA headers in uC STA mode; assumes
		 * dma_addr points at the frag header -- TODO confirm
		 * against the uC tx header layout
		 */
		NBUF_MAPPED_PADDR_LO(skb) = ipa_tx_desc->dma_addr
					    + HDD_IPA_WLAN_FRAG_HEADER
					    + HDD_IPA_WLAN_IPA_HEADER;
		ipa_tx_desc->skb->len -=
			HDD_IPA_WLAN_FRAG_HEADER + HDD_IPA_WLAN_IPA_HEADER;
	} else
		NBUF_MAPPED_PADDR_LO(skb) = ipa_tx_desc->dma_addr;

	NBUF_OWNER_PRIV_DATA(skb) = (unsigned long)ipa_tx_desc;

	adapter->stats.tx_bytes += ipa_tx_desc->skb->len;

	/* A non-NULL return means TL refused the frame */
	skb = ol_tx_send_ipa_data_frame(iface_context->tl_context,
					ipa_tx_desc->skb);
	if (skb) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_DEBUG, "TLSHIM tx fail");
		ipa_free_skb(ipa_tx_desc);
		iface_context->stats.num_tx_err++;
		hdd_ipa_rm_try_release(hdd_ipa);
		return;
	}

	atomic_inc(&hdd_ipa->tx_ref_cnt);

	iface_context->stats.num_tx++;

}
2590
/**
 * hdd_ipa_pm_send_pkt_to_tl() - Send queued packets to TL
 * @work: pointer to the scheduled work
 *
 * Called during PM resume to send packets to TL which were queued
 * while host was in the process of suspending.  Also tracks the
 * high-water mark of packets drained in a single resume.
 *
 * Return: None
 */
static void hdd_ipa_pm_send_pkt_to_tl(struct work_struct *work)
{
	struct hdd_ipa_priv *hdd_ipa = container_of(work,
						    struct hdd_ipa_priv,
						    pm_work);
	struct hdd_ipa_pm_tx_cb *pm_tx_cb = NULL;
	cdf_nbuf_t skb;
	uint32_t dequeued = 0;

	cdf_spin_lock_bh(&hdd_ipa->pm_lock);

	/* pm_lock is dropped around each send: hdd_ipa_send_pkt_to_tl()
	 * takes other locks and must not run under pm_lock
	 */
	while (((skb = cdf_nbuf_queue_remove(&hdd_ipa->pm_queue_head)) != NULL)) {
		cdf_spin_unlock_bh(&hdd_ipa->pm_lock);

		/* Per-packet context was stashed in skb->cb at queue time */
		pm_tx_cb = (struct hdd_ipa_pm_tx_cb *)skb->cb;

		dequeued++;

		hdd_ipa_send_pkt_to_tl(pm_tx_cb->iface_context,
				       pm_tx_cb->ipa_tx_desc);

		cdf_spin_lock_bh(&hdd_ipa->pm_lock);
	}

	cdf_spin_unlock_bh(&hdd_ipa->pm_lock);

	hdd_ipa->stats.num_tx_dequeued += dequeued;
	if (dequeued > hdd_ipa->stats.num_max_pm_queue)
		hdd_ipa->stats.num_max_pm_queue = dequeued;
}
2630
/**
 * hdd_ipa_i2w_cb() - IPA to WLAN callback
 * @priv: pointer to private data registered with IPA (we register a
 *	pointer to the interface-specific IPA context)
 * @evt: the IPA event which triggered the callback
 * @data: data associated with the event
 *
 * Receives tx packets from IPA.  Drops them during SSR/unload, queues
 * them on the PM queue while the host is suspended, and otherwise
 * forwards them to TL via hdd_ipa_send_pkt_to_tl().
 *
 * Return: None
 */
static void hdd_ipa_i2w_cb(void *priv, enum ipa_dp_evt_type evt,
			   unsigned long data)
{
	struct hdd_ipa_priv *hdd_ipa = NULL;
	struct ipa_rx_data *ipa_tx_desc;
	struct hdd_ipa_iface_context *iface_context;
	cdf_nbuf_t skb;
	struct hdd_ipa_pm_tx_cb *pm_tx_cb = NULL;
	CDF_STATUS status = CDF_STATUS_SUCCESS;

	iface_context = (struct hdd_ipa_iface_context *)priv;
	/* Only IPA_RECEIVE is expected here; free anything else */
	if (evt != IPA_RECEIVE) {
		skb = (cdf_nbuf_t) data;
		dev_kfree_skb_any(skb);
		iface_context->stats.num_tx_drop++;
		return;
	}

	ipa_tx_desc = (struct ipa_rx_data *)data;

	hdd_ipa = iface_context->hdd_ipa;

	/*
	 * When SSR is going on or driver is unloading, just drop the packets.
	 * During SSR, there is no use in queueing the packets as STA has to
	 * connect back any way
	 */
	status = wlan_hdd_validate_context(hdd_ipa->hdd_ctx);
	if (0 != status) {
		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "HDD context is not valid");
		ipa_free_skb(ipa_tx_desc);
		iface_context->stats.num_tx_drop++;
		return;
	}

	skb = ipa_tx_desc->skb;

	HDD_IPA_DBG_DUMP(CDF_TRACE_LEVEL_DEBUG, "i2w", skb->data, 8);

	/*
	 * If PROD resource is not requested here then there may be cases where
	 * IPA hardware may be clocked down because of not having proper
	 * dependency graph between WLAN CONS and modem PROD pipes. Adding the
	 * workaround to request PROD resource while data is going over CONS
	 * pipe to prevent the IPA hardware clockdown.
	 */
	hdd_ipa_rm_request(hdd_ipa);

	cdf_spin_lock_bh(&hdd_ipa->pm_lock);
	/*
	 * If host is still suspended then queue the packets and these will be
	 * drained later when resume completes. When packet is arrived here and
	 * host is suspended, this means that there is already resume is in
	 * progress.
	 */
	if (hdd_ipa->suspended) {
		/* Park iface/descriptor context in skb->cb for the PM work */
		cdf_mem_set(skb->cb, sizeof(skb->cb), 0);
		pm_tx_cb = (struct hdd_ipa_pm_tx_cb *)skb->cb;
		pm_tx_cb->iface_context = iface_context;
		pm_tx_cb->ipa_tx_desc = ipa_tx_desc;
		cdf_nbuf_queue_add(&hdd_ipa->pm_queue_head, skb);
		hdd_ipa->stats.num_tx_queued++;

		cdf_spin_unlock_bh(&hdd_ipa->pm_lock);
		return;
	}

	cdf_spin_unlock_bh(&hdd_ipa->pm_lock);

	/*
	 * If we are here means, host is not suspended, wait for the work queue
	 * to finish.
	 */
#ifdef WLAN_OPEN_SOURCE
	flush_work(&hdd_ipa->pm_work);
#endif

	return hdd_ipa_send_pkt_to_tl(iface_context, ipa_tx_desc);
}
2719
2720/**
2721 * hdd_ipa_suspend() - Suspend IPA
2722 * @hdd_ctx: Global HDD context
2723 *
2724 * Return: 0 on success, negativer errno on error
2725 */
2726int hdd_ipa_suspend(hdd_context_t *hdd_ctx)
2727{
2728 struct hdd_ipa_priv *hdd_ipa = hdd_ctx->hdd_ipa;
2729
2730 if (!hdd_ipa_is_enabled(hdd_ctx))
2731 return 0;
2732
2733 /*
2734 * Check if IPA is ready for suspend, If we are here means, there is
2735 * high chance that suspend would go through but just to avoid any race
2736 * condition after suspend started, these checks are conducted before
2737 * allowing to suspend.
2738 */
2739 if (atomic_read(&hdd_ipa->tx_ref_cnt))
2740 return -EAGAIN;
2741
2742 cdf_spin_lock_bh(&hdd_ipa->rm_lock);
2743
2744 if (hdd_ipa->rm_state != HDD_IPA_RM_RELEASED) {
2745 cdf_spin_unlock_bh(&hdd_ipa->rm_lock);
2746 return -EAGAIN;
2747 }
2748 cdf_spin_unlock_bh(&hdd_ipa->rm_lock);
2749
2750 cdf_spin_lock_bh(&hdd_ipa->pm_lock);
2751 hdd_ipa->suspended = true;
2752 cdf_spin_unlock_bh(&hdd_ipa->pm_lock);
2753
2754 return 0;
2755}
2756
2757/**
2758 * hdd_ipa_resume() - Resume IPA following suspend
2759 * hdd_ctx: Global HDD context
2760 *
2761 * Return: 0 on success, negative errno on error
2762 */
2763int hdd_ipa_resume(hdd_context_t *hdd_ctx)
2764{
2765 struct hdd_ipa_priv *hdd_ipa = hdd_ctx->hdd_ipa;
2766
2767 if (!hdd_ipa_is_enabled(hdd_ctx))
2768 return 0;
2769
2770 schedule_work(&hdd_ipa->pm_work);
2771
2772 cdf_spin_lock_bh(&hdd_ipa->pm_lock);
2773 hdd_ipa->suspended = false;
2774 cdf_spin_unlock_bh(&hdd_ipa->pm_lock);
2775
2776 return 0;
2777}
2778
2779/**
2780 * hdd_ipa_setup_sys_pipe() - Setup all IPA Sys pipes
2781 * @hdd_ipa: Global HDD IPA context
2782 *
2783 * Return: 0 on success, negative errno on error
2784 */
2785static int hdd_ipa_setup_sys_pipe(struct hdd_ipa_priv *hdd_ipa)
2786{
2787 int i, ret = 0;
2788 struct ipa_sys_connect_params *ipa;
2789 uint32_t desc_fifo_sz;
2790
2791 /* The maximum number of descriptors that can be provided to a BAM at
2792 * once is one less than the total number of descriptors that the buffer
2793 * can contain.
2794 * If max_num_of_descriptors = (BAM_PIPE_DESCRIPTOR_FIFO_SIZE / sizeof
2795 * (SPS_DESCRIPTOR)), then (max_num_of_descriptors - 1) descriptors can
2796 * be provided at once.
2797 * Because of above requirement, one extra descriptor will be added to
2798 * make sure hardware always has one descriptor.
2799 */
2800 desc_fifo_sz = hdd_ipa->hdd_ctx->config->IpaDescSize
2801 + sizeof(struct sps_iovec);
2802
2803 /*setup TX pipes */
2804 for (i = 0; i < HDD_IPA_MAX_IFACE; i++) {
2805 ipa = &hdd_ipa->sys_pipe[i].ipa_sys_params;
2806
2807 ipa->client = hdd_ipa_adapter_2_client[i].cons_client;
2808 ipa->desc_fifo_sz = desc_fifo_sz;
2809 ipa->priv = &hdd_ipa->iface_context[i];
2810 ipa->notify = hdd_ipa_i2w_cb;
2811
2812 if (hdd_ipa_uc_sta_is_enabled(hdd_ipa->hdd_ctx)) {
2813 ipa->ipa_ep_cfg.hdr.hdr_len =
2814 HDD_IPA_UC_WLAN_TX_HDR_LEN;
2815 ipa->ipa_ep_cfg.nat.nat_en = IPA_BYPASS_NAT;
2816 ipa->ipa_ep_cfg.hdr.hdr_ofst_pkt_size_valid = 1;
2817 ipa->ipa_ep_cfg.hdr.hdr_ofst_pkt_size = 0;
2818 ipa->ipa_ep_cfg.hdr.hdr_additional_const_len =
2819 HDD_IPA_UC_WLAN_8023_HDR_SIZE;
2820 ipa->ipa_ep_cfg.hdr_ext.hdr_little_endian = true;
2821 } else {
2822 ipa->ipa_ep_cfg.hdr.hdr_len = HDD_IPA_WLAN_TX_HDR_LEN;
2823 }
2824 ipa->ipa_ep_cfg.mode.mode = IPA_BASIC;
2825
2826 if (!hdd_ipa_is_rm_enabled(hdd_ipa->hdd_ctx))
2827 ipa->keep_ipa_awake = 1;
2828
2829 ret = ipa_setup_sys_pipe(ipa, &(hdd_ipa->sys_pipe[i].conn_hdl));
2830 if (ret) {
2831 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "Failed for pipe %d"
2832 " ret: %d", i, ret);
2833 goto setup_sys_pipe_fail;
2834 }
2835 hdd_ipa->sys_pipe[i].conn_hdl_valid = 1;
2836 }
2837
2838 if (!hdd_ipa_uc_sta_is_enabled(hdd_ipa->hdd_ctx)) {
2839 /*
2840 * Hard code it here, this can be extended if in case
2841 * PROD pipe is also per interface.
2842 * Right now there is no advantage of doing this.
2843 */
2844 hdd_ipa->prod_client = IPA_CLIENT_WLAN1_PROD;
2845
2846 ipa = &hdd_ipa->sys_pipe[HDD_IPA_RX_PIPE].ipa_sys_params;
2847
2848 ipa->client = hdd_ipa->prod_client;
2849
2850 ipa->desc_fifo_sz = desc_fifo_sz;
2851 ipa->priv = hdd_ipa;
2852 ipa->notify = hdd_ipa_w2i_cb;
2853
2854 ipa->ipa_ep_cfg.nat.nat_en = IPA_BYPASS_NAT;
2855 ipa->ipa_ep_cfg.hdr.hdr_len = HDD_IPA_WLAN_RX_HDR_LEN;
2856 ipa->ipa_ep_cfg.hdr.hdr_ofst_metadata_valid = 1;
2857 ipa->ipa_ep_cfg.mode.mode = IPA_BASIC;
2858
2859 if (!hdd_ipa_is_rm_enabled(hdd_ipa->hdd_ctx))
2860 ipa->keep_ipa_awake = 1;
2861
2862 ret = ipa_setup_sys_pipe(ipa, &(hdd_ipa->sys_pipe[i].conn_hdl));
2863 if (ret) {
2864 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
2865 "Failed for RX pipe: %d", ret);
2866 goto setup_sys_pipe_fail;
2867 }
2868 hdd_ipa->sys_pipe[HDD_IPA_RX_PIPE].conn_hdl_valid = 1;
2869 }
2870
2871 return ret;
2872
2873setup_sys_pipe_fail:
2874
2875 while (--i >= 0) {
2876 ipa_teardown_sys_pipe(hdd_ipa->sys_pipe[i].conn_hdl);
2877 cdf_mem_zero(&hdd_ipa->sys_pipe[i],
2878 sizeof(struct hdd_ipa_sys_pipe));
2879 }
2880
2881 return ret;
2882}
2883
2884/**
2885 * hdd_ipa_teardown_sys_pipe() - Tear down all IPA Sys pipes
2886 * @hdd_ipa: Global HDD IPA context
2887 *
2888 * Return: None
2889 */
2890static void hdd_ipa_teardown_sys_pipe(struct hdd_ipa_priv *hdd_ipa)
2891{
2892 int ret = 0, i;
2893 for (i = 0; i < HDD_IPA_MAX_SYSBAM_PIPE; i++) {
2894 if (hdd_ipa->sys_pipe[i].conn_hdl_valid) {
2895 ret =
2896 ipa_teardown_sys_pipe(hdd_ipa->sys_pipe[i].
2897 conn_hdl);
2898 if (ret)
2899 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "Failed: %d",
2900 ret);
2901
2902 hdd_ipa->sys_pipe[i].conn_hdl_valid = 0;
2903 }
2904 }
2905}
2906
2907/**
2908 * hdd_ipa_register_interface() - register IPA interface
2909 * @hdd_ipa: Global IPA context
2910 * @iface_context: Per-interface IPA context
2911 *
2912 * Return: 0 on success, negative errno on error
2913 */
2914static int hdd_ipa_register_interface(struct hdd_ipa_priv *hdd_ipa,
2915 struct hdd_ipa_iface_context
2916 *iface_context)
2917{
2918 struct ipa_tx_intf tx_intf;
2919 struct ipa_rx_intf rx_intf;
2920 struct ipa_ioc_tx_intf_prop *tx_prop = NULL;
2921 struct ipa_ioc_rx_intf_prop *rx_prop = NULL;
2922 char *ifname = iface_context->adapter->dev->name;
2923
2924 char ipv4_hdr_name[IPA_RESOURCE_NAME_MAX];
2925 char ipv6_hdr_name[IPA_RESOURCE_NAME_MAX];
2926
2927 int num_prop = 1;
2928 int ret = 0;
2929
2930 if (hdd_ipa_is_ipv6_enabled(hdd_ipa->hdd_ctx))
2931 num_prop++;
2932
2933 /* Allocate TX properties for TOS categories, 1 each for IPv4 & IPv6 */
2934 tx_prop =
2935 cdf_mem_malloc(sizeof(struct ipa_ioc_tx_intf_prop) * num_prop);
2936 if (!tx_prop) {
2937 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "tx_prop allocation failed");
2938 goto register_interface_fail;
2939 }
2940
2941 /* Allocate RX properties, 1 each for IPv4 & IPv6 */
2942 rx_prop =
2943 cdf_mem_malloc(sizeof(struct ipa_ioc_rx_intf_prop) * num_prop);
2944 if (!rx_prop) {
2945 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "rx_prop allocation failed");
2946 goto register_interface_fail;
2947 }
2948
2949 cdf_mem_zero(&tx_intf, sizeof(tx_intf));
2950 cdf_mem_zero(&rx_intf, sizeof(rx_intf));
2951
2952 snprintf(ipv4_hdr_name, IPA_RESOURCE_NAME_MAX, "%s%s",
2953 ifname, HDD_IPA_IPV4_NAME_EXT);
2954 snprintf(ipv6_hdr_name, IPA_RESOURCE_NAME_MAX, "%s%s",
2955 ifname, HDD_IPA_IPV6_NAME_EXT);
2956
2957 rx_prop[IPA_IP_v4].ip = IPA_IP_v4;
2958 rx_prop[IPA_IP_v4].src_pipe = iface_context->prod_client;
2959 rx_prop[IPA_IP_v4].hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
2960 rx_prop[IPA_IP_v4].attrib.attrib_mask = IPA_FLT_META_DATA;
2961
2962 /*
2963 * Interface ID is 3rd byte in the CLD header. Add the meta data and
2964 * mask to identify the interface in IPA hardware
2965 */
2966 rx_prop[IPA_IP_v4].attrib.meta_data =
2967 htonl(iface_context->adapter->sessionId << 16);
2968 rx_prop[IPA_IP_v4].attrib.meta_data_mask = htonl(0x00FF0000);
2969
2970 rx_intf.num_props++;
2971 if (hdd_ipa_is_ipv6_enabled(hdd_ipa->hdd_ctx)) {
2972 rx_prop[IPA_IP_v6].ip = IPA_IP_v6;
2973 rx_prop[IPA_IP_v6].src_pipe = iface_context->prod_client;
2974 rx_prop[IPA_IP_v6].hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
2975 rx_prop[IPA_IP_v4].attrib.attrib_mask = IPA_FLT_META_DATA;
2976 rx_prop[IPA_IP_v4].attrib.meta_data =
2977 htonl(iface_context->adapter->sessionId << 16);
2978 rx_prop[IPA_IP_v4].attrib.meta_data_mask = htonl(0x00FF0000);
2979
2980 rx_intf.num_props++;
2981 }
2982
2983 tx_prop[IPA_IP_v4].ip = IPA_IP_v4;
2984 tx_prop[IPA_IP_v4].hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
2985 tx_prop[IPA_IP_v4].dst_pipe = IPA_CLIENT_WLAN1_CONS;
2986 tx_prop[IPA_IP_v4].alt_dst_pipe = iface_context->cons_client;
2987 strlcpy(tx_prop[IPA_IP_v4].hdr_name, ipv4_hdr_name,
2988 IPA_RESOURCE_NAME_MAX);
2989 tx_intf.num_props++;
2990
2991 if (hdd_ipa_is_ipv6_enabled(hdd_ipa->hdd_ctx)) {
2992 tx_prop[IPA_IP_v6].ip = IPA_IP_v6;
2993 tx_prop[IPA_IP_v6].hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
2994 tx_prop[IPA_IP_v6].dst_pipe = IPA_CLIENT_WLAN1_CONS;
2995 tx_prop[IPA_IP_v6].alt_dst_pipe = iface_context->cons_client;
2996 strlcpy(tx_prop[IPA_IP_v6].hdr_name, ipv6_hdr_name,
2997 IPA_RESOURCE_NAME_MAX);
2998 tx_intf.num_props++;
2999 }
3000
3001 tx_intf.prop = tx_prop;
3002 rx_intf.prop = rx_prop;
3003
3004 /* Call the ipa api to register interface */
3005 ret = ipa_register_intf(ifname, &tx_intf, &rx_intf);
3006
3007register_interface_fail:
3008 cdf_mem_free(tx_prop);
3009 cdf_mem_free(rx_prop);
3010 return ret;
3011}
3012
3013/**
3014 * hdd_remove_ipa_header() - Remove a specific header from IPA
3015 * @name: Name of the header to be removed
3016 *
3017 * Return: None
3018 */
3019static void hdd_ipa_remove_header(char *name)
3020{
3021 struct ipa_ioc_get_hdr hdrlookup;
3022 int ret = 0, len;
3023 struct ipa_ioc_del_hdr *ipa_hdr;
3024
3025 cdf_mem_zero(&hdrlookup, sizeof(hdrlookup));
3026 strlcpy(hdrlookup.name, name, sizeof(hdrlookup.name));
3027 ret = ipa_get_hdr(&hdrlookup);
3028 if (ret) {
3029 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "Hdr deleted already %s, %d",
3030 name, ret);
3031 return;
3032 }
3033
3034 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "hdl: 0x%x", hdrlookup.hdl);
3035 len = sizeof(struct ipa_ioc_del_hdr) + sizeof(struct ipa_hdr_del) * 1;
3036 ipa_hdr = (struct ipa_ioc_del_hdr *)cdf_mem_malloc(len);
3037 if (ipa_hdr == NULL) {
3038 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "ipa_hdr allocation failed");
3039 return;
3040 }
3041 ipa_hdr->num_hdls = 1;
3042 ipa_hdr->commit = 0;
3043 ipa_hdr->hdl[0].hdl = hdrlookup.hdl;
3044 ipa_hdr->hdl[0].status = -1;
3045 ret = ipa_del_hdr(ipa_hdr);
3046 if (ret != 0)
3047 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "Delete header failed: %d",
3048 ret);
3049
3050 cdf_mem_free(ipa_hdr);
3051}
3052
3053/**
3054 * hdd_ipa_add_header_info() - Add IPA header for a given interface
3055 * @hdd_ipa: Global HDD IPA context
3056 * @iface_context: Interface-specific HDD IPA context
3057 * @mac_addr: Interface MAC address
3058 *
3059 * Return: 0 on success, negativer errno value on error
3060 */
3061static int hdd_ipa_add_header_info(struct hdd_ipa_priv *hdd_ipa,
3062 struct hdd_ipa_iface_context *iface_context,
3063 uint8_t *mac_addr)
3064{
3065 hdd_adapter_t *adapter = iface_context->adapter;
3066 char *ifname;
3067 struct ipa_ioc_add_hdr *ipa_hdr = NULL;
3068 int ret = -EINVAL;
3069 struct hdd_ipa_tx_hdr *tx_hdr = NULL;
3070 struct hdd_ipa_uc_tx_hdr *uc_tx_hdr = NULL;
3071
3072 ifname = adapter->dev->name;
3073
3074 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "Add Partial hdr: %s, %pM",
3075 ifname, mac_addr);
3076
3077 /* dynamically allocate the memory to add the hdrs */
3078 ipa_hdr = cdf_mem_malloc(sizeof(struct ipa_ioc_add_hdr)
3079 + sizeof(struct ipa_hdr_add));
3080 if (!ipa_hdr) {
3081 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
3082 "%s: ipa_hdr allocation failed", ifname);
3083 ret = -ENOMEM;
3084 goto end;
3085 }
3086
3087 ipa_hdr->commit = 0;
3088 ipa_hdr->num_hdrs = 1;
3089
3090 if (hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
3091 uc_tx_hdr = (struct hdd_ipa_uc_tx_hdr *)ipa_hdr->hdr[0].hdr;
3092 memcpy(uc_tx_hdr, &ipa_uc_tx_hdr, HDD_IPA_UC_WLAN_TX_HDR_LEN);
3093 memcpy(uc_tx_hdr->eth.h_source, mac_addr, ETH_ALEN);
3094 uc_tx_hdr->ipa_hd.vdev_id = iface_context->adapter->sessionId;
3095 HDD_IPA_LOG(CDF_TRACE_LEVEL_DEBUG,
3096 "ifname=%s, vdev_id=%d",
3097 ifname, uc_tx_hdr->ipa_hd.vdev_id);
3098 snprintf(ipa_hdr->hdr[0].name, IPA_RESOURCE_NAME_MAX, "%s%s",
3099 ifname, HDD_IPA_IPV4_NAME_EXT);
3100 ipa_hdr->hdr[0].hdr_len = HDD_IPA_UC_WLAN_TX_HDR_LEN;
3101 ipa_hdr->hdr[0].type = IPA_HDR_L2_ETHERNET_II;
3102 ipa_hdr->hdr[0].is_partial = 1;
3103 ipa_hdr->hdr[0].hdr_hdl = 0;
3104 ipa_hdr->hdr[0].is_eth2_ofst_valid = 1;
3105 ipa_hdr->hdr[0].eth2_ofst = HDD_IPA_UC_WLAN_HDR_DES_MAC_OFFSET;
3106
3107 ret = ipa_add_hdr(ipa_hdr);
3108 } else {
3109 tx_hdr = (struct hdd_ipa_tx_hdr *)ipa_hdr->hdr[0].hdr;
3110
3111 /* Set the Source MAC */
3112 memcpy(tx_hdr, &ipa_tx_hdr, HDD_IPA_WLAN_TX_HDR_LEN);
3113 memcpy(tx_hdr->eth.h_source, mac_addr, ETH_ALEN);
3114
3115 snprintf(ipa_hdr->hdr[0].name, IPA_RESOURCE_NAME_MAX, "%s%s",
3116 ifname, HDD_IPA_IPV4_NAME_EXT);
3117 ipa_hdr->hdr[0].hdr_len = HDD_IPA_WLAN_TX_HDR_LEN;
3118 ipa_hdr->hdr[0].is_partial = 1;
3119 ipa_hdr->hdr[0].hdr_hdl = 0;
3120 ipa_hdr->hdr[0].is_eth2_ofst_valid = 1;
3121 ipa_hdr->hdr[0].eth2_ofst = HDD_IPA_WLAN_HDR_DES_MAC_OFFSET;
3122
3123 /* Set the type to IPV4 in the header */
3124 tx_hdr->llc_snap.eth_type = cpu_to_be16(ETH_P_IP);
3125
3126 ret = ipa_add_hdr(ipa_hdr);
3127 }
3128 if (ret) {
3129 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "%s IPv4 add hdr failed: %d",
3130 ifname, ret);
3131 goto end;
3132 }
3133
3134 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "%s: IPv4 hdr_hdl: 0x%x",
3135 ipa_hdr->hdr[0].name, ipa_hdr->hdr[0].hdr_hdl);
3136
3137 if (hdd_ipa_is_ipv6_enabled(hdd_ipa->hdd_ctx)) {
3138 snprintf(ipa_hdr->hdr[0].name, IPA_RESOURCE_NAME_MAX, "%s%s",
3139 ifname, HDD_IPA_IPV6_NAME_EXT);
3140
3141 if (hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
3142 uc_tx_hdr =
3143 (struct hdd_ipa_uc_tx_hdr *)ipa_hdr->hdr[0].hdr;
3144 uc_tx_hdr->eth.h_proto = cpu_to_be16(ETH_P_IPV6);
3145 } else {
3146 /* Set the type to IPV6 in the header */
3147 tx_hdr = (struct hdd_ipa_tx_hdr *)ipa_hdr->hdr[0].hdr;
3148 tx_hdr->llc_snap.eth_type = cpu_to_be16(ETH_P_IPV6);
3149 }
3150
3151 ret = ipa_add_hdr(ipa_hdr);
3152 if (ret) {
3153 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
3154 "%s: IPv6 add hdr failed: %d", ifname, ret);
3155 goto clean_ipv4_hdr;
3156 }
3157
3158 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "%s: IPv6 hdr_hdl: 0x%x",
3159 ipa_hdr->hdr[0].name, ipa_hdr->hdr[0].hdr_hdl);
3160 }
3161
3162 cdf_mem_free(ipa_hdr);
3163
3164 return ret;
3165
3166clean_ipv4_hdr:
3167 snprintf(ipa_hdr->hdr[0].name, IPA_RESOURCE_NAME_MAX, "%s%s",
3168 ifname, HDD_IPA_IPV4_NAME_EXT);
3169 hdd_ipa_remove_header(ipa_hdr->hdr[0].name);
3170end:
3171 if (ipa_hdr)
3172 cdf_mem_free(ipa_hdr);
3173
3174 return ret;
3175}
3176
3177/**
3178 * hdd_ipa_clean_hdr() - Cleanup IPA on a given adapter
3179 * @adapter: Adapter upon which IPA was previously configured
3180 *
3181 * Return: None
3182 */
3183static void hdd_ipa_clean_hdr(hdd_adapter_t *adapter)
3184{
3185 struct hdd_ipa_priv *hdd_ipa = ghdd_ipa;
3186 int ret;
3187 char name_ipa[IPA_RESOURCE_NAME_MAX];
3188
3189 /* Remove the headers */
3190 snprintf(name_ipa, IPA_RESOURCE_NAME_MAX, "%s%s",
3191 adapter->dev->name, HDD_IPA_IPV4_NAME_EXT);
3192 hdd_ipa_remove_header(name_ipa);
3193
3194 if (hdd_ipa_is_ipv6_enabled(hdd_ipa->hdd_ctx)) {
3195 snprintf(name_ipa, IPA_RESOURCE_NAME_MAX, "%s%s",
3196 adapter->dev->name, HDD_IPA_IPV6_NAME_EXT);
3197 hdd_ipa_remove_header(name_ipa);
3198 }
3199 /* unregister the interface with IPA */
3200 ret = ipa_deregister_intf(adapter->dev->name);
3201 if (ret)
3202 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
3203 "%s: ipa_deregister_intf fail: %d",
3204 adapter->dev->name, ret);
3205}
3206
3207/**
3208 * hdd_ipa_cleanup_iface() - Cleanup IPA on a given interface
3209 * @iface_context: interface-specific IPA context
3210 *
3211 * Return: None
3212 */
3213static void hdd_ipa_cleanup_iface(struct hdd_ipa_iface_context *iface_context)
3214{
3215 if (iface_context == NULL)
3216 return;
3217
3218 hdd_ipa_clean_hdr(iface_context->adapter);
3219
3220 cdf_spin_lock_bh(&iface_context->interface_lock);
3221 iface_context->adapter->ipa_context = NULL;
3222 iface_context->adapter = NULL;
3223 iface_context->tl_context = NULL;
3224 cdf_spin_unlock_bh(&iface_context->interface_lock);
3225 iface_context->ifa_address = 0;
3226 if (!iface_context->hdd_ipa->num_iface) {
3227 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
3228 "NUM INTF 0, Invalid");
3229 CDF_ASSERT(0);
3230 }
3231 iface_context->hdd_ipa->num_iface--;
3232}
3233
3234/**
3235 * hdd_ipa_setup_iface() - Setup IPA on a given interface
3236 * @hdd_ipa: HDD IPA global context
3237 * @adapter: Interface upon which IPA is being setup
3238 * @sta_id: Station ID of the API instance
3239 *
3240 * Return: 0 on success, negative errno value on error
3241 */
3242static int hdd_ipa_setup_iface(struct hdd_ipa_priv *hdd_ipa,
3243 hdd_adapter_t *adapter, uint8_t sta_id)
3244{
3245 struct hdd_ipa_iface_context *iface_context = NULL;
3246 void *tl_context = NULL;
3247 int i, ret = 0;
3248
3249 /* Lower layer may send multiple START_BSS_EVENT in DFS mode or during
3250 * channel change indication. Since these indications are sent by lower
3251 * layer as SAP updates and IPA doesn't have to do anything for these
3252 * updates so ignoring!
3253 */
3254 if (WLAN_HDD_SOFTAP == adapter->device_mode && adapter->ipa_context)
3255 return 0;
3256
3257 for (i = 0; i < HDD_IPA_MAX_IFACE; i++) {
3258 if (hdd_ipa->iface_context[i].adapter == NULL) {
3259 iface_context = &(hdd_ipa->iface_context[i]);
3260 break;
3261 }
3262 }
3263
3264 if (iface_context == NULL) {
3265 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
3266 "All the IPA interfaces are in use");
3267 ret = -ENOMEM;
3268 goto end;
3269 }
3270
3271 adapter->ipa_context = iface_context;
3272 iface_context->adapter = adapter;
3273 iface_context->sta_id = sta_id;
3274 tl_context = ol_txrx_get_vdev_by_sta_id(sta_id);
3275
3276 if (tl_context == NULL) {
3277 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
3278 "Not able to get TL context sta_id: %d", sta_id);
3279 ret = -EINVAL;
3280 goto end;
3281 }
3282
3283 iface_context->tl_context = tl_context;
3284
3285 ret = hdd_ipa_add_header_info(hdd_ipa, iface_context,
3286 adapter->dev->dev_addr);
3287
3288 if (ret)
3289 goto end;
3290
3291 /* Configure the TX and RX pipes filter rules */
3292 ret = hdd_ipa_register_interface(hdd_ipa, iface_context);
3293 if (ret)
3294 goto cleanup_header;
3295
3296 hdd_ipa->num_iface++;
3297 return ret;
3298
3299cleanup_header:
3300
3301 hdd_ipa_clean_hdr(adapter);
3302end:
3303 if (iface_context)
3304 hdd_ipa_cleanup_iface(iface_context);
3305 return ret;
3306}
3307
3308/**
3309 * hdd_ipa_msg_free_fn() - Free an IPA message
3310 * @buff: pointer to the IPA message
3311 * @len: length of the IPA message
3312 * @type: type of IPA message
3313 *
3314 * Return: None
3315 */
3316static void hdd_ipa_msg_free_fn(void *buff, uint32_t len, uint32_t type)
3317{
3318 hddLog(LOG1, "msg type:%d, len:%d", type, len);
3319 ghdd_ipa->stats.num_free_msg++;
3320 cdf_mem_free(buff);
3321}
3322
3323/**
3324 * hdd_ipa_send_mcc_scc_msg() - send IPA WLAN_SWITCH_TO_MCC/SCC message
3325 * @mcc_mode: 0=MCC/1=SCC
3326 *
3327 * Return: 0 on success, negative errno value on error
3328 */
3329int hdd_ipa_send_mcc_scc_msg(hdd_context_t *pHddCtx, bool mcc_mode)
3330{
3331 hdd_adapter_list_node_t *adapter_node = NULL, *next = NULL;
3332 CDF_STATUS status;
3333 hdd_adapter_t *pAdapter;
3334 struct ipa_msg_meta meta;
3335 struct ipa_wlan_msg *msg;
3336 int ret;
3337
3338 if (!hdd_ipa_uc_sta_is_enabled(pHddCtx))
3339 return -EINVAL;
3340
3341 if (!pHddCtx->mcc_mode) {
3342 /* Flush TxRx queue for each adapter before switch to SCC */
3343 status = hdd_get_front_adapter(pHddCtx, &adapter_node);
3344 while (NULL != adapter_node && CDF_STATUS_SUCCESS == status) {
3345 pAdapter = adapter_node->pAdapter;
3346 if (pAdapter->device_mode == WLAN_HDD_INFRA_STATION ||
3347 pAdapter->device_mode == WLAN_HDD_SOFTAP) {
3348 hddLog(CDF_TRACE_LEVEL_INFO,
3349 "MCC->SCC: Flush TxRx queue(d_mode=%d)",
3350 pAdapter->device_mode);
3351 hdd_deinit_tx_rx(pAdapter);
3352 }
3353 status = hdd_get_next_adapter(
3354 pHddCtx, adapter_node, &next);
3355 adapter_node = next;
3356 }
3357 }
3358
3359 /* Send SCC/MCC Switching event to IPA */
3360 meta.msg_len = sizeof(*msg);
3361 msg = cdf_mem_malloc(meta.msg_len);
3362 if (msg == NULL) {
3363 hddLog(LOGE, "msg allocation failed");
3364 return -ENOMEM;
3365 }
3366
3367 meta.msg_type = mcc_mode ?
3368 WLAN_SWITCH_TO_MCC : WLAN_SWITCH_TO_SCC;
3369 hddLog(LOG1, "ipa_send_msg(Evt:%d)", meta.msg_type);
3370
3371 ret = ipa_send_msg(&meta, msg, hdd_ipa_msg_free_fn);
3372
3373 if (ret) {
3374 hddLog(LOGE, "ipa_send_msg(Evt:%d) - fail=%d",
3375 meta.msg_type, ret);
3376 cdf_mem_free(msg);
3377 }
3378
3379 return ret;
3380}
3381
3382/**
3383 * hdd_ipa_wlan_event_to_str() - convert IPA WLAN event to string
3384 * @event: IPA WLAN event to be converted to a string
3385 *
3386 * Return: ASCII string representing the IPA WLAN event
3387 */
3388static inline char *hdd_ipa_wlan_event_to_str(enum ipa_wlan_event event)
3389{
3390 switch (event) {
3391 case WLAN_CLIENT_CONNECT:
3392 return "WLAN_CLIENT_CONNECT";
3393 case WLAN_CLIENT_DISCONNECT:
3394 return "WLAN_CLIENT_DISCONNECT";
3395 case WLAN_CLIENT_POWER_SAVE_MODE:
3396 return "WLAN_CLIENT_POWER_SAVE_MODE";
3397 case WLAN_CLIENT_NORMAL_MODE:
3398 return "WLAN_CLIENT_NORMAL_MODE";
3399 case SW_ROUTING_ENABLE:
3400 return "SW_ROUTING_ENABLE";
3401 case SW_ROUTING_DISABLE:
3402 return "SW_ROUTING_DISABLE";
3403 case WLAN_AP_CONNECT:
3404 return "WLAN_AP_CONNECT";
3405 case WLAN_AP_DISCONNECT:
3406 return "WLAN_AP_DISCONNECT";
3407 case WLAN_STA_CONNECT:
3408 return "WLAN_STA_CONNECT";
3409 case WLAN_STA_DISCONNECT:
3410 return "WLAN_STA_DISCONNECT";
3411 case WLAN_CLIENT_CONNECT_EX:
3412 return "WLAN_CLIENT_CONNECT_EX";
3413
3414 case IPA_WLAN_EVENT_MAX:
3415 default:
3416 return "UNKNOWN";
3417 }
3418}
3419
3420/**
3421 * hdd_ipa_wlan_evt() - IPA event handler
3422 * @adapter: adapter upon which the event was received
3423 * @sta_id: station id for the event
3424 * @type: the event type
3425 * @mac_address: MAC address associated with the event
3426 *
3427 * Return: 0 on success, negative errno value on error
3428 */
3429int hdd_ipa_wlan_evt(hdd_adapter_t *adapter, uint8_t sta_id,
3430 enum ipa_wlan_event type, uint8_t *mac_addr)
3431{
3432 struct hdd_ipa_priv *hdd_ipa = ghdd_ipa;
3433 struct ipa_msg_meta meta;
3434 struct ipa_wlan_msg *msg;
3435 struct ipa_wlan_msg_ex *msg_ex = NULL;
3436 int ret;
3437
3438 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "%s: %s evt, MAC: %pM sta_id: %d",
3439 adapter->dev->name, hdd_ipa_wlan_event_to_str(type),
3440 mac_addr, sta_id);
3441
3442 if (type >= IPA_WLAN_EVENT_MAX)
3443 return -EINVAL;
3444
3445 if (WARN_ON(is_zero_ether_addr(mac_addr)))
3446 return -EINVAL;
3447
3448 if (!hdd_ipa || !hdd_ipa_is_enabled(hdd_ipa->hdd_ctx)) {
3449 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "IPA OFFLOAD NOT ENABLED");
3450 return -EINVAL;
3451 }
3452
3453 if (hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx) &&
3454 !hdd_ipa_uc_sta_is_enabled(hdd_ipa->hdd_ctx) &&
3455 (WLAN_HDD_SOFTAP != adapter->device_mode)) {
3456 return 0;
3457 }
3458
3459 /*
3460 * During IPA UC resource loading/unloading new events can be issued.
3461 * Store the events separately and handle them later.
3462 */
3463 if (hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx) &&
3464 ((hdd_ipa->resource_loading) ||
3465 (hdd_ipa->resource_unloading))) {
3466 struct ipa_uc_pending_event *pending_evet = NULL;
3467
3468 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
3469 "%s, RL/RUL inprogress", __func__);
3470 pending_evet = (struct ipa_uc_pending_event *)cdf_mem_malloc(
3471 sizeof(struct ipa_uc_pending_event));
3472 if (!pending_evet) {
3473 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
3474 "Pending event memory alloc fail");
3475 return -ENOMEM;
3476 }
3477 pending_evet->adapter = adapter;
3478 pending_evet->sta_id = sta_id;
3479 pending_evet->type = type;
3480 cdf_mem_copy(pending_evet->mac_addr,
3481 mac_addr,
3482 CDF_MAC_ADDR_SIZE);
3483 cdf_list_insert_back(&hdd_ipa->pending_event,
3484 &pending_evet->node);
3485 return 0;
3486 }
3487
3488 hdd_ipa->stats.event[type]++;
3489
3490 switch (type) {
3491 case WLAN_STA_CONNECT:
3492 /* STA already connected and without disconnect, connect again
3493 * This is Roaming scenario
3494 */
3495 if (hdd_ipa->sta_connected)
3496 hdd_ipa_cleanup_iface(adapter->ipa_context);
3497
3498 if ((hdd_ipa_uc_sta_is_enabled(hdd_ipa->hdd_ctx)) &&
3499 (!hdd_ipa->sta_connected))
3500 hdd_ipa_uc_offload_enable_disable(adapter,
3501 SIR_STA_RX_DATA_OFFLOAD, 1);
3502
3503 cdf_mutex_acquire(&hdd_ipa->event_lock);
3504
3505 if (!hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
3506 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
3507 "%s: Evt: %d, IPA UC OFFLOAD NOT ENABLED",
3508 msg_ex->name, meta.msg_type);
3509 } else if ((!hdd_ipa->sap_num_connected_sta) &&
3510 (!hdd_ipa->sta_connected)) {
3511 /* Enable IPA UC TX PIPE when STA connected */
3512 ret = hdd_ipa_uc_handle_first_con(hdd_ipa);
3513 if (!ret) {
3514 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
3515 "handle 1st con ret %d", ret);
3516 } else {
3517 cdf_mutex_release(&hdd_ipa->event_lock);
3518 hdd_ipa_uc_offload_enable_disable(adapter,
3519 SIR_STA_RX_DATA_OFFLOAD, 0);
3520 goto end;
3521 }
3522 }
3523 ret = hdd_ipa_setup_iface(hdd_ipa, adapter, sta_id);
3524 if (ret) {
3525 cdf_mutex_release(&hdd_ipa->event_lock);
3526 hdd_ipa_uc_offload_enable_disable(adapter,
3527 SIR_STA_RX_DATA_OFFLOAD, 0);
3528 goto end;
3529
3530#ifdef IPA_UC_OFFLOAD
3531 vdev_to_iface[adapter->sessionId] =
3532 ((struct hdd_ipa_iface_context *)
3533 (adapter->ipa_context))->iface_id;
3534#endif /* IPA_UC_OFFLOAD */
3535 }
3536
3537 cdf_mutex_release(&hdd_ipa->event_lock);
3538
3539 hdd_ipa->sta_connected = 1;
3540 break;
3541
3542 case WLAN_AP_CONNECT:
3543 /* For DFS channel we get two start_bss event (before and after
3544 * CAC). Also when ACS range includes both DFS and non DFS
3545 * channels, we could possibly change channel many times due to
3546 * RADAR detection and chosen channel may not be a DFS channels.
3547 * So dont return error here. Just discard the event.
3548 */
3549 if (adapter->ipa_context)
3550 return 0;
3551
3552 if (hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
3553 hdd_ipa_uc_offload_enable_disable(adapter,
3554 SIR_AP_RX_DATA_OFFLOAD, 1);
3555 }
3556 cdf_mutex_acquire(&hdd_ipa->event_lock);
3557 ret = hdd_ipa_setup_iface(hdd_ipa, adapter, sta_id);
3558 if (ret) {
3559 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
3560 "%s: Evt: %d, Interface setup failed",
3561 msg_ex->name, meta.msg_type);
3562 cdf_mutex_release(&hdd_ipa->event_lock);
3563 goto end;
3564
3565#ifdef IPA_UC_OFFLOAD
3566 vdev_to_iface[adapter->sessionId] =
3567 ((struct hdd_ipa_iface_context *)
3568 (adapter->ipa_context))->iface_id;
3569#endif /* IPA_UC_OFFLOAD */
3570 }
3571 cdf_mutex_release(&hdd_ipa->event_lock);
3572 break;
3573
3574 case WLAN_STA_DISCONNECT:
3575 cdf_mutex_acquire(&hdd_ipa->event_lock);
3576 hdd_ipa_cleanup_iface(adapter->ipa_context);
3577
3578 if (!hdd_ipa->sta_connected) {
3579 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
3580 "%s: Evt: %d, STA already disconnected",
3581 msg_ex->name, meta.msg_type);
3582 cdf_mutex_release(&hdd_ipa->event_lock);
3583 return -EINVAL;
3584 }
3585 hdd_ipa->sta_connected = 0;
3586 if (!hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
3587 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
3588 "%s: IPA UC OFFLOAD NOT ENABLED",
3589 msg_ex->name);
3590 } else {
3591 /* Disable IPA UC TX PIPE when STA disconnected */
3592 if ((!hdd_ipa->sap_num_connected_sta) ||
3593 ((!hdd_ipa->num_iface) &&
3594 (HDD_IPA_UC_NUM_WDI_PIPE ==
3595 hdd_ipa->activated_fw_pipe))) {
3596 hdd_ipa_uc_handle_last_discon(hdd_ipa);
3597 }
3598 }
3599
3600 if (hdd_ipa_uc_sta_is_enabled(hdd_ipa->hdd_ctx)) {
3601 hdd_ipa_uc_offload_enable_disable(adapter,
3602 SIR_STA_RX_DATA_OFFLOAD, 0);
3603 vdev_to_iface[adapter->sessionId] = HDD_IPA_MAX_IFACE;
3604 }
3605
3606 cdf_mutex_release(&hdd_ipa->event_lock);
3607 break;
3608
3609 case WLAN_AP_DISCONNECT:
3610 if (!adapter->ipa_context) {
3611 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
3612 "%s: Evt: %d, SAP already disconnected",
3613 msg_ex->name, meta.msg_type);
3614 return -EINVAL;
3615 }
3616
3617 cdf_mutex_acquire(&hdd_ipa->event_lock);
3618 hdd_ipa_cleanup_iface(adapter->ipa_context);
3619 if ((!hdd_ipa->num_iface) &&
3620 (HDD_IPA_UC_NUM_WDI_PIPE ==
3621 hdd_ipa->activated_fw_pipe)) {
3622 if (hdd_ipa->hdd_ctx->isUnloadInProgress) {
3623 /*
3624 * We disable WDI pipes directly here since
3625 * IPA_OPCODE_TX/RX_SUSPEND message will not be
3626 * processed when unloading WLAN driver is in
3627 * progress
3628 */
3629 hdd_ipa_uc_disable_pipes(hdd_ipa);
3630 } else {
3631 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
3632 "NO INTF left but still pipe clean up");
3633 hdd_ipa_uc_handle_last_discon(hdd_ipa);
3634 }
3635 }
3636
3637 if (hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
3638 hdd_ipa_uc_offload_enable_disable(adapter,
3639 SIR_AP_RX_DATA_OFFLOAD, 0);
3640 vdev_to_iface[adapter->sessionId] = HDD_IPA_MAX_IFACE;
3641 }
3642 cdf_mutex_release(&hdd_ipa->event_lock);
3643 break;
3644
3645 case WLAN_CLIENT_CONNECT_EX:
3646 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "%d %d",
3647 adapter->dev->ifindex, sta_id);
3648
3649 if (!hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
3650 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
3651 "%s: Evt: %d, IPA UC OFFLOAD NOT ENABLED",
3652 adapter->dev->name, meta.msg_type);
3653 return 0;
3654 }
3655
3656 cdf_mutex_acquire(&hdd_ipa->event_lock);
3657 if (hdd_ipa_uc_find_add_assoc_sta(hdd_ipa,
3658 true, sta_id)) {
3659 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
3660 "%s: STA ID %d found, not valid",
3661 adapter->dev->name, sta_id);
3662 cdf_mutex_release(&hdd_ipa->event_lock);
3663 return 0;
3664 }
3665 hdd_ipa->sap_num_connected_sta++;
3666 hdd_ipa->pending_cons_req = false;
3667 cdf_mutex_release(&hdd_ipa->event_lock);
3668
3669 meta.msg_type = type;
3670 meta.msg_len = (sizeof(struct ipa_wlan_msg_ex) +
3671 sizeof(struct ipa_wlan_hdr_attrib_val));
3672 msg_ex = cdf_mem_malloc(meta.msg_len);
3673
3674 if (msg_ex == NULL) {
3675 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
3676 "msg_ex allocation failed");
3677 return -ENOMEM;
3678 }
3679 strlcpy(msg_ex->name, adapter->dev->name,
3680 IPA_RESOURCE_NAME_MAX);
3681 msg_ex->num_of_attribs = 1;
3682 msg_ex->attribs[0].attrib_type = WLAN_HDR_ATTRIB_MAC_ADDR;
3683 if (hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
3684 msg_ex->attribs[0].offset =
3685 HDD_IPA_UC_WLAN_HDR_DES_MAC_OFFSET;
3686 } else {
3687 msg_ex->attribs[0].offset =
3688 HDD_IPA_WLAN_HDR_DES_MAC_OFFSET;
3689 }
3690 memcpy(msg_ex->attribs[0].u.mac_addr, mac_addr,
3691 IPA_MAC_ADDR_SIZE);
3692
3693 ret = ipa_send_msg(&meta, msg_ex, hdd_ipa_msg_free_fn);
3694
3695 if (ret) {
3696 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "%s: Evt: %d : %d",
3697 msg_ex->name, meta.msg_type, ret);
3698 cdf_mem_free(msg_ex);
3699 return ret;
3700 }
3701 hdd_ipa->stats.num_send_msg++;
3702
3703 cdf_mutex_acquire(&hdd_ipa->event_lock);
3704 /* Enable IPA UC Data PIPEs when first STA connected */
3705 if ((1 == hdd_ipa->sap_num_connected_sta)
3706 && (!hdd_ipa_uc_sta_is_enabled(hdd_ipa->hdd_ctx)
3707 || !hdd_ipa->sta_connected)) {
3708 ret = hdd_ipa_uc_handle_first_con(hdd_ipa);
3709 if (ret) {
3710 cdf_mutex_release(&hdd_ipa->event_lock);
3711 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
3712 "%s: handle 1st con ret %d",
3713 adapter->dev->name, ret);
3714 return ret;
3715 }
3716 }
3717 cdf_mutex_release(&hdd_ipa->event_lock);
3718
3719 return ret;
3720
3721 case WLAN_CLIENT_DISCONNECT:
3722 if (!hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
3723 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
3724 "%s: IPA UC OFFLOAD NOT ENABLED",
3725 msg_ex->name);
3726 return 0;
3727 }
3728
3729 cdf_mutex_acquire(&hdd_ipa->event_lock);
3730 if (!hdd_ipa_uc_find_add_assoc_sta(hdd_ipa, false, sta_id)) {
3731 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
3732 "%s: STA ID %d NOT found, not valid",
3733 msg_ex->name, sta_id);
3734 cdf_mutex_release(&hdd_ipa->event_lock);
3735 return 0;
3736 }
3737 hdd_ipa->sap_num_connected_sta--;
3738 /* Disable IPA UC TX PIPE when last STA disconnected */
3739 if (!hdd_ipa->sap_num_connected_sta
3740 && (!hdd_ipa_uc_sta_is_enabled(hdd_ipa->hdd_ctx) ||
3741 !hdd_ipa->sta_connected)
3742 && (false == hdd_ipa->resource_unloading)
3743 && (HDD_IPA_UC_NUM_WDI_PIPE ==
3744 hdd_ipa->activated_fw_pipe))
3745 hdd_ipa_uc_handle_last_discon(hdd_ipa);
3746 cdf_mutex_release(&hdd_ipa->event_lock);
3747 break;
3748
3749 default:
3750 return 0;
3751 }
3752
3753 meta.msg_len = sizeof(struct ipa_wlan_msg);
3754 msg = cdf_mem_malloc(meta.msg_len);
3755 if (msg == NULL) {
3756 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR, "msg allocation failed");
3757 return -ENOMEM;
3758 }
3759
3760 meta.msg_type = type;
3761 strlcpy(msg->name, adapter->dev->name, IPA_RESOURCE_NAME_MAX);
3762 memcpy(msg->mac_addr, mac_addr, ETH_ALEN);
3763
3764 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "%s: Evt: %d",
3765 msg->name, meta.msg_type);
3766
3767 ret = ipa_send_msg(&meta, msg, hdd_ipa_msg_free_fn);
3768
3769 if (ret) {
3770 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO, "%s: Evt: %d fail:%d",
3771 msg->name, meta.msg_type, ret);
3772 cdf_mem_free(msg);
3773 return ret;
3774 }
3775
3776 hdd_ipa->stats.num_send_msg++;
3777
3778end:
3779 return ret;
3780}
3781
3782/**
3783 * hdd_ipa_rm_state_to_str() - Convert IPA RM state to string
3784 * @state: IPA RM state value
3785 *
3786 * Return: ASCII string representing the IPA RM state
3787 */
3788static inline char *hdd_ipa_rm_state_to_str(enum hdd_ipa_rm_state state)
3789{
3790 switch (state) {
3791 case HDD_IPA_RM_RELEASED:
3792 return "RELEASED";
3793 case HDD_IPA_RM_GRANT_PENDING:
3794 return "GRANT_PENDING";
3795 case HDD_IPA_RM_GRANTED:
3796 return "GRANTED";
3797 }
3798
3799 return "UNKNOWN";
3800}
3801
3802/**
3803 * hdd_ipa_init() - IPA initialization function
3804 * @hdd_ctx: HDD global context
3805 *
3806 * Allocate hdd_ipa resources, ipa pipe resource and register
3807 * wlan interface with IPA module.
3808 *
3809 * Return: CDF_STATUS enumeration
3810 */
3811CDF_STATUS hdd_ipa_init(hdd_context_t *hdd_ctx)
3812{
3813 struct hdd_ipa_priv *hdd_ipa = NULL;
3814 int ret, i;
3815 struct hdd_ipa_iface_context *iface_context = NULL;
3816
3817 if (!hdd_ipa_is_enabled(hdd_ctx))
3818 return CDF_STATUS_SUCCESS;
3819
3820 hdd_ipa = cdf_mem_malloc(sizeof(*hdd_ipa));
3821 if (!hdd_ipa) {
3822 HDD_IPA_LOG(CDF_TRACE_LEVEL_FATAL, "hdd_ipa allocation failed");
3823 goto fail_setup_rm;
3824 }
3825
3826 hdd_ctx->hdd_ipa = hdd_ipa;
3827 ghdd_ipa = hdd_ipa;
3828 hdd_ipa->hdd_ctx = hdd_ctx;
3829 hdd_ipa->num_iface = 0;
3830
3831 /* Create the interface context */
3832 for (i = 0; i < HDD_IPA_MAX_IFACE; i++) {
3833 iface_context = &hdd_ipa->iface_context[i];
3834 iface_context->hdd_ipa = hdd_ipa;
3835 iface_context->cons_client =
3836 hdd_ipa_adapter_2_client[i].cons_client;
3837 iface_context->prod_client =
3838 hdd_ipa_adapter_2_client[i].prod_client;
3839 iface_context->iface_id = i;
3840 iface_context->adapter = NULL;
3841 cdf_spinlock_init(&iface_context->interface_lock);
3842 }
3843
3844#ifdef CONFIG_CNSS
3845 cnss_init_work(&hdd_ipa->pm_work, hdd_ipa_pm_send_pkt_to_tl);
3846#else
3847 INIT_WORK(&hdd_ipa->pm_work, hdd_ipa_pm_send_pkt_to_tl);
3848#endif
3849 cdf_spinlock_init(&hdd_ipa->pm_lock);
3850 cdf_nbuf_queue_init(&hdd_ipa->pm_queue_head);
3851
3852 ret = hdd_ipa_setup_rm(hdd_ipa);
3853 if (ret)
3854 goto fail_setup_rm;
3855
3856 if (hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
3857 hdd_ipa_uc_rt_debug_init(hdd_ctx);
3858 cdf_mem_zero(&hdd_ipa->stats, sizeof(hdd_ipa->stats));
3859 hdd_ipa->sap_num_connected_sta = 0;
3860 hdd_ipa->ipa_tx_packets_diff = 0;
3861 hdd_ipa->ipa_rx_packets_diff = 0;
3862 hdd_ipa->ipa_p_tx_packets = 0;
3863 hdd_ipa->ipa_p_rx_packets = 0;
3864 hdd_ipa->resource_loading = false;
3865 hdd_ipa->resource_unloading = false;
3866 hdd_ipa->sta_connected = 0;
Leo Change3e49442015-10-26 20:07:13 -07003867 hdd_ipa->ipa_pipes_down = true;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003868 /* Setup IPA sys_pipe for MCC */
3869 if (hdd_ipa_uc_sta_is_enabled(hdd_ipa->hdd_ctx)) {
3870 ret = hdd_ipa_setup_sys_pipe(hdd_ipa);
3871 if (ret)
3872 goto fail_create_sys_pipe;
3873 }
3874 hdd_ipa_uc_ol_init(hdd_ctx);
3875 } else {
3876 ret = hdd_ipa_setup_sys_pipe(hdd_ipa);
3877 if (ret)
3878 goto fail_create_sys_pipe;
3879 }
3880
3881 return CDF_STATUS_SUCCESS;
3882
3883fail_create_sys_pipe:
3884 hdd_ipa_destroy_rm_resource(hdd_ipa);
3885fail_setup_rm:
3886 if (hdd_ipa)
3887 cdf_mem_free(hdd_ipa);
3888
3889 return CDF_STATUS_E_FAILURE;
3890}
3891
3892/**
3893 * hdd_ipa_cleanup - IPA cleanup function
3894 * @hdd_ctx: HDD global context
3895 *
3896 * Return: CDF_STATUS enumeration
3897 */
3898CDF_STATUS hdd_ipa_cleanup(hdd_context_t *hdd_ctx)
3899{
3900 struct hdd_ipa_priv *hdd_ipa = hdd_ctx->hdd_ipa;
3901 int i;
3902 struct hdd_ipa_iface_context *iface_context = NULL;
3903 cdf_nbuf_t skb;
3904 struct hdd_ipa_pm_tx_cb *pm_tx_cb = NULL;
3905
3906 if (!hdd_ipa_is_enabled(hdd_ctx))
3907 return CDF_STATUS_SUCCESS;
3908
3909 if (!hdd_ipa_uc_is_enabled(hdd_ctx)) {
3910 unregister_inetaddr_notifier(&hdd_ipa->ipv4_notifier);
3911 hdd_ipa_teardown_sys_pipe(hdd_ipa);
3912 }
3913
3914 /* Teardown IPA sys_pipe for MCC */
3915 if (hdd_ipa_uc_sta_is_enabled(hdd_ipa->hdd_ctx))
3916 hdd_ipa_teardown_sys_pipe(hdd_ipa);
3917
3918 hdd_ipa_destroy_rm_resource(hdd_ipa);
3919
3920#ifdef WLAN_OPEN_SOURCE
3921 cancel_work_sync(&hdd_ipa->pm_work);
3922#endif
3923
3924 cdf_spin_lock_bh(&hdd_ipa->pm_lock);
3925
3926 while (((skb = cdf_nbuf_queue_remove(&hdd_ipa->pm_queue_head)) != NULL)) {
3927 cdf_spin_unlock_bh(&hdd_ipa->pm_lock);
3928
3929 pm_tx_cb = (struct hdd_ipa_pm_tx_cb *)skb->cb;
3930 ipa_free_skb(pm_tx_cb->ipa_tx_desc);
3931
3932 cdf_spin_lock_bh(&hdd_ipa->pm_lock);
3933 }
3934 cdf_spin_unlock_bh(&hdd_ipa->pm_lock);
3935
3936 cdf_spinlock_destroy(&hdd_ipa->pm_lock);
3937
3938 /* destory the interface lock */
3939 for (i = 0; i < HDD_IPA_MAX_IFACE; i++) {
3940 iface_context = &hdd_ipa->iface_context[i];
3941 cdf_spinlock_destroy(&iface_context->interface_lock);
3942 }
3943
3944 /* This should never hit but still make sure that there are no pending
3945 * descriptor in IPA hardware
3946 */
3947 if (hdd_ipa->pending_hw_desc_cnt != 0) {
3948 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
3949 "IPA Pending write done: %d Waiting!",
3950 hdd_ipa->pending_hw_desc_cnt);
3951
3952 for (i = 0; hdd_ipa->pending_hw_desc_cnt != 0 && i < 10; i++) {
3953 usleep_range(100, 100);
3954 }
3955
3956 HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
3957 "IPA Pending write done: desc: %d %s(%d)!",
3958 hdd_ipa->pending_hw_desc_cnt,
3959 hdd_ipa->pending_hw_desc_cnt == 0 ? "completed"
3960 : "leak", i);
3961 }
3962 if (hdd_ipa_uc_is_enabled(hdd_ctx)) {
3963 hdd_ipa_uc_rt_debug_deinit(hdd_ctx);
3964 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
3965 "%s: Disconnect TX PIPE", __func__);
3966 ipa_disconnect_wdi_pipe(hdd_ipa->tx_pipe_handle);
3967 HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
3968 "%s: Disconnect RX PIPE", __func__);
3969 ipa_disconnect_wdi_pipe(hdd_ipa->rx_pipe_handle);
3970 cdf_mutex_destroy(&hdd_ipa->event_lock);
Yun Parke59b3912015-11-09 13:19:06 -08003971 cdf_mutex_destroy(&hdd_ipa->ipa_lock);
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003972 cdf_list_destroy(&hdd_ipa->pending_event);
3973
3974#ifdef WLAN_OPEN_SOURCE
3975 for (i = 0; i < HDD_IPA_UC_OPCODE_MAX; i++) {
3976 cancel_work_sync(&hdd_ipa->uc_op_work[i].work);
3977 hdd_ipa->uc_op_work[i].msg = NULL;
3978 }
3979#endif
3980 }
3981
3982 cdf_mem_free(hdd_ipa);
3983 hdd_ctx->hdd_ipa = NULL;
3984
3985 return CDF_STATUS_SUCCESS;
3986}
3987#endif /* IPA_OFFLOAD */