blob: 246d7847ee69d236715fd74e5e15ea13f9ede0fe [file] [log] [blame]
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08001/*
Jeff Johnsonbacec092016-12-20 14:08:47 -08002 * Copyright (c) 2011-2017 The Linux Foundation. All rights reserved.
Prakash Dhavali7090c5f2015-11-02 17:55:19 -08003 *
4 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5 *
6 *
7 * Permission to use, copy, modify, and/or distribute this software for
8 * any purpose with or without fee is hereby granted, provided that the
9 * above copyright notice and this permission notice appear in all
10 * copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19 * PERFORMANCE OF THIS SOFTWARE.
20 */
21
22/*
23 * This file was originally distributed by Qualcomm Atheros, Inc.
24 * under proprietary terms before Copyright ownership was assigned
25 * to the Linux Foundation.
26 */
27
28#include <ol_cfg.h>
29#include <ol_if_athvar.h>
Dhanashri Atreb08959a2016-03-01 17:28:03 -080030#include <cdp_txrx_cfg.h>
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -080031#include <cdp_txrx_handle.h>
Yun Parkd1b045e2017-04-05 14:11:35 -070032
/*
 * VoW (video-over-wireless) configuration word.  The high 16 bits hold
 * the max number of VoW stations and the low 16 bits the descriptors
 * reserved per station (see the decoding in ol_cfg_target_tx_credit()).
 */
unsigned int vow_config;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080034
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/**
 * ol_tx_set_flow_control_parameters() - copy the tx flow-control
 * thresholds into the pdev config
 * @cfg_pdev: opaque handle to the txrx pdev configuration
 * @cfg_param: caller-supplied configuration parameters
 *
 * Return: none
 */
void ol_tx_set_flow_control_parameters(struct cdp_cfg *cfg_pdev,
		struct txrx_pdev_cfg_param_t *cfg_param)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;

	cfg->tx_flow_start_queue_offset = cfg_param->tx_flow_start_queue_offset;
	cfg->tx_flow_stop_queue_th = cfg_param->tx_flow_stop_queue_th;
}
#endif
53
Siddarth Poddarb2011f62016-04-27 20:45:42 +053054#ifdef CONFIG_HL_SUPPORT
55
56/**
57 * ol_pdev_cfg_param_update() - assign download size of tx frame for txrx
58 * pdev that will be used across datapath
59 * @cfg_ctx: ptr to config parameter for txrx pdev
60 *
61 * Return: None
62 */
63static inline
64void ol_pdev_cfg_param_update(struct txrx_pdev_cfg_t *cfg_ctx)
65{
66 cfg_ctx->is_high_latency = 1;
67 /* 802.1Q and SNAP / LLC headers are accounted for elsewhere */
68 cfg_ctx->tx_download_size = 1500;
69 cfg_ctx->tx_free_at_download = 0;
70}
71#else
72
73static inline
74void ol_pdev_cfg_param_update(struct txrx_pdev_cfg_t *cfg_ctx)
75{
76 /*
77 * Need to change HTT_LL_TX_HDR_SIZE_IP accordingly.
78 * Include payload, up to the end of UDP header for IPv4 case
79 */
80 cfg_ctx->tx_download_size = 16;
81}
82#endif
83
DARAM SUDHAe83cda62015-05-16 08:11:57 +053084#if CFG_TGT_DEFAULT_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK
85static inline
86uint8_t ol_defrag_timeout_check(void)
87{
88 return 1;
89}
90#else
91static inline
92uint8_t ol_defrag_timeout_check(void)
93{
94 return 0;
95}
96#endif
97
Prakash Dhavali7090c5f2015-11-02 17:55:19 -080098/* FIX THIS -
99 * For now, all these configuration parameters are hardcoded.
100 * Many of these should actually be determined dynamically instead.
101 */
102
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800103struct cdp_cfg *ol_pdev_cfg_attach(qdf_device_t osdev, void *pcfg_param)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800104{
Leo Chang98726762016-10-28 11:07:18 -0700105 struct txrx_pdev_cfg_param_t *cfg_param = pcfg_param;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800106 struct txrx_pdev_cfg_t *cfg_ctx;
gbian62edd7e2017-03-07 13:12:13 +0800107 int i;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800108
Anurag Chouhan600c3a02016-03-01 10:33:54 +0530109 cfg_ctx = qdf_mem_malloc(sizeof(*cfg_ctx));
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800110 if (!cfg_ctx) {
111 printk(KERN_ERR "cfg ctx allocation failed\n");
112 return NULL;
113 }
114
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530115 ol_pdev_cfg_param_update(cfg_ctx);
116
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800117 /* temporarily diabled PN check for Riva/Pronto */
118 cfg_ctx->rx_pn_check = 1;
DARAM SUDHAe83cda62015-05-16 08:11:57 +0530119 cfg_ctx->defrag_timeout_check = ol_defrag_timeout_check();
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800120 cfg_ctx->max_peer_id = 511;
121 cfg_ctx->max_vdev = CFG_TGT_NUM_VDEV;
122 cfg_ctx->pn_rx_fwd_check = 1;
123 cfg_ctx->frame_type = wlan_frm_fmt_802_3;
124 cfg_ctx->max_thruput_mbps = 800;
125 cfg_ctx->max_nbuf_frags = 1;
126 cfg_ctx->vow_config = vow_config;
127 cfg_ctx->target_tx_credit = CFG_TGT_NUM_MSDU_DESC;
128 cfg_ctx->throttle_period_ms = 40;
Poddar, Siddarth83905022016-04-16 17:56:08 -0700129 cfg_ctx->dutycycle_level[0] = THROTTLE_DUTY_CYCLE_LEVEL0;
130 cfg_ctx->dutycycle_level[1] = THROTTLE_DUTY_CYCLE_LEVEL1;
131 cfg_ctx->dutycycle_level[2] = THROTTLE_DUTY_CYCLE_LEVEL2;
132 cfg_ctx->dutycycle_level[3] = THROTTLE_DUTY_CYCLE_LEVEL3;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800133 cfg_ctx->rx_fwd_disabled = 0;
134 cfg_ctx->is_packet_log_enabled = 0;
Leo Chang98726762016-10-28 11:07:18 -0700135 cfg_ctx->is_full_reorder_offload = cfg_param->is_full_reorder_offload;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800136 cfg_ctx->ipa_uc_rsc.uc_offload_enabled =
Leo Chang98726762016-10-28 11:07:18 -0700137 cfg_param->is_uc_offload_enabled;
138 cfg_ctx->ipa_uc_rsc.tx_max_buf_cnt = cfg_param->uc_tx_buffer_count;
139 cfg_ctx->ipa_uc_rsc.tx_buf_size = cfg_param->uc_tx_buffer_size;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800140 cfg_ctx->ipa_uc_rsc.rx_ind_ring_size =
Leo Chang98726762016-10-28 11:07:18 -0700141 cfg_param->uc_rx_indication_ring_count;
142 cfg_ctx->ipa_uc_rsc.tx_partition_base = cfg_param->uc_tx_partition_base;
143 cfg_ctx->enable_rxthread = cfg_param->enable_rxthread;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800144 cfg_ctx->ip_tcp_udp_checksum_offload =
Leo Chang98726762016-10-28 11:07:18 -0700145 cfg_param->ip_tcp_udp_checksum_offload;
146 cfg_ctx->ce_classify_enabled = cfg_param->ce_classify_enabled;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800147
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800148 ol_tx_set_flow_control_parameters((struct cdp_cfg *)cfg_ctx, cfg_param);
gbian62edd7e2017-03-07 13:12:13 +0800149
150 for (i = 0; i < OL_TX_NUM_WMM_AC; i++) {
151 cfg_ctx->ac_specs[i].wrr_skip_weight =
152 cfg_param->ac_specs[i].wrr_skip_weight;
153 cfg_ctx->ac_specs[i].credit_threshold =
154 cfg_param->ac_specs[i].credit_threshold;
155 cfg_ctx->ac_specs[i].send_limit =
156 cfg_param->ac_specs[i].send_limit;
157 cfg_ctx->ac_specs[i].credit_reserve =
158 cfg_param->ac_specs[i].credit_reserve;
159 cfg_ctx->ac_specs[i].discard_weight =
160 cfg_param->ac_specs[i].discard_weight;
161 }
162
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800163 return (struct cdp_cfg *)cfg_ctx;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800164}
165
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800166int ol_cfg_is_high_latency(struct cdp_cfg *cfg_pdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800167{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800168 struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800169 return cfg->is_high_latency;
170}
171
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800172int ol_cfg_max_peer_id(struct cdp_cfg *cfg_pdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800173{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800174 struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800175 /*
176 * TBDXXX - this value must match the peer table
177 * size allocated in FW
178 */
179 return cfg->max_peer_id;
180}
181
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800182int ol_cfg_max_vdevs(struct cdp_cfg *cfg_pdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800183{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800184 struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800185 return cfg->max_vdev;
186}
187
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800188int ol_cfg_rx_pn_check(struct cdp_cfg *cfg_pdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800189{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800190 struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800191 return cfg->rx_pn_check;
192}
193
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800194int ol_cfg_rx_fwd_check(struct cdp_cfg *cfg_pdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800195{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800196 struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800197 return cfg->pn_rx_fwd_check;
198}
199
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800200void ol_set_cfg_rx_fwd_disabled(struct cdp_cfg *cfg_pdev,
201 uint8_t disable_rx_fwd)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800202{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800203 struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800204 cfg->rx_fwd_disabled = disable_rx_fwd;
205}
206
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800207void ol_set_cfg_packet_log_enabled(struct cdp_cfg *cfg_pdev, uint8_t val)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800208{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800209 struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800210 cfg->is_packet_log_enabled = val;
211}
212
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800213uint8_t ol_cfg_is_packet_log_enabled(struct cdp_cfg *cfg_pdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800214{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800215 struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800216 return cfg->is_packet_log_enabled;
217}
218
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800219int ol_cfg_rx_fwd_disabled(struct cdp_cfg *cfg_pdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800220{
221#if defined(ATHR_WIN_NWF)
222 /* for Windows, let the OS handle the forwarding */
223 return 1;
224#else
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800225 struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800226 return cfg->rx_fwd_disabled;
227#endif
228}
229
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800230int ol_cfg_rx_fwd_inter_bss(struct cdp_cfg *cfg_pdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800231{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800232 struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800233 return cfg->rx_fwd_inter_bss;
234}
235
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800236enum wlan_frm_fmt ol_cfg_frame_type(struct cdp_cfg *cfg_pdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800237{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800238 struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800239 return cfg->frame_type;
240}
241
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800242int ol_cfg_max_thruput_mbps(struct cdp_cfg *cfg_pdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800243{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800244 struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800245 return cfg->max_thruput_mbps;
246}
247
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800248int ol_cfg_netbuf_frags_max(struct cdp_cfg *cfg_pdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800249{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800250 struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800251 return cfg->max_nbuf_frags;
252}
253
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800254int ol_cfg_tx_free_at_download(struct cdp_cfg *cfg_pdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800255{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800256 struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800257 return cfg->tx_free_at_download;
258}
259
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800260void ol_cfg_set_tx_free_at_download(struct cdp_cfg *cfg_pdev)
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530261{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800262 struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530263 cfg->tx_free_at_download = 1;
264}
265
266
267#ifdef CONFIG_HL_SUPPORT
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800268uint16_t ol_cfg_target_tx_credit(struct cdp_cfg *cfg_pdev)
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530269{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800270 struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530271 return cfg->target_tx_credit;
272}
273#else
274
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800275uint16_t ol_cfg_target_tx_credit(struct cdp_cfg *cfg_pdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800276{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800277 struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800278 uint16_t rc;
279 uint16_t vow_max_sta = (cfg->vow_config & 0xffff0000) >> 16;
280 uint16_t vow_max_desc_persta = cfg->vow_config & 0x0000ffff;
281
282 rc = (cfg->target_tx_credit + (vow_max_sta * vow_max_desc_persta));
283
284 return rc;
285}
Siddarth Poddarb2011f62016-04-27 20:45:42 +0530286#endif
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800287
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800288int ol_cfg_tx_download_size(struct cdp_cfg *cfg_pdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800289{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800290 struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800291 return cfg->tx_download_size;
292}
293
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800294int ol_cfg_rx_host_defrag_timeout_duplicate_check(struct cdp_cfg *cfg_pdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800295{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800296 struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800297 return cfg->defrag_timeout_check;
298}
299
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800300int ol_cfg_throttle_period_ms(struct cdp_cfg *cfg_pdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800301{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800302 struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800303 return cfg->throttle_period_ms;
304}
305
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800306int ol_cfg_throttle_duty_cycle_level(struct cdp_cfg *cfg_pdev, int level)
Poddar, Siddarth83905022016-04-16 17:56:08 -0700307{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800308 struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;
Poddar, Siddarth83905022016-04-16 17:56:08 -0700309 return cfg->dutycycle_level[level];
310}
311
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800312int ol_cfg_is_full_reorder_offload(struct cdp_cfg *cfg_pdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800313{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800314 struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800315 return cfg->is_full_reorder_offload;
316}
317
318/**
319 * ol_cfg_is_rx_thread_enabled() - return rx_thread is enable/disable
320 * @pdev : handle to the physical device
321 *
322 * Return: 1 - enable, 0 - disable
323 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800324int ol_cfg_is_rx_thread_enabled(struct cdp_cfg *cfg_pdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800325{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800326 struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800327 return cfg->enable_rxthread;
328}
329
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/**
 * ol_cfg_get_tx_flow_stop_queue_th() - return stop queue threshold
 * @cfg_pdev: handle to the physical device
 *
 * Return: stop queue threshold
 */
int ol_cfg_get_tx_flow_stop_queue_th(struct cdp_cfg *cfg_pdev)
{
	return ((struct txrx_pdev_cfg_t *)cfg_pdev)->tx_flow_stop_queue_th;
}

/**
 * ol_cfg_get_tx_flow_start_queue_offset() - return start queue offset
 * @cfg_pdev: handle to the physical device
 *
 * Return: start queue offset
 */
int ol_cfg_get_tx_flow_start_queue_offset(struct cdp_cfg *cfg_pdev)
{
	return ((struct txrx_pdev_cfg_t *)cfg_pdev)->
		tx_flow_start_queue_offset;
}

#endif
356
#ifdef IPA_OFFLOAD
/**
 * ol_cfg_ipa_uc_offload_enabled() - query the IPA uC offload flag
 * @cfg_pdev: opaque handle to the txrx pdev configuration
 *
 * Return: the configured uc_offload_enabled flag as an unsigned int
 */
unsigned int ol_cfg_ipa_uc_offload_enabled(struct cdp_cfg *cfg_pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;

	return (unsigned int)cfg->ipa_uc_rsc.uc_offload_enabled;
}

/**
 * ol_cfg_ipa_uc_tx_buf_size() - query the IPA uC tx buffer size
 * @cfg_pdev: opaque handle to the txrx pdev configuration
 *
 * Return: the configured ipa_uc_rsc.tx_buf_size
 */
unsigned int ol_cfg_ipa_uc_tx_buf_size(struct cdp_cfg *cfg_pdev)
{
	return ((struct txrx_pdev_cfg_t *)cfg_pdev)->ipa_uc_rsc.tx_buf_size;
}

/**
 * ol_cfg_ipa_uc_tx_max_buf_cnt() - query the IPA uC max tx buffer count
 * @cfg_pdev: opaque handle to the txrx pdev configuration
 *
 * Return: the configured ipa_uc_rsc.tx_max_buf_cnt
 */
unsigned int ol_cfg_ipa_uc_tx_max_buf_cnt(struct cdp_cfg *cfg_pdev)
{
	return ((struct txrx_pdev_cfg_t *)cfg_pdev)->ipa_uc_rsc.tx_max_buf_cnt;
}

/**
 * ol_cfg_ipa_uc_rx_ind_ring_size() - query the IPA uC rx indication
 * ring size
 * @cfg_pdev: opaque handle to the txrx pdev configuration
 *
 * Return: the configured ipa_uc_rsc.rx_ind_ring_size
 */
unsigned int ol_cfg_ipa_uc_rx_ind_ring_size(struct cdp_cfg *cfg_pdev)
{
	return ((struct txrx_pdev_cfg_t *)cfg_pdev)->
		ipa_uc_rsc.rx_ind_ring_size;
}

/**
 * ol_cfg_ipa_uc_tx_partition_base() - query the IPA uC tx partition base
 * @cfg_pdev: opaque handle to the txrx pdev configuration
 *
 * Return: the configured ipa_uc_rsc.tx_partition_base
 */
unsigned int ol_cfg_ipa_uc_tx_partition_base(struct cdp_cfg *cfg_pdev)
{
	return ((struct txrx_pdev_cfg_t *)cfg_pdev)->
		ipa_uc_rsc.tx_partition_base;
}

/**
 * ol_cfg_set_ipa_uc_tx_partition_base() - set the IPA uC tx partition
 * base
 * @cfg_pdev: opaque handle to the txrx pdev configuration
 * @val: new partition base value
 *
 * Return: none
 */
void ol_cfg_set_ipa_uc_tx_partition_base(struct cdp_cfg *cfg_pdev, uint32_t val)
{
	((struct txrx_pdev_cfg_t *)cfg_pdev)->ipa_uc_rsc.tx_partition_base =
		val;
}
#endif /* IPA_OFFLOAD */
394
395/**
396 * ol_cfg_is_ce_classify_enabled() - Return if CE classification is enabled
397 * or disabled
398 * @pdev : handle to the physical device
399 *
400 * Return: 1 - enabled, 0 - disabled
401 */
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800402bool ol_cfg_is_ce_classify_enabled(struct cdp_cfg *cfg_pdev)
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800403{
Venkata Sharath Chandra Manchala0d44d452016-11-23 17:48:15 -0800404 struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)cfg_pdev;
Prakash Dhavali7090c5f2015-11-02 17:55:19 -0800405 return cfg->ce_classify_enabled;
406}
gbian62edd7e2017-03-07 13:12:13 +0800407
408/**
409 * ol_cfg_get_wrr_skip_weight() - brief Query for the param of wrr_skip_weight
410 * @pdev: handle to the physical device.
411 * @ac: access control, it will be BE, BK, VI, VO
412 *
413 * Return: wrr_skip_weight for specified ac.
414 */
415int ol_cfg_get_wrr_skip_weight(ol_pdev_handle pdev, int ac)
416{
417 struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
418
419 if (ac >= OL_TX_WMM_AC_BE && ac <= OL_TX_WMM_AC_VO)
420 return cfg->ac_specs[ac].wrr_skip_weight;
421
422 return 0;
423}
424
425/**
426 * ol_cfg_get_credit_threshold() - Query for the param of credit_threshold
427 * @pdev: handle to the physical device.
428 * @ac: access control, it will be BE, BK, VI, VO
429 *
430 * Return: credit_threshold for specified ac.
431 */
432uint32_t ol_cfg_get_credit_threshold(ol_pdev_handle pdev, int ac)
433{
434 struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
435
436 if (ac >= OL_TX_WMM_AC_BE && ac <= OL_TX_WMM_AC_VO)
437 return cfg->ac_specs[ac].credit_threshold;
438
439 return 0;
440}
441
442/**
443 * ol_cfg_get_send_limit() - Query for the param of send_limit
444 * @pdev: handle to the physical device.
445 * @ac: access control, it will be BE, BK, VI, VO
446 *
447 * Return: send_limit for specified ac.
448 */
449uint16_t ol_cfg_get_send_limit(ol_pdev_handle pdev, int ac)
450{
451 struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
452
453 if (ac >= OL_TX_WMM_AC_BE && ac <= OL_TX_WMM_AC_VO)
454 return cfg->ac_specs[ac].send_limit;
455
456 return 0;
457}
458
459/**
460 * ol_cfg_get_credit_reserve() - Query for the param of credit_reserve
461 * @pdev: handle to the physical device.
462 * @ac: access control, it will be BE, BK, VI, VO
463 *
464 * Return: credit_reserve for specified ac.
465 */
466int ol_cfg_get_credit_reserve(ol_pdev_handle pdev, int ac)
467{
468 struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
469
470 if (ac >= OL_TX_WMM_AC_BE && ac <= OL_TX_WMM_AC_VO)
471 return cfg->ac_specs[ac].credit_reserve;
472
473 return 0;
474}
475
476/**
477 * ol_cfg_get_discard_weight() - Query for the param of discard_weight
478 * @pdev: handle to the physical device.
479 * @ac: access control, it will be BE, BK, VI, VO
480 *
481 * Return: discard_weight for specified ac.
482 */
483int ol_cfg_get_discard_weight(ol_pdev_handle pdev, int ac)
484{
485 struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
486
487 if (ac >= OL_TX_WMM_AC_BE && ac <= OL_TX_WMM_AC_VO)
488 return cfg->ac_specs[ac].discard_weight;
489
490 return 0;
491}