/*
 * Copyright (c) 2011-2016 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

#include <ol_cfg.h>
#include <ol_if_athvar.h>
#include <cdp_txrx_cfg.h>

unsigned int vow_config;

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/**
 * ol_tx_set_flow_control_parameters() - set flow control parameters
 * @cfg_ctx: cfg context
 * @cfg_param: cfg parameters
 *
 * Return: none
 */
static
void ol_tx_set_flow_control_parameters(struct txrx_pdev_cfg_t *cfg_ctx,
				       struct txrx_pdev_cfg_param_t cfg_param)
{
	cfg_ctx->tx_flow_start_queue_offset =
		cfg_param.tx_flow_start_queue_offset;
	cfg_ctx->tx_flow_stop_queue_th =
		cfg_param.tx_flow_stop_queue_th;
}
#else
static
void ol_tx_set_flow_control_parameters(struct txrx_pdev_cfg_t *cfg_ctx,
				       struct txrx_pdev_cfg_param_t cfg_param)
{
}
#endif
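
/*
 * A minimal sketch of how these two thresholds are typically consumed
 * (an assumption for illustration; pause_vdev_tx_queues() and
 * resume_vdev_tx_queues() are hypothetical helpers, not driver code):
 * the tx path pauses a vdev's queues once free tx descriptors drop to
 * the stop threshold, and resumes them only after the pool recovers
 * past the stop threshold plus the start offset, giving hysteresis
 * between pause and resume:
 *
 *	stop_th = ol_cfg_get_tx_flow_stop_queue_th(pdev);
 *	start_th = stop_th + ol_cfg_get_tx_flow_start_queue_offset(pdev);
 *	if (free_desc <= stop_th)
 *		pause_vdev_tx_queues(vdev);
 *	else if (free_desc >= start_th)
 *		resume_vdev_tx_queues(vdev);
 */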

#ifdef CONFIG_HL_SUPPORT

/**
 * ol_pdev_cfg_param_update() - assign the tx frame download size to be
 *	used across the datapath for this txrx pdev
 * @cfg_ctx: ptr to config parameter for txrx pdev
 *
 * Return: None
 */
static inline
void ol_pdev_cfg_param_update(struct txrx_pdev_cfg_t *cfg_ctx)
{
	cfg_ctx->is_high_latency = 1;
	/* 802.1Q and SNAP / LLC headers are accounted for elsewhere */
	cfg_ctx->tx_download_size = 1500;
	cfg_ctx->tx_free_at_download = 0;
}
#else

static inline
void ol_pdev_cfg_param_update(struct txrx_pdev_cfg_t *cfg_ctx)
{
	/*
	 * If this value is changed, HTT_LL_TX_HDR_SIZE_IP must be
	 * changed accordingly. It covers payload up to the end of
	 * the UDP header for the IPv4 case.
	 */
	cfg_ctx->tx_download_size = 16;
}
#endif
89
DARAM SUDHAe83cda62015-05-16 08:11:57 +053090#if CFG_TGT_DEFAULT_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK
91static inline
92uint8_t ol_defrag_timeout_check(void)
93{
94 return 1;
95}
96#else
97static inline
98uint8_t ol_defrag_timeout_check(void)
99{
100 return 0;
101}
102#endif

/* FIX THIS -
 * For now, all of these configuration parameters are hardcoded.
 * Many of them should instead be determined dynamically.
 */

/**
 * ol_pdev_cfg_attach() - setup configuration parameters
 * @osdev: OS handle needed as an argument for some OS primitives
 * @cfg_param: configuration parameters
 *
 * Allocate the configuration context that will be used across the data path.
 *
 * Return: the control device object
 */
ol_pdev_handle ol_pdev_cfg_attach(qdf_device_t osdev,
				  struct txrx_pdev_cfg_param_t cfg_param)
{
	struct txrx_pdev_cfg_t *cfg_ctx;

	cfg_ctx = qdf_mem_malloc(sizeof(*cfg_ctx));
	if (!cfg_ctx) {
		printk(KERN_ERR "cfg ctx allocation failed\n");
		return NULL;
	}

	ol_pdev_cfg_param_update(cfg_ctx);

	/* temporarily disabled PN check for Riva/Pronto */
	cfg_ctx->rx_pn_check = 1;
	cfg_ctx->defrag_timeout_check = ol_defrag_timeout_check();
	cfg_ctx->max_peer_id = 511;
	cfg_ctx->max_vdev = CFG_TGT_NUM_VDEV;
	cfg_ctx->pn_rx_fwd_check = 1;
	cfg_ctx->frame_type = wlan_frm_fmt_802_3;
	cfg_ctx->max_thruput_mbps = 800;
	cfg_ctx->max_nbuf_frags = 1;
	cfg_ctx->vow_config = vow_config;
	cfg_ctx->target_tx_credit = CFG_TGT_NUM_MSDU_DESC;
	cfg_ctx->throttle_period_ms = 40;
	cfg_ctx->dutycycle_level[0] = THROTTLE_DUTY_CYCLE_LEVEL0;
	cfg_ctx->dutycycle_level[1] = THROTTLE_DUTY_CYCLE_LEVEL1;
	cfg_ctx->dutycycle_level[2] = THROTTLE_DUTY_CYCLE_LEVEL2;
	cfg_ctx->dutycycle_level[3] = THROTTLE_DUTY_CYCLE_LEVEL3;
	cfg_ctx->rx_fwd_disabled = 0;
	cfg_ctx->is_packet_log_enabled = 0;
	cfg_ctx->is_full_reorder_offload = cfg_param.is_full_reorder_offload;
	cfg_ctx->ipa_uc_rsc.uc_offload_enabled =
		cfg_param.is_uc_offload_enabled;
	cfg_ctx->ipa_uc_rsc.tx_max_buf_cnt = cfg_param.uc_tx_buffer_count;
	cfg_ctx->ipa_uc_rsc.tx_buf_size = cfg_param.uc_tx_buffer_size;
	cfg_ctx->ipa_uc_rsc.rx_ind_ring_size =
		cfg_param.uc_rx_indication_ring_count;
	cfg_ctx->ipa_uc_rsc.tx_partition_base = cfg_param.uc_tx_partition_base;
	cfg_ctx->enable_rxthread = cfg_param.enable_rxthread;
	cfg_ctx->ip_tcp_udp_checksum_offload =
		cfg_param.ip_tcp_udp_checksum_offload;
	cfg_ctx->ce_classify_enabled = cfg_param.ce_classify_enabled;

	ol_tx_set_flow_control_parameters(cfg_ctx, cfg_param);
	return (ol_pdev_handle) cfg_ctx;
}
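
/*
 * A minimal usage sketch (an assumption, not taken from the driver):
 * the caller fills a txrx_pdev_cfg_param_t from its ini settings and
 * attaches the config context before bringing up the txrx pdev.
 * qdf_ctx below is a stand-in for the caller's qdf_device_t handle:
 *
 *	struct txrx_pdev_cfg_param_t cfg_param = {0};
 *	ol_pdev_handle cfg_pdev;
 *
 *	cfg_param.is_full_reorder_offload = 1;
 *	cfg_param.enable_rxthread = 1;
 *	cfg_pdev = ol_pdev_cfg_attach(qdf_ctx, cfg_param);
 *	if (!cfg_pdev)
 *		return -ENOMEM;
 */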

int ol_cfg_is_high_latency(ol_pdev_handle pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
	return cfg->is_high_latency;
}

int ol_cfg_max_peer_id(ol_pdev_handle pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
	/*
	 * TBDXXX - this value must match the peer table
	 * size allocated in FW
	 */
	return cfg->max_peer_id;
}

int ol_cfg_max_vdevs(ol_pdev_handle pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
	return cfg->max_vdev;
}

int ol_cfg_rx_pn_check(ol_pdev_handle pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
	return cfg->rx_pn_check;
}

int ol_cfg_rx_fwd_check(ol_pdev_handle pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
	return cfg->pn_rx_fwd_check;
}

/**
 * ol_set_cfg_rx_fwd_disabled() - set rx fwd disable/enable
 * @pdev: handle to the physical device
 * @disable_rx_fwd: 1 -> no rx->tx forward, 0 -> rx->tx forward
 *
 * Choose whether to forward rx frames to tx (where applicable) within the
 * WLAN driver, or to leave all forwarding up to the operating system.
 * Currently only intra-bss fwd is supported.
 */
void ol_set_cfg_rx_fwd_disabled(ol_pdev_handle pdev, uint8_t disable_rx_fwd)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
	cfg->rx_fwd_disabled = disable_rx_fwd;
}

/**
 * ol_set_cfg_packet_log_enabled() - set packet log config in HTT
 *	config based on CFG ini configuration
 * @pdev: handle to the physical device
 * @val: 0 - disable, 1 - enable
 */
void ol_set_cfg_packet_log_enabled(ol_pdev_handle pdev, uint8_t val)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
	cfg->is_packet_log_enabled = val;
}

uint8_t ol_cfg_is_packet_log_enabled(ol_pdev_handle pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
	return cfg->is_packet_log_enabled;
}

int ol_cfg_rx_fwd_disabled(ol_pdev_handle pdev)
{
#if defined(ATHR_WIN_NWF)
	/* for Windows, let the OS handle the forwarding */
	return 1;
#else
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
	return cfg->rx_fwd_disabled;
#endif
}

int ol_cfg_rx_fwd_inter_bss(ol_pdev_handle pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
	return cfg->rx_fwd_inter_bss;
}

enum wlan_frm_fmt ol_cfg_frame_type(ol_pdev_handle pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
	return cfg->frame_type;
}

int ol_cfg_max_thruput_mbps(ol_pdev_handle pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
	return cfg->max_thruput_mbps;
}

int ol_cfg_netbuf_frags_max(ol_pdev_handle pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
	return cfg->max_nbuf_frags;
}

int ol_cfg_tx_free_at_download(ol_pdev_handle pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
	return cfg->tx_free_at_download;
}

void ol_cfg_set_tx_free_at_download(ol_pdev_handle pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
	cfg->tx_free_at_download = 1;
}

#ifdef CONFIG_HL_SUPPORT
uint16_t ol_cfg_target_tx_credit(ol_pdev_handle pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
	return cfg->target_tx_credit;
}
#else
uint16_t ol_cfg_target_tx_credit(ol_pdev_handle pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
	uint16_t rc;
	uint16_t vow_max_sta = (cfg->vow_config & 0xffff0000) >> 16;
	uint16_t vow_max_desc_persta = cfg->vow_config & 0x0000ffff;

	rc = (cfg->target_tx_credit + (vow_max_sta * vow_max_desc_persta));

	return rc;
}
#endif
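
/*
 * Worked example of the vow_config encoding unpacked above: the upper
 * 16 bits hold the maximum number of VoW stations and the lower 16 bits
 * the descriptors reserved per station. So, with the hypothetical value
 * vow_config = (4 << 16) | 64, the low-latency path reports
 * 4 * 64 = 256 extra tx credits on top of CFG_TGT_NUM_MSDU_DESC.
 */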

int ol_cfg_tx_download_size(ol_pdev_handle pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
	return cfg->tx_download_size;
}

int ol_cfg_rx_host_defrag_timeout_duplicate_check(ol_pdev_handle pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
	return cfg->defrag_timeout_check;
}

int ol_cfg_throttle_period_ms(ol_pdev_handle pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
	return cfg->throttle_period_ms;
}

int ol_cfg_throttle_duty_cycle_level(ol_pdev_handle pdev, int level)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
	return cfg->dutycycle_level[level];
}
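
/*
 * Note: dutycycle_level[] is populated with exactly four entries
 * (THROTTLE_DUTY_CYCLE_LEVEL0..3) in ol_pdev_cfg_attach() and the
 * accessor above does no bounds check, so callers must pass a level
 * in [0, 3]. A guarded lookup a caller might use, for illustration:
 *
 *	if (level >= 0 && level < 4)
 *		duty = ol_cfg_throttle_duty_cycle_level(pdev, level);
 */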

int ol_cfg_is_full_reorder_offload(ol_pdev_handle pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
	return cfg->is_full_reorder_offload;
}

/**
 * ol_cfg_is_rx_thread_enabled() - check if the dedicated rx thread is enabled
 * @pdev: handle to the physical device
 *
 * Return: 1 - enabled, 0 - disabled
 */
int ol_cfg_is_rx_thread_enabled(ol_pdev_handle pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
	return cfg->enable_rxthread;
}

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/**
 * ol_cfg_get_tx_flow_stop_queue_th() - return the stop queue threshold
 * @pdev: handle to the physical device
 *
 * Return: stop queue threshold
 */
int ol_cfg_get_tx_flow_stop_queue_th(ol_pdev_handle pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
	return cfg->tx_flow_stop_queue_th;
}

/**
 * ol_cfg_get_tx_flow_start_queue_offset() - return the start queue offset
 * @pdev: handle to the physical device
 *
 * Return: start queue offset
 */
int ol_cfg_get_tx_flow_start_queue_offset(ol_pdev_handle pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
	return cfg->tx_flow_start_queue_offset;
}
#endif

#ifdef IPA_OFFLOAD
unsigned int ol_cfg_ipa_uc_offload_enabled(ol_pdev_handle pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
	return (unsigned int)cfg->ipa_uc_rsc.uc_offload_enabled;
}

unsigned int ol_cfg_ipa_uc_tx_buf_size(ol_pdev_handle pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
	return cfg->ipa_uc_rsc.tx_buf_size;
}

unsigned int ol_cfg_ipa_uc_tx_max_buf_cnt(ol_pdev_handle pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
	return cfg->ipa_uc_rsc.tx_max_buf_cnt;
}

unsigned int ol_cfg_ipa_uc_rx_ind_ring_size(ol_pdev_handle pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
	return cfg->ipa_uc_rsc.rx_ind_ring_size;
}

unsigned int ol_cfg_ipa_uc_tx_partition_base(ol_pdev_handle pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
	return cfg->ipa_uc_rsc.tx_partition_base;
}

void ol_cfg_set_ipa_uc_tx_partition_base(ol_pdev_handle pdev, uint32_t val)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
	cfg->ipa_uc_rsc.tx_partition_base = val;
}
#endif /* IPA_OFFLOAD */

/**
 * ol_cfg_is_ce_classify_enabled() - return whether CE classification
 *	is enabled or disabled
 * @pdev: handle to the physical device
 *
 * Return: true - enabled, false - disabled
 */
bool ol_cfg_is_ce_classify_enabled(ol_pdev_handle pdev)
{
	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
	return cfg->ce_classify_enabled;
}