blob: 1dbd7b7139e461dde132b9609c70945486294a22 [file] [log] [blame]
jitiphil869b9f72018-09-25 17:14:01 +05301/*
hangtian2b9856f2019-01-25 11:50:39 +08002 * Copyright (c) 2012-2019 The Linux Foundation. All rights reserved.
jitiphil869b9f72018-09-25 17:14:01 +05303 *
4 * Permission to use, copy, modify, and/or distribute this software for
5 * any purpose with or without fee is hereby granted, provided that the
6 * above copyright notice and this permission notice appear in all
7 * copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16 * PERFORMANCE OF THIS SOFTWARE.
17 */
18
19/**
20 * DOC: This file contains centralized definitions of converged configuration.
21 */
22
23#ifndef __HDD_DP_CONFIG_H
24#define __HDD_DP_CONFIG_H
25
26#define CFG_ENABLE_RX_THREAD BIT(0)
27#define CFG_ENABLE_RPS BIT(1)
28#define CFG_ENABLE_NAPI BIT(2)
29#define CFG_ENABLE_DYNAMIC_RPS BIT(3)
30#define CFG_ENABLE_DP_RX_THREADS BIT(4)
31#define CFG_RX_MODE_MAX (CFG_ENABLE_RX_THREAD | \
32 CFG_ENABLE_RPS | \
33 CFG_ENABLE_NAPI | \
34 CFG_ENABLE_DYNAMIC_RPS | \
35 CFG_ENABLE_DP_RX_THREADS)
36#ifdef MDM_PLATFORM
37#define CFG_RX_MODE_DEFAULT 0
38#elif defined(HELIUMPLUS)
39#define CFG_RX_MODE_DEFAULT CFG_ENABLE_NAPI
40#elif defined(QCA_WIFI_QCA6290_11AX)
41#define CFG_RX_MODE_DEFAULT (CFG_ENABLE_DP_RX_THREADS | CFG_ENABLE_NAPI)
42#else
43#define CFG_RX_MODE_DEFAULT (CFG_ENABLE_RX_THREAD | CFG_ENABLE_NAPI)
44#endif
45
46#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
47
48/*
49 * <ini>
50 * TxFlowLowWaterMark - Low watermark for pausing network queues
51 *
52 * @Min: 0
53 * @Max: 1000
54 * @Default: 300
55 *
56 * This ini specifies the low watermark of data packets transmitted
57 * before pausing netif queues in tx flow path. It is only applicable
58 * where legacy flow control is used i.e.for Rome.
59 *
60 * Related: TxFlowHighWaterMarkOffset, TxFlowMaxQueueDepth,
61 * TxLbwFlowLowWaterMark, TxLbwFlowHighWaterMarkOffset,
62 * TxLbwFlowMaxQueueDepth, TxHbwFlowLowWaterMark,
63 * TxHbwFlowHighWaterMarkOffset, TxHbwFlowMaxQueueDepth
64 *
65 * Supported Feature: Dynamic Flow Control
66 *
67 * Usage: Internal
68 *
69 * </ini>
70 */
71#define CFG_DP_LL_TX_FLOW_LWM \
72 CFG_INI_UINT( \
73 "TxFlowLowWaterMark", \
74 0, \
75 1000, \
76 300, \
77 CFG_VALUE_OR_DEFAULT, \
78 "Low watermark for pausing network queues")
79
80/*
81 * <ini>
82 * TxFlowHighWaterMarkOffset - High Watermark offset to unpause Netif queues
83 * @Min: 0
84 * @Max: 300
85 * @Default: 94
86 *
 87 * This ini specifies the offset to unpause the netif queues
88 * when they are paused due to insufficient descriptors as guided by
89 * ini TxFlowLowWaterMark. It is only applicable where legacy flow control
90 * is used i.e.for Rome.
91 *
92 * Related: TxFlowLowWaterMark, TxFlowMaxQueueDepth,
93 * TxLbwFlowLowWaterMark, TxLbwFlowHighWaterMarkOffset,
94 * TxLbwFlowMaxQueueDepth, TxHbwFlowLowWaterMark,
95 * TxHbwFlowHighWaterMarkOffset, TxHbwFlowMaxQueueDepth
96 *
97 * Supported Feature: Dynamic Flow Control
98 *
99 * Usage: Internal
100 *
101 * </ini>
102 */
103#define CFG_DP_LL_TX_FLOW_HWM_OFFSET \
104 CFG_INI_UINT( \
105 "TxFlowHighWaterMarkOffset", \
106 0, \
107 300, \
108 94, \
109 CFG_VALUE_OR_DEFAULT, \
110 "High Watermark offset to unpause Netif queues")
111
112/*
113 * <ini>
114 * TxFlowMaxQueueDepth - Max pause queue depth.
115 *
116 * @Min: 400
117 * @Max: 3500
118 * @Default: 1500
119 *
120 * This ini specifies the max queue pause depth.It is only applicable
121 * where legacy flow control is used i.e.for Rome.
122 *
123 * Related: TxFlowLowWaterMark, TxFlowHighWaterMarkOffset,
124 * TxLbwFlowLowWaterMark, TxLbwFlowHighWaterMarkOffset,
125 * TxLbwFlowMaxQueueDepth, TxHbwFlowLowWaterMark,
126 * TxHbwFlowHighWaterMarkOffset, TxHbwFlowMaxQueueDepth
127 *
128 * Supported Feature: Dynamic Flow Control
129 *
130 * Usage: Internal
131 *
132 * </ini>
133 */
134#define CFG_DP_LL_TX_FLOW_MAX_Q_DEPTH \
135 CFG_INI_UINT( \
136 "TxFlowMaxQueueDepth", \
137 400, \
138 3500, \
139 1500, \
140 CFG_VALUE_OR_DEFAULT, \
141 "Max pause queue depth")
142
143/*
144 * <ini>
145 * TxLbwFlowLowWaterMark - Low watermark for pausing network queues
146 * in low bandwidth band
147 * @Min: 0
148 * @Max: 1000
149 * @Default: 450
150 *
151 * This ini specifies the low watermark of data packets transmitted
152 * before pausing netif queues in tx flow path in low bandwidth band.
153 * It is only applicable where legacy flow control is used i.e.for Rome.
154 *
155 * Related: TxFlowLowWaterMark, TxFlowHighWaterMarkOffset,
156 * TxFlowMaxQueueDepth, TxLbwFlowHighWaterMarkOffset,
157 * TxLbwFlowMaxQueueDepth, TxHbwFlowLowWaterMark,
158 * TxHbwFlowHighWaterMarkOffset, TxHbwFlowMaxQueueDepth
159 *
160 * Supported Feature: Dynamic Flow Control
161 *
162 * Usage: Internal
163 *
164 * </ini>
165 */
166#define CFG_DP_LL_TX_LBW_FLOW_LWM \
167 CFG_INI_UINT( \
168 "TxLbwFlowLowWaterMark", \
169 0, \
170 1000, \
171 450, \
172 CFG_VALUE_OR_DEFAULT, \
173 "Low watermark for pausing network queues")
174
175/*
176 * <ini>
177 * TxLbwFlowHighWaterMarkOffset - High Watermark offset to unpause Netif queues
178 * in low bandwidth band.
179 * @Min: 0
180 * @Max: 300
181 * @Default: 50
182 *
 183 * This ini specifies the offset to unpause the netif queues
184 * when they are paused due to insufficient descriptors as guided by
185 * ini TxLbwFlowLowWaterMark in low bandwidth band. It is only applicable
186 * where legacy flow control is used i.e.for Rome.
187 *
188 * Related: TxFlowLowWaterMark, TxFlowHighWaterMarkOffset,
189 * TxFlowMaxQueueDepth, TxLbwFlowLowWaterMark,
190 * TxLbwFlowMaxQueueDepth, TxHbwFlowLowWaterMark,
191 * TxHbwFlowHighWaterMarkOffset, TxHbwFlowMaxQueueDepth
192 *
193 * Supported Feature: Dynamic Flow Control
194 *
195 * Usage: Internal
196 *
197 * </ini>
198 */
199#define CFG_DP_LL_TX_LBW_FLOW_HWM_OFFSET \
200 CFG_INI_UINT( \
201 "TxLbwFlowHighWaterMarkOffset", \
202 0, \
203 300, \
204 50, \
205 CFG_VALUE_OR_DEFAULT, \
206 "High Watermark offset to unpause Netif queues")
207
208/*
209 * <ini>
210 * TxLbwFlowMaxQueueDepth - Max pause queue depth in low bandwidth band
211 *
212 * @Min: 400
213 * @Max: 3500
214 * @Default: 750
215 *
216 * This ini specifies the max queue pause depth in low bandwidth band.
217 * It is only applicable where legacy flow control is used i.e.for Rome.
218 *
219 * Related: TxFlowLowWaterMark, TxFlowHighWaterMarkOffset,
220 * TxFlowMaxQueueDepth, TxLbwFlowLowWaterMark,
221 * TxLbwFlowHighWaterMarkOffset, TxHbwFlowLowWaterMark,
222 * TxHbwFlowHighWaterMarkOffset, TxHbwFlowMaxQueueDepth
223 *
224 * Supported Feature: Dynamic Flow Control
225 *
226 * Usage: Internal
227 *
228 * </ini>
229 */
230#define CFG_DP_LL_TX_LBW_FLOW_MAX_Q_DEPTH \
231 CFG_INI_UINT( \
232 "TxLbwFlowMaxQueueDepth", \
233 400, \
234 3500, \
235 750, \
236 CFG_VALUE_OR_DEFAULT, \
237 "Max pause queue depth in low bandwidth band")
238
239/*
240 * <ini>
241 * TxHbwFlowLowWaterMark - Low watermark for pausing network queues
242 * in high bandwidth band
243 * @Min: 0
244 * @Max: 1000
245 * @Default: 406
246 *
247 * This ini specifies the threshold of data packets transmitted
248 * before pausing netif queues.It is only applicable where
249 * legacy flow control is used i.e.for Rome.
250 *
251 * Related: TxFlowLowWaterMark, TxFlowHighWaterMarkOffset,
252 * TxFlowMaxQueueDepth, TxLbwFlowLowWaterMark,
253 * TxLbwFlowHighWaterMarkOffset, TxLbwFlowMaxQueueDepth,
254 * TxHbwFlowHighWaterMarkOffset, TxHbwFlowMaxQueueDepth
255 *
256 * Supported Feature: Dynamic Flow Control
257 *
258 * Usage: Internal
259 *
260 * </ini>
261 */
262#define CFG_DP_LL_TX_HBW_FLOW_LWM \
263 CFG_INI_UINT( \
264 "TxHbwFlowLowWaterMark", \
265 0, \
266 1000, \
267 406, \
268 CFG_VALUE_OR_DEFAULT, \
269 "Low watermark for pausing network queues")
270
271/*
272 * <ini>
273 * TxHbwFlowHighWaterMarkOffset - High Watermark offset to unpause Netif queues
274 * in high bandwidth band.
275 * @Min: 0
276 * @Max: 300
277 * @Default: 94
278 *
 279 * This ini specifies the offset to unpause the netif queues
280 * when they are paused due to insufficient descriptors as guided by
281 * ini TxHbwFlowLowWaterMark in high bandwidth band. It is only applicable
282 * where legacy flow control is used i.e.for Rome.
283 *
284 * Related: TxFlowLowWaterMark, TxFlowHighWaterMarkOffset,
285 * TxFlowMaxQueueDepth, TxLbwFlowLowWaterMark,
286 * TxLbwFlowHighWaterMarkOffset, TxLbwFlowMaxQueueDepth,
287 * TxHbwFlowLowWaterMark, TxHbwFlowMaxQueueDepth
288 *
289 * Supported Feature: Dynamic Flow Control
290 *
291 * Usage: Internal
292 *
293 * </ini>
294 */
295#define CFG_DP_LL_TX_HBW_FLOW_HWM_OFFSET \
296 CFG_INI_UINT( \
297 "TxHbwFlowHighWaterMarkOffset", \
298 0, \
299 300, \
300 94, \
301 CFG_VALUE_OR_DEFAULT, \
302 "High Watermark offset to unpause Netif queues")
303
304/*
305 * <ini>
306 * TxHbwFlowMaxQueueDepth - Max pause queue depth in high bandwidth band
 307 * @Min: 400
308 * @Max: 3500
309 * @Default: 1500
310 *
311 * This ini specifies the max queue pause depth in high bandwidth band.
312 * It is only applicable where legacy flow control is used i.e.for Rome.
313 *
314 * Related: TxFlowLowWaterMark, TxFlowHighWaterMarkOffset,
315 * TxFlowMaxQueueDepth, TxLbwFlowLowWaterMark,
316 * TxLbwFlowHighWaterMarkOffset, TxLbwFlowMaxQueueDepth,
317 * TxHbwFlowLowWaterMark, TxHbwFlowHighWaterMarkOffset
318 *
319 * Supported Feature: Dynamic Flow Control
320 *
321 * Usage: Internal
322 *
323 * </ini>
324 */
325#define CFG_DP_LL_TX_HBW_FLOW_MAX_Q_DEPTH \
326 CFG_INI_UINT( \
327 "TxHbwFlowMaxQueueDepth", \
328 400, \
329 3500, \
330 1500, \
331 CFG_VALUE_OR_DEFAULT, \
332 "Max pause queue depth in high bandwidth band")
333
334#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
335#ifdef MSM_PLATFORM
336/*
337 * <ini>
338 * gBusBandwidthHighThreshold - bus bandwidth high threshold
339 *
340 * @Min: 0
341 * @Max: 4294967295UL
342 * @Default: 2000
343 *
 344 * This ini specifies the bus bandwidth high threshold
345 *
346 * Usage: Internal
347 *
348 * </ini>
349 */
350#define CFG_DP_BUS_BANDWIDTH_HIGH_THRESHOLD \
351 CFG_INI_UINT( \
352 "gBusBandwidthHighThreshold", \
353 0, \
354 4294967295UL, \
355 2000, \
356 CFG_VALUE_OR_DEFAULT, \
357 "Bus bandwidth high threshold")
358
359/*
360 * <ini>
361 * gBusBandwidthMediumThreshold - bus bandwidth medium threshold
362 *
363 * @Min: 0
364 * @Max: 4294967295UL
365 * @Default: 500
366 *
 367 * This ini specifies the bus bandwidth medium threshold
368 *
369 * Usage: Internal
370 *
371 * </ini>
372 */
373#define CFG_DP_BUS_BANDWIDTH_MEDIUM_THRESHOLD \
374 CFG_INI_UINT( \
375 "gBusBandwidthMediumThreshold", \
376 0, \
377 4294967295UL, \
378 500, \
379 CFG_VALUE_OR_DEFAULT, \
380 "Bus bandwidth medium threshold")
381
382/*
383 * <ini>
384 * gBusBandwidthLowThreshold - bus bandwidth low threshold
385 *
386 * @Min: 0
387 * @Max: 4294967295UL
388 * @Default: 150
389 *
 390 * This ini specifies the bus bandwidth low threshold
391 *
392 * Usage: Internal
393 *
394 * </ini>
395 */
396#define CFG_DP_BUS_BANDWIDTH_LOW_THRESHOLD \
397 CFG_INI_UINT( \
398 "gBusBandwidthLowThreshold", \
399 0, \
400 4294967295UL, \
401 150, \
402 CFG_VALUE_OR_DEFAULT, \
403 "Bus bandwidth low threshold")
404
405/*
406 * <ini>
407 * gBusBandwidthComputeInterval - bus bandwidth compute interval
408 *
409 * @Min: 0
410 * @Max: 10000
411 * @Default: 100
412 *
 413 * This ini specifies the bus bandwidth compute interval
414 *
415 * Usage: Internal
416 *
417 * </ini>
418 */
419#define CFG_DP_BUS_BANDWIDTH_COMPUTE_INTERVAL \
420 CFG_INI_UINT( \
421 "gBusBandwidthComputeInterval", \
422 0, \
423 10000, \
424 100, \
425 CFG_VALUE_OR_DEFAULT, \
426 "Bus bandwidth compute interval")
427
428/*
429 * <ini>
wadesongcc2e2162018-11-12 14:42:11 +0800430 * gTcpLimitOutputEnable - Control to enable TCP limit output byte
jitiphil869b9f72018-09-25 17:14:01 +0530431 * @Default: true
432 *
433 * This ini is used to enable dynamic configuration of TCP limit output bytes
434 * tcp_limit_output_bytes param. Enabling this will let driver post message to
435 * cnss-daemon, accordingly cnss-daemon will modify the tcp_limit_output_bytes.
436 *
437 * Supported Feature: Tcp limit output bytes
438 *
439 * Usage: Internal
440 *
441 * </ini>
442 */
443#define CFG_DP_ENABLE_TCP_LIMIT_OUTPUT \
444 CFG_INI_BOOL( \
445 "gTcpLimitOutputEnable", \
446 true, \
447 "Control to enable TCP limit output byte")
448
449/*
450 * <ini>
451 * gTcpAdvWinScaleEnable - Control to enable TCP adv window scaling
452 * @Default: true
453 *
454 * This ini is used to enable dynamic configuration of TCP adv window scaling
455 * system parameter.
456 *
457 * Supported Feature: Tcp Advance Window Scaling
458 *
459 * Usage: Internal
460 *
461 * </ini>
462 */
463#define CFG_DP_ENABLE_TCP_ADV_WIN_SCALE \
464 CFG_INI_BOOL( \
465 "gTcpAdvWinScaleEnable", \
466 true, \
467 "Control to enable TCP adv window scaling")
468
469/*
470 * <ini>
471 * gTcpDelAckEnable - Control to enable Dynamic Configuration of Tcp Delayed Ack
472 * @Default: true
473 *
474 * This ini is used to enable Dynamic Configuration of Tcp Delayed Ack
475 *
476 * Related: gTcpDelAckThresholdHigh, gTcpDelAckThresholdLow,
477 * gTcpDelAckTimerCount
478 *
479 * Supported Feature: Tcp Delayed Ack
480 *
481 * Usage: Internal
482 *
483 * </ini>
484 */
485#define CFG_DP_ENABLE_TCP_DELACK \
486 CFG_INI_BOOL( \
487 "gTcpDelAckEnable", \
488 true, \
489 "Control to enable Dynamic Config of Tcp Delayed Ack")
490
491/*
492 * <ini>
493 * gTcpDelAckThresholdHigh - High Threshold inorder to trigger TCP Del Ack
494 * indication
495 * @Min: 0
496 * @Max: 16000
497 * @Default: 500
498 *
499 * This ini is used to mention the High Threshold inorder to trigger TCP Del Ack
500 * indication i.e the threshold of packets received over a period of 100 ms.
501 * i.e to have a low RX throughput requirement
502 * Related: gTcpDelAckEnable, gTcpDelAckThresholdLow, gTcpDelAckTimerCount
503 *
504 * Supported Feature: Tcp Delayed Ack
505 *
506 * Usage: Internal
507 *
508 * </ini>
509 */
510#define CFG_DP_TCP_DELACK_THRESHOLD_HIGH \
511 CFG_INI_UINT( \
512 "gTcpDelAckThresholdHigh", \
513 0, \
514 16000, \
515 500, \
516 CFG_VALUE_OR_DEFAULT, \
517 "High Threshold inorder to trigger TCP Del Ack")
518
519/*
520 * <ini>
521 * gTcpDelAckThresholdLow - Low Threshold inorder to trigger TCP Del Ack
522 * indication
523 * @Min: 0
524 * @Max: 10000
525 * @Default: 1000
526 *
527 * This ini is used to mention the Low Threshold inorder to trigger TCP Del Ack
528 * indication i.e the threshold of packets received over a period of 100 ms.
529 * i.e to have a low RX throughput requirement
530 *
531 * Related: gTcpDelAckEnable, gTcpDelAckThresholdHigh, gTcpDelAckTimerCount
532 *
533 * Supported Feature: Tcp Delayed Ack
534 *
535 * Usage: Internal
536 *
537 * </ini>
538 */
539#define CFG_DP_TCP_DELACK_THRESHOLD_LOW \
540 CFG_INI_UINT( \
541 "gTcpDelAckThresholdLow", \
542 0, \
543 10000, \
544 1000, \
545 CFG_VALUE_OR_DEFAULT, \
546 "Low Threshold inorder to trigger TCP Del Ack")
547
548/*
549 * <ini>
550 * gTcpDelAckTimerCount - Del Ack Timer Count inorder to trigger TCP Del Ack
551 * indication
552 * @Min: 1
553 * @Max: 1000
554 * @Default: 30
555 *
556 * This ini is used to mention the Del Ack Timer Count inorder to
557 * trigger TCP Del Ack indication i.e number of 100 ms periods
558 *
559 * Related: gTcpDelAckEnable, gTcpDelAckThresholdHigh, gTcpDelAckThresholdLow
560 *
561 * Supported Feature: Tcp Delayed Ack
562 *
563 * Usage: Internal
564 *
565 * </ini>
566 */
567#define CFG_DP_TCP_DELACK_TIMER_COUNT \
568 CFG_INI_UINT( \
569 "gTcpDelAckTimerCount", \
570 1, \
571 1000, \
572 30, \
573 CFG_VALUE_OR_DEFAULT, \
574 "Del Ack Timer Count inorder to trigger TCP Del Ack")
575
576/*
577 * <ini>
578 * gTcpTxHighTputThreshold - High Threshold inorder to trigger High
579 * Tx Throughput requirement.
580 * @Min: 0
581 * @Max: 16000
582 * @Default: 500
583 *
584 * This ini specifies the threshold of packets transmitted
585 * over a period of 100 ms beyond which TCP can be considered to have a high
586 * TX throughput requirement. The driver uses this condition to tweak TCP TX
587 * specific parameters (via cnss-daemon)
588 *
589 * Supported Feature: To tweak TCP TX n/w parameters
590 *
591 * Usage: Internal
592 *
593 * </ini>
594 */
595#define CFG_DP_TCP_TX_HIGH_TPUT_THRESHOLD \
596 CFG_INI_UINT( \
597 "gTcpTxHighTputThreshold", \
598 0, \
599 16000, \
600 500, \
601 CFG_VALUE_OR_DEFAULT, \
602 "High Threshold inorder to trigger High Tx Tp")
603
604#endif /* MSM_PLATFORM */
605
606/*
607 * <ini>
608 * NAPI_CPU_AFFINITY_MASK - CPU mask to affine NAPIs
609 *
610 * @Min: 0
611 * @Max: 0xFF
612 * @Default: 0
613 *
614 * This ini is used to set NAPI IRQ CPU affinity
615 *
616 * Supported Feature: NAPI
617 *
618 * Usage: Internal
619 *
620 * </ini>
621 */
622#define CFG_DP_NAPI_CE_CPU_MASK \
623 CFG_INI_UINT( \
624 "NAPI_CPU_AFFINITY_MASK", \
625 0, \
626 0xFF, \
627 0, \
628 CFG_VALUE_OR_DEFAULT, \
629 "CPU mask to affine NAPIs")
630
631/*
632 * <ini>
633 * RX_THREAD_CPU_AFFINITY_MASK - CPU mask to affine Rx_thread
634 *
635 * @Default: e
636 *
637 * This ini is used to set Rx_thread CPU affinity
638 *
639 * Supported Feature: Rx_thread
640 *
641 * Usage: Internal
642 *
643 * </ini>
644 */
hangtian2b9856f2019-01-25 11:50:39 +0800645#ifdef RX_PERFORMANCE
646#define CFG_DP_RX_THREAD_CPU_MASK \
647 CFG_INI_UINT( \
648 "RX_THREAD_CPU_AFFINITY_MASK", \
649 0, \
650 0xFF, \
651 0x02, \
652 CFG_VALUE_OR_DEFAULT, \
653 "CPU mask to affine Rx_thread")
654#else
jitiphil869b9f72018-09-25 17:14:01 +0530655#define CFG_DP_RX_THREAD_CPU_MASK \
656 CFG_INI_UINT( \
657 "RX_THREAD_CPU_AFFINITY_MASK", \
658 0, \
659 0xFF, \
660 0, \
661 CFG_VALUE_OR_DEFAULT, \
662 "CPU mask to affine Rx_thread")
hangtian2b9856f2019-01-25 11:50:39 +0800663#endif
jitiphil869b9f72018-09-25 17:14:01 +0530664
665/*
666 * <ini>
 667 * rpsRxQueueCpuMapList - RPS CPU map list for the RX queues registered
 668 * by the WLAN driver
 669 *
 670 * @Min: 1 character
 671 * @Max: 30 characters
 672 * @Default: "e"
 673 *
 674 * This ini is used to set the RPS CPU map for the WLAN driver RX queues
674 *
675 * List of RPS CPU maps for different rx queues registered by WLAN driver
676 * Ref - Kernel/Documentation/networking/scaling.txt
677 * RPS CPU map for a particular RX queue, selects CPU(s) for bottom half
678 * processing of RX packets. For example, for a system with 4 CPUs,
679 * 0xe: Use CPU1 - CPU3 and donot use CPU0.
680 * 0x0: RPS is disabled, packets are processed on the interrupting CPU.
 681 *
682 * WLAN driver registers NUM_TX_QUEUES queues for tx and rx each during
683 * alloc_netdev_mq. Hence, we need to have a cpu mask for each of the rx queues.
684 *
685 * For example, if the NUM_TX_QUEUES is 4, a sample WLAN ini entry may look like
686 * rpsRxQueueCpuMapList=a b c d
687 * For a 4 CPU system (CPU0 - CPU3), this implies:
688 * 0xa - (1010) use CPU1, CPU3 for rx queue 0
689 * 0xb - (1011) use CPU0, CPU1 and CPU3 for rx queue 1
690 * 0xc - (1100) use CPU2, CPU3 for rx queue 2
691 * 0xd - (1101) use CPU0, CPU2 and CPU3 for rx queue 3
692
693 * In practice, we may want to avoid the cores which are heavily loaded.
694 *
695 * Default value of rpsRxQueueCpuMapList. Different platforms may have
696 * different configurations for NUM_TX_QUEUES and # of cpus, and will need to
697 * configure an appropriate value via ini file. Setting default value to 'e' to
698 * avoid use of CPU0 (since its heavily used by other system processes) by rx
699 * queue 0, which is currently being used for rx packet processing.
700 *
701 * Maximum length of string used to hold a list of cpu maps for various rx
702 * queues. Considering a 16 core system with 5 rx queues, a RPS CPU map
703 * list may look like -
704 * rpsRxQueueCpuMapList = ffff ffff ffff ffff ffff
705 * (all 5 rx queues can be processed on all 16 cores)
706 * max string len = 24 + 1(for '\0'). Considering 30 to be on safe side.
707 *
708 * Supported Feature: Rx_thread
709 *
710 * Usage: Internal
711 * </ini>
712 */
/* String INI holding per-RX-queue RPS CPU maps (see the <ini> doc above):
 * min length 1, max length 30, default "e" (avoid CPU0 for rx queue 0).
 * Fix: description string typo "queus" -> "queues".
 */
#define CFG_DP_RPS_RX_QUEUE_CPU_MAP_LIST \
	CFG_INI_STRING( \
		"rpsRxQueueCpuMapList", \
		1, \
		30, \
		"e", \
		"specify RPS map for different RX queues")
720
721/*
722 * <ini>
723 * gEnableTxOrphan- Enable/Disable orphaning of Tx packets
724 * @Default: false
725 *
726 * This ini is used to enable/disable orphaning of Tx packets.
727 *
728 * Related: None
729 *
730 * Usage: External
731 *
732 * </ini>
733 */
734#define CFG_DP_TX_ORPHAN_ENABLE \
735 CFG_INI_BOOL( \
736 "gEnableTxOrphan", \
737 false, \
738 "orphaning of Tx packets")
739
/*
 * <ini>
 * rx_mode - Control to decide rx mode for packet processing
 *
 * @Min: 0
 * @Max: (CFG_ENABLE_RX_THREAD | CFG_ENABLE_RPS | CFG_ENABLE_NAPI | \
 *	  CFG_ENABLE_DYNAMIC_RPS | CFG_ENABLE_DP_RX_THREADS)
 * @Default: CFG_RX_MODE_DEFAULT (platform dependent, see definitions above)
 *
 * Some possible configurations:
 * rx_mode=0 - Uses tasklets for bottom half
 * CFG_ENABLE_NAPI (rx_mode=4) - Uses NAPI for bottom half
 * CFG_ENABLE_RX_THREAD | CFG_ENABLE_NAPI (rx_mode=5) - NAPI for bottom half,
 * rx_thread for stack. Single threaded.
 * CFG_ENABLE_DP_RX_THREADS | CFG_ENABLE_NAPI (rx_mode=20) - NAPI for bottom
 * half, dp_rx_thread for stack processing. Supports multiple rx threads.
 *
 * Usage: Internal
 *
 * </ini>
 */
#define CFG_DP_RX_MODE \
	CFG_INI_UINT("rx_mode", \
		     0, CFG_RX_MODE_MAX, CFG_RX_MODE_DEFAULT, \
		     CFG_VALUE_OR_DEFAULT, \
		     "Control to decide rx mode for packet processing")
765
jitiphil296c23e2018-11-15 16:26:14 +0530766/*
767 * <ini>
768 *
769 * In a typical infrastructure setup, it is quite normal to receive
770 * replayed multicast packets. These packets may cause more harm than
771 * help if not handled properly. Providing a configuration option
772 * to enable filtering of such packets
773 *
774 * </ini>
775 */
776#define CFG_DP_FILTER_MULTICAST_REPLAY \
777 CFG_INI_BOOL("enable_multicast_replay_filter", \
778 true, "Enable filtering of replayed multicast packets")
779
780/*
781 * <ini>
782 * rx_wakelock_timeout - Amount of time to hold wakelock for RX unicast packets
783 * @Min: 0
784 * @Max: 100
785 * @Default: 50
786 *
787 * This ini item configures the amount of time, in milliseconds, that the driver
788 * should prevent system power collapse after receiving an RX unicast packet.
 789 * A configured value of 0 disables the RX Wakelock feature completely.
790 *
791 * Related: None.
792 *
793 * Supported Feature: RX Wakelock
794 *
795 * Usage: Internal/External
796 *
797 * </ini>
798 */
799#define CFG_DP_RX_WAKELOCK_TIMEOUT \
800 CFG_INI_UINT("rx_wakelock_timeout", \
801 0, 100, 50, CFG_VALUE_OR_DEFAULT, \
802 "Amount of time to hold wakelock for RX unicast packets")
803
804/*
805 * <ini>
806 * num_dp_rx_threads - Control to set the number of dp rx threads
807 *
808 * @Min: 1
809 * @Max: 4
810 * @Default: 1
811 *
812 * Usage: Internal
813 *
814 * </ini>
815 */
816#define CFG_DP_NUM_DP_RX_THREADS \
817 CFG_INI_UINT("num_dp_rx_threads", \
818 1, 4, 1, CFG_VALUE_OR_DEFAULT, \
819 "Control to set the number of dp rx threads")
820
jitiphil377bcc12018-10-05 19:46:08 +0530821#define CFG_DP_CE_SERVICE_MAX_RX_IND_FLUSH \
822 CFG_INI_UINT("ce_service_max_rx_ind_flush", \
823 1, 32, 32, \
824 CFG_VALUE_OR_DEFAULT, "Ctrl to set ce service max rx ind flsh")
825
826#define CFG_DP_CE_SERVICE_MAX_YIELD_TIME \
827 CFG_INI_UINT("ce_service_max_yield_time", \
828 500, 10000, 10000, \
829 CFG_VALUE_OR_DEFAULT, "Ctrl to set ce service max yield time")
830
831#ifdef WLAN_FEATURE_FASTPATH
832#define CFG_DP_ENABLE_FASTPATH \
833 CFG_INI_BOOL("gEnableFastPath", \
834 false, "Ctrl to enable fastpath feature")
835
836#define CFG_DP_ENABLE_FASTPATH_ALL \
837 CFG(CFG_DP_ENABLE_FASTPATH)
838#else
839#define CFG_DP_ENABLE_FASTPATH_ALL
840#endif
841
Alok Kumar2fad6442018-11-08 19:19:28 +0530842#define CFG_DP_ENABLE_TCP_PARAM_UPDATE \
843 CFG_INI_BOOL("enable_tcp_param_update", \
844 false, "configure TCP param through Wi-Fi HAL")
jitiphilb03ae082018-11-09 17:41:59 +0530845/*
846 * <ini>
847 *
848 * Enable/disable DPTRACE
 849 * Enabling this might have a performance impact.
850 *
851 * Config DPTRACE
852 * The sequence of params is important. If some param is missing, defaults are
853 * considered.
854 * Param 1: Enable/Disable DP Trace live mode (uint8_t)
855 * Param 2: DP Trace live mode high bandwidth thresh.(uint8_t)
856 * (packets/second) beyond which DP Trace is disabled. Decimal Val.
857 * MGMT, DHCP, EAPOL, ARP pkts are not counted. ICMP and Data are.
858 * Param 3: Default Verbosity (0-4)
859 * Param 4: Proto Bitmap (uint8_t). Decimal Value.
860 * (decimal 62 = 0x3e)
861 * e.g., to disable live mode, use the following param in the ini file.
862 * gDptraceConfig = 0
863 * e.g., to enable dptrace live mode and set the thresh as 6,
864 * use the following param in the ini file.
865 * gDptraceConfig = 1, 6
866 *
867 * </ini>
868 */
869#ifdef CONFIG_DP_TRACE
870#define CFG_DP_ENABLE_DP_TRACE \
871 CFG_INI_BOOL("enable_dp_trace", \
872 true, "Ctrl to enable dp trace feature")
873
874#define CFG_DP_DP_TRACE_CONFIG \
875 CFG_INI_STRING( \
876 "gDptraceConfig", \
877 1, \
878 20, \
879 "1, 6, 2, 126", \
880 "dp trace configuration string")
881#define CFG_DP_CONFIG_DP_TRACE_ALL \
882 CFG(CFG_DP_ENABLE_DP_TRACE) \
883 CFG(CFG_DP_DP_TRACE_CONFIG)
884#else
885#define CFG_DP_CONFIG_DP_TRACE_ALL
886#endif
887
888/*
889 * <ini>
890 * gEnableNUDTracking - Will enable or disable NUD tracking within driver
891 * @Min: 0
892 * @Max: 1
893 * @Default: 1
894 *
895 * This ini is used to enable or disable NUD tracking within driver
896 *
897 * Related: None
898 *
899 * Supported Feature: STA
900 *
901 * Usage: External
902 *
903 * <ini>
904 */
905#ifdef WLAN_NUD_TRACKING
906#define CFG_DP_ENABLE_NUD_TRACKING \
907 CFG_INI_BOOL("gEnableNUDTracking", \
908 true, "Ctrl to enable nud tracking")
909
910#define CFG_DP_ENABLE_NUD_TRACKING_ALL \
911 CFG(CFG_DP_ENABLE_NUD_TRACKING)
912#else
913#define CFG_DP_ENABLE_NUD_TRACKING_ALL
914#endif
Alok Kumar2fad6442018-11-08 19:19:28 +0530915
jitiphil869b9f72018-09-25 17:14:01 +0530916#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
917#define CFG_HDD_DP_LEGACY_TX_FLOW \
918 CFG(CFG_DP_LL_TX_FLOW_LWM) \
919 CFG(CFG_DP_LL_TX_FLOW_HWM_OFFSET) \
920 CFG(CFG_DP_LL_TX_FLOW_MAX_Q_DEPTH) \
921 CFG(CFG_DP_LL_TX_LBW_FLOW_LWM) \
922 CFG(CFG_DP_LL_TX_LBW_FLOW_HWM_OFFSET) \
923 CFG(CFG_DP_LL_TX_LBW_FLOW_MAX_Q_DEPTH) \
924 CFG(CFG_DP_LL_TX_HBW_FLOW_LWM) \
925 CFG(CFG_DP_LL_TX_HBW_FLOW_HWM_OFFSET) \
926 CFG(CFG_DP_LL_TX_HBW_FLOW_MAX_Q_DEPTH)
927#else
928#define CFG_HDD_DP_LEGACY_TX_FLOW
929#endif
930
931#ifdef MSM_PLATFORM
932#define CFG_HDD_DP_MSM_PLATFORM \
933 CFG(CFG_DP_BUS_BANDWIDTH_HIGH_THRESHOLD) \
934 CFG(CFG_DP_BUS_BANDWIDTH_MEDIUM_THRESHOLD) \
935 CFG(CFG_DP_BUS_BANDWIDTH_LOW_THRESHOLD) \
936 CFG(CFG_DP_BUS_BANDWIDTH_COMPUTE_INTERVAL) \
937 CFG(CFG_DP_ENABLE_TCP_LIMIT_OUTPUT) \
938 CFG(CFG_DP_ENABLE_TCP_ADV_WIN_SCALE) \
939 CFG(CFG_DP_ENABLE_TCP_DELACK) \
940 CFG(CFG_DP_TCP_DELACK_THRESHOLD_HIGH) \
941 CFG(CFG_DP_TCP_DELACK_THRESHOLD_LOW) \
942 CFG(CFG_DP_TCP_DELACK_TIMER_COUNT) \
943 CFG(CFG_DP_TCP_TX_HIGH_TPUT_THRESHOLD)
944#else
945#define CFG_HDD_DP_MSM_PLATFORM
946#endif
947
jitiphilb03ae082018-11-09 17:41:59 +0530948#define CFG_HDD_DP_ALL \
jitiphil869b9f72018-09-25 17:14:01 +0530949 CFG(CFG_DP_NAPI_CE_CPU_MASK) \
950 CFG(CFG_DP_RX_THREAD_CPU_MASK) \
951 CFG(CFG_DP_RPS_RX_QUEUE_CPU_MAP_LIST) \
952 CFG(CFG_DP_TX_ORPHAN_ENABLE) \
jitiphil377bcc12018-10-05 19:46:08 +0530953 CFG(CFG_DP_RX_MODE) \
954 CFG(CFG_DP_CE_SERVICE_MAX_RX_IND_FLUSH) \
955 CFG(CFG_DP_CE_SERVICE_MAX_YIELD_TIME) \
Alok Kumar2fad6442018-11-08 19:19:28 +0530956 CFG(CFG_DP_ENABLE_TCP_PARAM_UPDATE) \
jitiphil296c23e2018-11-15 16:26:14 +0530957 CFG(CFG_DP_FILTER_MULTICAST_REPLAY) \
958 CFG(CFG_DP_RX_WAKELOCK_TIMEOUT) \
959 CFG(CFG_DP_NUM_DP_RX_THREADS) \
jitiphilb03ae082018-11-09 17:41:59 +0530960 CFG_DP_ENABLE_FASTPATH_ALL \
jitiphil869b9f72018-09-25 17:14:01 +0530961 CFG_HDD_DP_MSM_PLATFORM \
jitiphilb03ae082018-11-09 17:41:59 +0530962 CFG_HDD_DP_LEGACY_TX_FLOW \
963 CFG_DP_ENABLE_NUD_TRACKING_ALL \
964 CFG_DP_CONFIG_DP_TRACE_ALL
jitiphil869b9f72018-09-25 17:14:01 +0530965#endif