/*
 * Copyright (c) 2015-2017 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

/**
 * DOC: wlan_hdd_lro.c
 *
 * WLAN HDD LRO interface implementation
 */

#include <wlan_hdd_includes.h>
#include <qdf_types.h>
#include <qdf_lro.h>
#include <wlan_hdd_lro.h>
#include <wlan_hdd_napi.h>
#include <wma_api.h>
#include <cdp_txrx_lro.h>

#include <linux/inet_lro.h>
#include <linux/list.h>
#include <linux/random.h>
#include <net/tcp.h>

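/*
 * Fields of struct net_lro_info that hdd_lro_rx() fills in before handing
 * a frame to lro_receive_skb_ext().
 */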
#define LRO_VALID_FIELDS \
	(LRO_DESC | LRO_ELIGIBILITY_CHECKED | LRO_TCP_ACK_NUM | \
	 LRO_TCP_DATA_CSUM | LRO_TCP_SEQ_NUM | LRO_TCP_WIN)

#if defined(QCA_WIFI_NAPIER_EMULATION)
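/*
 * On the Napier emulation platform the per-flow LRO state (manager plus
 * descriptor) is expected to arrive already attached to each nbuf by the
 * lower-layer datapath, so the init/enable/disable hooks below reduce to
 * stubs and hdd_lro_rx() simply consumes that context.
 */
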
/**
 * hdd_lro_init() - initialization for LRO
 * @hdd_ctx: HDD context
 *
 * This function sends the LRO configuration to the firmware
 * via WMA. Make sure that this function gets called after NAPI
 * instances have been created.
 *
 * Return: 0 - success, < 0 - failure
 */
int hdd_lro_init(hdd_context_t *hdd_ctx)
{
	return 0;
}

/**
 * hdd_lro_enable() - enable LRO
 * @hdd_ctx: HDD context
 * @adapter: HDD adapter
 *
 * This function enables LRO in the network device attached to
 * the HDD adapter. It also allocates the HDD LRO instance for
 * that network device
 *
 * Return: 0 - success, < 0 - failure
 */
int hdd_lro_enable(hdd_context_t *hdd_ctx, hdd_adapter_t *adapter)
{
	return 0;
}

/**
 * hdd_lro_rx() - LRO receive function
 * @hdd_ctx: HDD context
 * @adapter: HDD adapter
 * @skb: network buffer
 *
 * Delivers LRO eligible frames to the LRO manager
 *
 * Return: HDD_LRO_RX - frame delivered to LRO manager
 * HDD_LRO_NO_RX - frame not delivered
 */
enum hdd_lro_rx_status hdd_lro_rx(hdd_context_t *hdd_ctx,
	hdd_adapter_t *adapter, struct sk_buff *skb)
{
	struct net_lro_mgr *lro_mgr;
	qdf_lro_ctx_t ctx = (qdf_lro_ctx_t)QDF_NBUF_CB_RX_LRO_CTX(skb);

	/* LRO is not supported or non-TCP packet */
	if (!ctx)
		return HDD_LRO_NO_RX;

	lro_mgr = ctx->lro_mgr;

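	/*
	 * Eligible frames are handed to the LRO manager for aggregation;
	 * anything else flushes the descriptor for this flow so that any
	 * pending aggregated data is delivered first.
	 */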
	if (QDF_NBUF_CB_RX_LRO_ELIGIBLE(skb)) {
		struct net_lro_info hdd_lro_info;

		hdd_lro_info.valid_fields = LRO_VALID_FIELDS;

		hdd_lro_info.lro_desc = QDF_NBUF_CB_RX_LRO_DESC(skb);
		hdd_lro_info.lro_eligible = 1;
		hdd_lro_info.tcp_ack_num = QDF_NBUF_CB_RX_TCP_ACK_NUM(skb);
		hdd_lro_info.tcp_data_csum =
			csum_unfold(QDF_NBUF_CB_RX_TCP_CHKSUM(skb));
		hdd_lro_info.tcp_seq_num = QDF_NBUF_CB_RX_TCP_SEQ_NUM(skb);
		hdd_lro_info.tcp_win = QDF_NBUF_CB_RX_TCP_WIN(skb);

		lro_receive_skb_ext(lro_mgr, skb,
				    (void *)adapter, &hdd_lro_info);

		if (!hdd_lro_info.lro_desc->active)
			qdf_lro_flow_free(skb);

		return HDD_LRO_RX;
	} else {
		lro_flush_desc(lro_mgr, QDF_NBUF_CB_RX_LRO_DESC(skb));
		return HDD_LRO_NO_RX;
	}
}

/**
 * hdd_lro_disable() - disable LRO
 * @hdd_ctx: HDD context
 * @adapter: HDD adapter
 *
 * This function frees the HDD LRO instance for the network
 * device attached to the HDD adapter
 *
 * Return: none
 */
void hdd_lro_disable(hdd_context_t *hdd_ctx, hdd_adapter_t *adapter)
{
}

#else

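/* Maximum number of packets aggregated into one super-frame per LRO descriptor */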
#define LRO_MAX_AGGR_SIZE 100

/**
 * hdd_lro_get_skb_header() - LRO callback function
 * @skb: network buffer
 * @ip_hdr: contains a pointer to the IP header
 * @tcpudp_hdr: contains a pointer to the TCP header
 * @hdr_flags: indicates if this is a TCP, IPV4 frame
 * @priv: private driver specific opaque pointer
 *
 * Get the IP and TCP headers from the skb
 *
 * Return: 0 - success, < 0 - failure
 */
static int hdd_lro_get_skb_header(struct sk_buff *skb, void **ip_hdr,
	void **tcpudp_hdr, u64 *hdr_flags, void *priv)
{
	if (QDF_NBUF_CB_RX_IPV6_PROTO(skb)) {
		*hdr_flags = 0;
		return -EINVAL;
	}

	*hdr_flags |= (LRO_IPV4 | LRO_TCP);
	(*ip_hdr) = skb->data;
	(*tcpudp_hdr) = skb->data + QDF_NBUF_CB_RX_TCP_OFFSET(skb);
	return 0;
}

/**
 * hdd_lro_desc_pool_init() - Initialize the free pool of LRO
 * descriptors
 * @lro_desc_pool: free pool of the LRO descriptors
 * @lro_mgr: LRO manager
 *
 * Initialize a list that holds the free LRO descriptors
 *
 * Return: none
 */
static void hdd_lro_desc_pool_init(struct hdd_lro_desc_pool *lro_desc_pool,
	struct net_lro_mgr *lro_mgr)
{
	int i;

	INIT_LIST_HEAD(&lro_desc_pool->lro_free_list_head);

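	/*
	 * Entry i of the HDD descriptor pool is tied to slot i of the LRO
	 * manager's descriptor array; hdd_lro_desc_free() relies on this
	 * one-to-one mapping to recover a pool entry from a descriptor
	 * pointer.
	 */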
	for (i = 0; i < LRO_DESC_POOL_SZ; i++) {
		lro_desc_pool->lro_desc_array[i].lro_desc =
			&lro_mgr->lro_arr[i];
		list_add_tail(&lro_desc_pool->lro_desc_array[i].lro_node,
			      &lro_desc_pool->lro_free_list_head);
	}
}

/**
 * hdd_lro_desc_info_init() - Initialize the LRO descriptors
 * @hdd_info: HDD LRO data structure
 *
 * Initialize the free pool of LRO descriptors and the entries
 * of the hash table
 *
 * Return: none
 */
static void hdd_lro_desc_info_init(struct hdd_lro_s *hdd_info)
{
	int i;

	/* Initialize pool of free LRO desc. */
	hdd_lro_desc_pool_init(&hdd_info->lro_desc_info.lro_desc_pool,
			       hdd_info->lro_mgr);

	/* Initialize the hash table of LRO desc. */
	for (i = 0; i < LRO_DESC_TABLE_SZ; i++) {
		/* initialize the flows in the hash table */
		INIT_LIST_HEAD(&hdd_info->lro_desc_info.
			       lro_hash_table[i].lro_desc_list);
	}
}

/**
 * hdd_lro_tcp_flow_match() - function to check for a flow match
 * @lro_desc: LRO descriptor
 * @iph: IP header
 * @tcph: TCP header
 *
 * Checks if the descriptor belongs to the same flow as the one
 * indicated by the TCP and IP header.
 *
 * Return: true - flow match, false - flow does not match
 */
static inline bool hdd_lro_tcp_flow_match(struct net_lro_desc *lro_desc,
	struct iphdr *iph,
	struct tcphdr *tcph)
{
	if ((lro_desc->tcph->source != tcph->source) ||
	    (lro_desc->tcph->dest != tcph->dest) ||
	    (lro_desc->iph->saddr != iph->saddr) ||
	    (lro_desc->iph->daddr != iph->daddr))
		return false;

	return true;
}

/**
 * hdd_lro_desc_find() - LRO descriptor look-up function
 * @lro_info: HDD LRO data structure
 * @skb: network buffer
 * @iph: IP header
 * @tcph: TCP header
 * @lro_desc: contains a pointer to the LRO descriptor
 *
 * Looks up the LRO descriptor in the hash table based on the
 * Toeplitz flow ID. If the flow is not found, a new LRO
 * descriptor is allocated and placed in the hash table.
 *
 * Return: 0 - success, < 0 - failure
 */
static int hdd_lro_desc_find(struct hdd_lro_s *lro_info,
	struct sk_buff *skb, struct iphdr *iph, struct tcphdr *tcph,
	struct net_lro_desc **lro_desc)
{
	uint32_t i;
	struct hdd_lro_desc_table *lro_hash_table;
	struct list_head *ptr;
	struct hdd_lro_desc_entry *entry;
	struct hdd_lro_desc_pool *free_pool;
	struct hdd_lro_desc_info *desc_info = &lro_info->lro_desc_info;

	*lro_desc = NULL;
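	/*
	 * The Toeplitz flow hash carried in the nbuf control block selects
	 * the hash-table bucket for this flow.
	 */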
	i = QDF_NBUF_CB_RX_FLOW_ID_TOEPLITZ(skb) & LRO_DESC_TABLE_SZ_MASK;

	lro_hash_table = &desc_info->lro_hash_table[i];

	if (!lro_hash_table) {
		hdd_err("Invalid hash entry");
		QDF_ASSERT(0);
		return -EINVAL;
	}

	/* Check if this flow exists in the descriptor list */
	list_for_each(ptr, &lro_hash_table->lro_desc_list) {
		struct net_lro_desc *tmp_lro_desc = NULL;

		entry = list_entry(ptr, struct hdd_lro_desc_entry, lro_node);
		tmp_lro_desc = entry->lro_desc;
		if (tmp_lro_desc->active) {
			if (hdd_lro_tcp_flow_match(tmp_lro_desc, iph, tcph)) {
				*lro_desc = entry->lro_desc;
				return 0;
			}
		}
	}

	/* no existing flow found, a new LRO desc needs to be allocated */
	free_pool = &lro_info->lro_desc_info.lro_desc_pool;
	entry = list_first_entry_or_null(
		&free_pool->lro_free_list_head,
		struct hdd_lro_desc_entry, lro_node);
	if (NULL == entry) {
		hdd_err("Could not allocate LRO desc!");
		return -ENOMEM;
	}

	list_del_init(&entry->lro_node);

	if (NULL == entry->lro_desc) {
		hdd_err("entry->lro_desc is NULL!");
		return -EINVAL;
	}

	qdf_mem_zero((void *)entry->lro_desc, sizeof(struct net_lro_desc));

	/*
	 * lro_desc->active should be 0 and lro_desc->tcp_rcv_tsval
	 * should be 0 for newly allocated lro descriptors
	 */
	list_add_tail(&entry->lro_node,
		      &lro_hash_table->lro_desc_list);
	*lro_desc = entry->lro_desc;

	return 0;
}

/**
 * hdd_lro_get_desc() - LRO descriptor look-up function
 * @lro_mgr: LRO manager
 * @lro_arr: array of LRO descriptors
 * @iph: IP header
 * @tcph: TCP header
 *
 * Looks up the LRO descriptor for a given flow
 *
 * Return: LRO descriptor
 */
static struct net_lro_desc *hdd_lro_get_desc(struct net_lro_mgr *lro_mgr,
	struct net_lro_desc *lro_arr,
	struct iphdr *iph,
	struct tcphdr *tcph)
{
	int i;

	for (i = 0; i < lro_mgr->max_desc; i++) {
		if (lro_arr[i].active)
			if (hdd_lro_tcp_flow_match(&lro_arr[i], iph, tcph))
				return &lro_arr[i];
	}

	return NULL;
}

/**
 * hdd_lro_eligible() - LRO eligibility check
 * @lro_info: HDD LRO data structure
 * @skb: network buffer
 * @iph: IP header
 * @tcph: TCP header
 * @desc: LRO descriptor
 *
 * Determines if the frame is LRO eligible
 *
 * Return: true - LRO eligible frame, false - frame is not LRO
 * eligible
 */
static bool hdd_lro_eligible(struct hdd_lro_s *lro_info, struct sk_buff *skb,
	struct iphdr *iph, struct tcphdr *tcph, struct net_lro_desc **desc)
{
	struct net_lro_desc *lro_desc = NULL;
	int hw_lro_eligible =
		QDF_NBUF_CB_RX_LRO_ELIGIBLE(skb) &&
		(!QDF_NBUF_CB_RX_TCP_PURE_ACK(skb));

	if (!hw_lro_eligible)
		return false;

	if (0 != hdd_lro_desc_find(lro_info, skb, iph, tcph, desc)) {
		hdd_err("finding the LRO desc failed");
		return false;
	}

	lro_desc = *desc;
	if (!lro_desc)
		return false;

	/* if this is not the first skb, check the timestamp option */
	if (lro_desc->tcp_rcv_tsval) {
		if (tcph->doff == 8) {
			__be32 *topt = (__be32 *)(tcph + 1);

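			/*
			 * doff == 8 means exactly 12 option bytes follow the
			 * header. When they hold the aligned NOP/NOP/TIMESTAMP
			 * word (the same layout inet_lro expects), verify that
			 * the new timestamp is not older than the aggregated
			 * one and that the echo reply is non-zero.
			 */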
			if (*topt != htonl((TCPOPT_NOP << 24) |
					   (TCPOPT_NOP << 16) |
					   (TCPOPT_TIMESTAMP << 8) |
					   TCPOLEN_TIMESTAMP))
				return true;

			/* timestamp should be in right order */
			topt++;
			if (after(ntohl(lro_desc->tcp_rcv_tsval),
				  ntohl(*topt)))
				return false;

			/* timestamp reply should not be zero */
			topt++;
			if (*topt == 0)
				return false;
		}
	}

	return true;
}

/**
 * hdd_lro_desc_free() - Free the LRO descriptor
 * @desc: LRO descriptor
 * @lro_info: HDD LRO data structure
 *
 * Return the LRO descriptor to the free pool
 *
 * Return: none
 */
static void hdd_lro_desc_free(struct net_lro_desc *desc,
	struct hdd_lro_s *lro_info)
{
	struct hdd_lro_desc_entry *entry;
	struct net_lro_mgr *lro_mgr = lro_info->lro_mgr;
	struct net_lro_desc *arr_base = lro_mgr->lro_arr;
	struct hdd_lro_desc_info *desc_info = &lro_info->lro_desc_info;
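	/*
	 * The descriptor's index in the LRO manager's array matches its entry
	 * in the HDD descriptor pool (set up in hdd_lro_desc_pool_init()), so
	 * pointer arithmetic recovers the pool entry.
	 */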
	int i = desc - arr_base;

	if (i >= LRO_DESC_POOL_SZ) {
		hdd_err("invalid index %d", i);
		return;
	}

	entry = &desc_info->lro_desc_pool.lro_desc_array[i];

	list_del_init(&entry->lro_node);

	list_add_tail(&entry->lro_node,
		      &desc_info->lro_desc_pool.lro_free_list_head);
}

/**
 * hdd_lro_flush_pkt() - function to flush the LRO flow
 * @lro_mgr: LRO manager
 * @iph: IP header
 * @tcph: TCP header
 * @lro_info: HDD LRO data structure
 *
 * Flush all the packets aggregated in the LRO manager for the
 * flow indicated by the TCP and IP header
 *
 * Return: none
 */
static void hdd_lro_flush_pkt(struct net_lro_mgr *lro_mgr,
	struct iphdr *iph, struct tcphdr *tcph,
	struct hdd_lro_s *lro_info)
{
	struct net_lro_desc *lro_desc;

	lro_desc = hdd_lro_get_desc(lro_mgr, lro_mgr->lro_arr, iph, tcph);

	if (lro_desc) {
		/* statistics */
		hdd_lro_desc_free(lro_desc, lro_info);
		lro_flush_desc(lro_mgr, lro_desc);
	}
}

/**
 * hdd_lro_flush() - LRO flush callback
 * @data: opaque pointer containing HDD specific information
 *
 * Callback registered to flush all the packets aggregated in
 * the LRO manager for all the flows
 *
 * Return: none
 */
static void hdd_lro_flush(void *data)
{
	struct hdd_lro_s *hdd_lro = data;
	struct net_lro_mgr *lro_mgr = hdd_lro->lro_mgr;
	int i;

	for (i = 0; i < lro_mgr->max_desc; i++) {
		if (lro_mgr->lro_arr[i].active) {
			hdd_lro_desc_free(&lro_mgr->lro_arr[i], hdd_lro);
			lro_flush_desc(lro_mgr, &lro_mgr->lro_arr[i]);
		}
	}
}

/**
 * hdd_lro_init() - initialization for LRO
 * @hdd_ctx: HDD context
 *
 * This function sends the LRO configuration to the firmware
 * via WMA. Make sure that this function gets called after NAPI
 * instances have been created.
 *
 * Return: 0 - success, < 0 - failure
 */
int hdd_lro_init(hdd_context_t *hdd_ctx)
{
	struct cdp_lro_hash_config lro_config;

	if ((!hdd_ctx->config->lro_enable) &&
	    (hdd_napi_enabled(HDD_NAPI_ANY) == 0)) {
		hdd_warn("LRO and NAPI are both disabled");
		return 0;
	}

	lro_config.lro_enable = 1;
	lro_config.tcp_flag = TCPHDR_ACK;
	lro_config.tcp_flag_mask = TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST |
		TCPHDR_ACK | TCPHDR_URG | TCPHDR_ECE | TCPHDR_CWR;
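	/*
	 * The firmware presumably matches (TCP flags & tcp_flag_mask) against
	 * tcp_flag, so only segments carrying ACK and none of the other
	 * masked flags (PSH is deliberately left out of the mask) are treated
	 * as aggregation candidates.
	 */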

	get_random_bytes(lro_config.toeplitz_hash_ipv4,
			 (sizeof(lro_config.toeplitz_hash_ipv4[0]) *
			  LRO_IPV4_SEED_ARR_SZ));

	get_random_bytes(lro_config.toeplitz_hash_ipv6,
			 (sizeof(lro_config.toeplitz_hash_ipv6[0]) *
			  LRO_IPV6_SEED_ARR_SZ));

	hdd_debug("sending the LRO configuration to the fw");
	if (0 != wma_lro_init(&lro_config)) {
		hdd_err("Failed to send LRO configuration!");
		hdd_ctx->config->lro_enable = 0;
		return -EAGAIN;
	}

	return 0;
}

static void *hdd_init_lro_mgr(void)
{
	struct hdd_lro_s *hdd_lro;
	hdd_context_t *hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
	size_t lro_info_sz, lro_mgr_sz, desc_arr_sz, desc_pool_sz;
	size_t hash_table_sz;
	uint8_t *lro_mem_ptr;

	if (NULL == hdd_ctx) {
		hdd_err("hdd_ctx is NULL");
		return NULL;
	}

	/*
	 * Allocate all the LRO data structures at once and then carve
	 * them up as needed
	 */
	lro_info_sz = sizeof(struct hdd_lro_s);
	lro_mgr_sz = sizeof(struct net_lro_mgr);
	desc_arr_sz = (LRO_DESC_POOL_SZ * sizeof(struct net_lro_desc));
	desc_pool_sz = (LRO_DESC_POOL_SZ * sizeof(struct hdd_lro_desc_entry));
	hash_table_sz = (sizeof(struct hdd_lro_desc_table) * LRO_DESC_TABLE_SZ);

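	/*
	 * One allocation, carved up below in this order:
	 * hdd_lro_s | net_lro_mgr | lro_arr | descriptor pool | hash table
	 */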
	lro_mem_ptr = qdf_mem_malloc(lro_info_sz + lro_mgr_sz + desc_arr_sz +
				     desc_pool_sz + hash_table_sz);

	if (NULL == lro_mem_ptr) {
		hdd_err("Unable to allocate memory for LRO");
		hdd_ctx->config->lro_enable = 0;
		return NULL;
	}

	hdd_lro = (struct hdd_lro_s *)lro_mem_ptr;
	lro_mem_ptr += lro_info_sz;
	/* LRO manager */
	hdd_lro->lro_mgr = (struct net_lro_mgr *)lro_mem_ptr;
	lro_mem_ptr += lro_mgr_sz;

	/* LRO descriptor array */
	hdd_lro->lro_mgr->lro_arr = (struct net_lro_desc *)lro_mem_ptr;
	lro_mem_ptr += desc_arr_sz;

	/* LRO descriptor pool */
	hdd_lro->lro_desc_info.lro_desc_pool.lro_desc_array =
		(struct hdd_lro_desc_entry *)lro_mem_ptr;
	lro_mem_ptr += desc_pool_sz;

	/* hash table to store the LRO descriptors */
	hdd_lro->lro_desc_info.lro_hash_table =
		(struct hdd_lro_desc_table *)lro_mem_ptr;

	/* Initialize the LRO descriptors */
	hdd_lro_desc_info_init(hdd_lro);

	if (hdd_ctx->enableRxThread)
		hdd_lro->lro_mgr->features = LRO_F_NI;

	if (hdd_napi_enabled(HDD_NAPI_ANY))
		hdd_lro->lro_mgr->features |= LRO_F_NAPI;

	hdd_lro->lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
	hdd_lro->lro_mgr->max_aggr = LRO_MAX_AGGR_SIZE;
	hdd_lro->lro_mgr->get_skb_header = hdd_lro_get_skb_header;
	hdd_lro->lro_mgr->ip_summed = CHECKSUM_UNNECESSARY;
	hdd_lro->lro_mgr->max_desc = LRO_DESC_POOL_SZ;

	return hdd_lro;
}

/**
 * hdd_lro_enable() - enable LRO
 * @hdd_ctx: HDD context
 * @adapter: HDD adapter
 *
 * This function enables LRO in the network device attached to
 * the HDD adapter. It also allocates the HDD LRO instance for
 * that network device
 *
 * Return: 0 - success, < 0 - failure
 */
int hdd_lro_enable(hdd_context_t *hdd_ctx, hdd_adapter_t *adapter)
{
	void *soc = cds_get_context(QDF_MODULE_ID_SOC);

	if (!hdd_ctx->config->lro_enable ||
	    QDF_STA_MODE != adapter->device_mode) {
		hdd_debug("LRO Disabled");
		return 0;
	}

	/* Register the flush callback and the LRO manager init routine */
	cdp_register_lro_flush_cb(soc, hdd_lro_flush, hdd_init_lro_mgr);
	adapter->dev->features |= NETIF_F_LRO;

	hdd_debug("LRO Enabled");

	return 0;
}

static void hdd_deinit_lro_mgr(void *lro_info)
{
	if (lro_info) {
		hdd_debug("LRO instance %p is being freed", lro_info);
		qdf_mem_free(lro_info);
	}
}

/**
 * hdd_lro_disable() - disable LRO
 * @hdd_ctx: HDD context
 * @adapter: HDD adapter
 *
 * This function frees the HDD LRO instance for the network
 * device attached to the HDD adapter
 *
 * Return: none
 */
void hdd_lro_disable(hdd_context_t *hdd_ctx, hdd_adapter_t *adapter)
{
	void *soc = cds_get_context(QDF_MODULE_ID_SOC);

	if (!hdd_ctx->config->lro_enable ||
	    QDF_STA_MODE != adapter->device_mode)
		return;

	/* Deregister the flush callback */
	cdp_deregister_lro_flush_cb(soc, hdd_deinit_lro_mgr);
}

/**
 * hdd_lro_rx() - LRO receive function
 * @hdd_ctx: HDD context
 * @adapter: HDD adapter
 * @skb: network buffer
 *
 * Delivers LRO eligible frames to the LRO manager
 *
 * Return: HDD_LRO_RX - frame delivered to LRO manager
 * HDD_LRO_NO_RX - frame not delivered
 */
enum hdd_lro_rx_status hdd_lro_rx(hdd_context_t *hdd_ctx,
	hdd_adapter_t *adapter, struct sk_buff *skb)
{
	enum hdd_lro_rx_status status = HDD_LRO_NO_RX;

	if ((adapter->dev->features & NETIF_F_LRO) &&
	    QDF_NBUF_CB_RX_TCP_PROTO(skb)) {
		struct iphdr *iph;
		struct tcphdr *tcph;
		struct net_lro_desc *lro_desc = NULL;
		struct hdd_lro_s *lro_info;
		struct hif_opaque_softc *hif_hdl =
			(struct hif_opaque_softc *)cds_get_context(
				QDF_MODULE_ID_HIF);

		if (hif_hdl == NULL) {
			hdd_err("hif_hdl is NULL");
			return status;
		}

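		/*
		 * Each RX (NAPI) context owns its own LRO manager; the RX
		 * context id stamped in the nbuf selects the instance via HIF.
		 */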
		lro_info = hif_get_lro_info(QDF_NBUF_CB_RX_CTX_ID(skb),
					    hif_hdl);
		if (lro_info == NULL) {
			hdd_err("LRO mgr is NULL, vdev could be going down");
			return status;
		}

		iph = (struct iphdr *)skb->data;
		tcph = (struct tcphdr *)(skb->data +
					 QDF_NBUF_CB_RX_TCP_OFFSET(skb));
		lro_info->lro_mgr->dev = adapter->dev;
		if (hdd_lro_eligible(lro_info, skb, iph, tcph, &lro_desc)) {
			struct net_lro_info hdd_lro_info;

			hdd_lro_info.valid_fields = LRO_VALID_FIELDS;

			hdd_lro_info.lro_desc = lro_desc;
			hdd_lro_info.lro_eligible = 1;
			hdd_lro_info.tcp_ack_num = QDF_NBUF_CB_RX_TCP_ACK_NUM(skb);
			hdd_lro_info.tcp_data_csum =
				csum_unfold(htons(QDF_NBUF_CB_RX_TCP_CHKSUM(skb)));
			hdd_lro_info.tcp_seq_num = QDF_NBUF_CB_RX_TCP_SEQ_NUM(skb);
			hdd_lro_info.tcp_win = QDF_NBUF_CB_RX_TCP_WIN(skb);

			lro_receive_skb_ext(lro_info->lro_mgr, skb,
					    (void *)adapter, &hdd_lro_info);

			if (!hdd_lro_info.lro_desc->active)
				hdd_lro_desc_free(lro_desc, lro_info);

			status = HDD_LRO_RX;
		} else {
			hdd_lro_flush_pkt(lro_info->lro_mgr,
					  iph, tcph, lro_info);
		}
	}
	return status;
}

#endif

/**
 * hdd_lro_display_stats() - display LRO statistics
 * @hdd_ctx: hdd context
 *
 * Return: none
 */
void hdd_lro_display_stats(hdd_context_t *hdd_ctx)
{
	hdd_debug("LRO stats is broken, will fix it");
}