/*
 * Copyright (c) 2020, The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <dp_types.h>
#include <dp_fisa_rx.h>
#include "hal_rx_flow.h"
#include "dp_htt.h"
#include "dp_internal.h"
#include <enet.h>
#include <linux/skbuff.h>

#if defined(FISA_DEBUG_ENABLE)
/**
 * hex_dump_skb_data() - Helper function to dump an skb while debugging
 * @nbuf: nbuf to be dumped
 * @dump: enable/disable dumping
 *
 * Return: NONE
 */
static void hex_dump_skb_data(qdf_nbuf_t nbuf, bool dump)
{
	qdf_nbuf_t next_nbuf;
	int i = 0;

	if (!dump)
		return;

	if (!nbuf)
		return;

	dp_fisa_debug("%ps: skb: %pK skb->next:%pK frag_list %pK skb->data:%pK len %d data_len %d",
		      (void *)_RET_IP_, nbuf, qdf_nbuf_next(nbuf),
		      skb_shinfo(nbuf)->frag_list, qdf_nbuf_data(nbuf),
		      nbuf->len, nbuf->data_len);
	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, nbuf->data,
			   64);

	next_nbuf = skb_shinfo(nbuf)->frag_list;
	while (next_nbuf) {
		dp_fisa_debug("%d nbuf:%pK nbuf->next:%pK nbuf->data:%pK len %d",
			      i, next_nbuf, qdf_nbuf_next(next_nbuf),
			      qdf_nbuf_data(next_nbuf), qdf_nbuf_len(next_nbuf));
		QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				   qdf_nbuf_data(next_nbuf), 64);
		next_nbuf = qdf_nbuf_next(next_nbuf);
		i++;
	}
}

/**
 * dump_tlvs() - Helper function to dump the TLVs of an msdu
 * @hal_soc_hdl: Handle to TLV functions
 * @buf: Pointer to TLV header
 * @dbg_level: Debug level controlling the TLV dump output
 *
 * Return: NONE
 */
static void dump_tlvs(hal_soc_handle_t hal_soc_hdl, uint8_t *buf,
		      uint8_t dbg_level)
{
	uint32_t fisa_aggr_count, fisa_timeout, cumulat_l4_csum, cumulat_ip_len;
	int flow_aggr_cont;

	hal_rx_dump_pkt_tlvs(hal_soc_hdl, buf, dbg_level);

	flow_aggr_cont = hal_rx_get_fisa_flow_agg_continuation(hal_soc_hdl,
							       buf);
	fisa_aggr_count = hal_rx_get_fisa_flow_agg_count(hal_soc_hdl, buf);
	fisa_timeout = hal_rx_get_fisa_timeout(hal_soc_hdl, buf);
	cumulat_l4_csum = hal_rx_get_fisa_cumulative_l4_checksum(hal_soc_hdl,
								 buf);
	cumulat_ip_len = hal_rx_get_fisa_cumulative_ip_length(hal_soc_hdl, buf);

	dp_fisa_debug("flow_aggr_cont %d, fisa_timeout %d, fisa_aggr_count %d, cumulat_l4_csum %d, cumulat_ip_len %d",
		      flow_aggr_cont, fisa_timeout, fisa_aggr_count,
		      cumulat_l4_csum, cumulat_ip_len);
}
#else
static void hex_dump_skb_data(qdf_nbuf_t nbuf, bool dump)
{
}

static void dump_tlvs(hal_soc_handle_t hal_soc_hdl, uint8_t *buf,
		      uint8_t dbg_level)
{
}
#endif

/**
 * nbuf_skip_rx_pkt_tlv() - Skip the Rx packet TLVs and L3 header padding
 * @hal_soc_hdl: Handle to hal_soc to get the TLV info
 * @nbuf: msdu for which the TLVs have to be skipped
 *
 * Return: None
 */
static void nbuf_skip_rx_pkt_tlv(hal_soc_handle_t hal_soc_hdl, qdf_nbuf_t nbuf)
{
	uint8_t *rx_tlv_hdr;
	uint32_t l2_hdr_offset;

	rx_tlv_hdr = qdf_nbuf_data(nbuf);
	l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(hal_soc_hdl,
							   rx_tlv_hdr);
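	/*
	 * Illustrative note (based on the offsets used here): the HW-written
	 * buffer is laid out as
	 * [Rx pkt TLVs][L3 header padding][Ethernet header][payload],
	 * so pulling RX_PKT_TLVS_LEN + l2_hdr_offset leaves skb->data
	 * pointing at the Ethernet header.
	 */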
	qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN + l2_hdr_offset);
}

/**
 * print_flow_tuple() - Debug function to dump flow tuple
 * @flow_tuple: flow tuple containing tuple info
 *
 * Return: NONE
 */
static void print_flow_tuple(struct cdp_rx_flow_tuple_info *flow_tuple)
{
	dp_info("dest_ip_127_96 0x%x", flow_tuple->dest_ip_127_96);
	dp_info("dest_ip_95_64 0x%x", flow_tuple->dest_ip_95_64);
	dp_info("dest_ip_63_32 0x%x", flow_tuple->dest_ip_63_32);
	dp_info("dest_ip_31_0 0x%x", flow_tuple->dest_ip_31_0);
	dp_info("src_ip_127_96 0x%x", flow_tuple->src_ip_127_96);
	dp_info("src_ip_95_64 0x%x", flow_tuple->src_ip_95_64);
	dp_info("src_ip_63_32 0x%x", flow_tuple->src_ip_63_32);
	dp_info("src_ip_31_0 0x%x", flow_tuple->src_ip_31_0);
	dp_info("dest_port 0x%x", flow_tuple->dest_port);
	dp_info("src_port 0x%x", flow_tuple->src_port);
	dp_info("l4_protocol 0x%x", flow_tuple->l4_protocol);
}

/**
 * get_flow_tuple_from_nbuf() - Get the flow tuple from an msdu
 * @hal_soc_hdl: Handle to hal soc
 * @flow_tuple_info: return argument where the flow tuple is populated
 * @nbuf: msdu from which the flow tuple is extracted
 * @rx_tlv_hdr: Pointer to msdu TLVs
 *
 * Return: None
 */
static void
get_flow_tuple_from_nbuf(hal_soc_handle_t hal_soc_hdl,
			 struct cdp_rx_flow_tuple_info *flow_tuple_info,
			 qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr)
{
	struct iphdr *iph;
	struct tcphdr *tcph;
	uint32_t ip_hdr_offset = HAL_RX_TLV_GET_IP_OFFSET(rx_tlv_hdr);
	uint32_t tcp_hdr_offset = HAL_RX_TLV_GET_TCP_OFFSET(rx_tlv_hdr);
	uint32_t l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(hal_soc_hdl,
								    rx_tlv_hdr);

	flow_tuple_info->tuple_populated = true;

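	/*
	 * Temporarily strip the Rx TLVs and L2 padding so the IP and TCP
	 * headers can be read at their TLV-reported offsets; the head is
	 * restored with qdf_nbuf_push_head() once the tuple is parsed.
	 */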
	qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN + l2_hdr_offset);

	iph = (struct iphdr *)(qdf_nbuf_data(nbuf) + ip_hdr_offset);
	tcph = (struct tcphdr *)(qdf_nbuf_data(nbuf) + ip_hdr_offset +
				 tcp_hdr_offset);

	flow_tuple_info->dest_ip_31_0 = qdf_ntohl(iph->daddr);
	flow_tuple_info->dest_ip_63_32 = 0;
	flow_tuple_info->dest_ip_95_64 = 0;
	flow_tuple_info->dest_ip_127_96 =
				HAL_IP_DA_SA_PREFIX_IPV4_COMPATIBLE_IPV6;

	flow_tuple_info->src_ip_31_0 = qdf_ntohl(iph->saddr);
	flow_tuple_info->src_ip_63_32 = 0;
	flow_tuple_info->src_ip_95_64 = 0;
	flow_tuple_info->src_ip_127_96 =
				HAL_IP_DA_SA_PREFIX_IPV4_COMPATIBLE_IPV6;

	flow_tuple_info->dest_port = qdf_ntohs(tcph->dest);
	flow_tuple_info->src_port = qdf_ntohs(tcph->source);
	flow_tuple_info->l4_protocol = iph->protocol;
	dp_fisa_debug("l4_protocol %d", flow_tuple_info->l4_protocol);

	qdf_nbuf_push_head(nbuf, RX_PKT_TLVS_LEN + l2_hdr_offset);

	dp_fisa_debug("head_skb: %pK head_skb->next:%pK head_skb->data:%pK len %d data_len %d",
		      nbuf, qdf_nbuf_next(nbuf), qdf_nbuf_data(nbuf), nbuf->len,
		      nbuf->data_len);
}

/**
 * dp_rx_fisa_setup_hw_fse() - Populate a flow entry in the DDR flow table
 * @fisa_hdl: Handle to FISA context
 * @hashed_flow_idx: Index into the flow table
 * @rx_flow_info: tuple to be populated in the flow table
 * @flow_steer_info: REO index to which the flow is to be steered
 *
 * Return: Pointer to DDR flow table entry
 */
static void *
dp_rx_fisa_setup_hw_fse(struct dp_rx_fst *fisa_hdl,
			uint32_t hashed_flow_idx,
			struct cdp_rx_flow_tuple_info *rx_flow_info,
			uint32_t flow_steer_info)
{
	struct hal_rx_flow flow;
	void *hw_fse;

	/* REO destination index starts from 1 */
	flow.reo_destination_indication = flow_steer_info + 1;
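	/*
	 * fse_metadata is an opaque per-flow tag stored with the entry;
	 * the magic value below appears to serve only as a debug marker
	 * (an assumption based on the constant used).
	 */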
	flow.fse_metadata = 0xDEADBEEF;
	flow.tuple_info.dest_ip_127_96 = rx_flow_info->dest_ip_127_96;
	flow.tuple_info.dest_ip_95_64 = rx_flow_info->dest_ip_95_64;
	flow.tuple_info.dest_ip_63_32 = rx_flow_info->dest_ip_63_32;
	flow.tuple_info.dest_ip_31_0 = rx_flow_info->dest_ip_31_0;
	flow.tuple_info.src_ip_127_96 = rx_flow_info->src_ip_127_96;
	flow.tuple_info.src_ip_95_64 = rx_flow_info->src_ip_95_64;
	flow.tuple_info.src_ip_63_32 = rx_flow_info->src_ip_63_32;
	flow.tuple_info.src_ip_31_0 = rx_flow_info->src_ip_31_0;
	flow.tuple_info.dest_port = rx_flow_info->dest_port;
	flow.tuple_info.src_port = rx_flow_info->src_port;
	flow.tuple_info.l4_protocol = rx_flow_info->l4_protocol;
	flow.reo_destination_handler = HAL_RX_FSE_REO_DEST_FT;
	hw_fse = hal_rx_flow_setup_fse(fisa_hdl->hal_rx_fst, hashed_flow_idx,
				       &flow);
	dp_rx_dump_fisa_table(fisa_hdl->soc_hdl);

	return hw_fse;
}

/**
 * dp_rx_fisa_update_sw_ft_entry() - Helper function to populate a SW FT entry
 * @sw_ft_entry: Pointer to software flow table entry
 * @flow_hash: flow_hash for the flow
 * @vdev: dp_vdev saved in the FT, used later when flushing the flow
 * @flow_id: Flow ID of the flow
 *
 * Return: NONE
 */
static void dp_rx_fisa_update_sw_ft_entry(struct dp_fisa_rx_sw_ft *sw_ft_entry,
					  uint32_t flow_hash,
					  struct dp_vdev *vdev,
					  uint32_t flow_id)
{
	sw_ft_entry->flow_hash = flow_hash;
	sw_ft_entry->flow_id = flow_id;
	sw_ft_entry->vdev = vdev;
}

/**
 * is_same_flow() - Compare two flow tuples to decide if they match
 * @tuple1: flow tuple 1
 * @tuple2: flow tuple 2
 *
 * Return: true if they match, false if they differ
 */
static bool is_same_flow(struct cdp_rx_flow_tuple_info *tuple1,
			 struct cdp_rx_flow_tuple_info *tuple2)
{
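	/*
	 * Branchless compare: the XOR of each field pair is non-zero iff
	 * the fields differ, so OR-ing all the XOR results yields a single
	 * zero/non-zero verdict for the whole tuple.
	 */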
	if ((tuple1->src_port ^ tuple2->src_port) |
	    (tuple1->dest_port ^ tuple2->dest_port) |
	    (tuple1->src_ip_31_0 ^ tuple2->src_ip_31_0) |
	    (tuple1->src_ip_63_32 ^ tuple2->src_ip_63_32) |
	    (tuple1->src_ip_95_64 ^ tuple2->src_ip_95_64) |
	    (tuple1->src_ip_127_96 ^ tuple2->src_ip_127_96) |
	    (tuple1->dest_ip_31_0 ^ tuple2->dest_ip_31_0) |
	    /* DST IP check not required? */
	    (tuple1->dest_ip_63_32 ^ tuple2->dest_ip_63_32) |
	    (tuple1->dest_ip_95_64 ^ tuple2->dest_ip_95_64) |
	    (tuple1->dest_ip_127_96 ^ tuple2->dest_ip_127_96) |
	    (tuple1->l4_protocol ^ tuple2->l4_protocol))
		return false;
	else
		return true;
}

/**
 * dp_rx_flow_send_htt_operation_cmd() - Invalidate FSE cache on FT change
 * @pdev: handle to DP pdev
 * @fse_op: Cache operation code
 * @rx_flow_tuple: flow tuple whose entry has to be invalidated
 *
 * Return: QDF_STATUS_SUCCESS if the HTT command was sent to FW successfully
 */
static QDF_STATUS
dp_rx_flow_send_htt_operation_cmd(struct dp_pdev *pdev,
				  enum dp_htt_flow_fst_operation fse_op,
				  struct cdp_rx_flow_tuple_info *rx_flow_tuple)
{
	struct dp_htt_rx_flow_fst_operation fse_op_cmd;
	struct cdp_rx_flow_info rx_flow_info;

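	/*
	 * Wrap the tuple in a cdp_rx_flow_info and send the cache operation
	 * to FW so that its cached FSE state is refreshed from the updated
	 * DDR flow table.
	 */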
	rx_flow_info.is_addr_ipv4 = true;
	rx_flow_info.op_code = CDP_FLOW_FST_ENTRY_ADD;
	qdf_mem_copy(&rx_flow_info.flow_tuple_info, rx_flow_tuple,
		     sizeof(struct cdp_rx_flow_tuple_info));
	rx_flow_info.fse_metadata = 0xDADA;
	fse_op_cmd.pdev_id = pdev->pdev_id;
	fse_op_cmd.op_code = fse_op;
	fse_op_cmd.rx_flow = &rx_flow_info;

	return dp_htt_rx_flow_fse_operation(pdev, &fse_op_cmd);
}

/**
 * dp_rx_fisa_add_ft_entry() - Add a new flow to the HW and SW FT if not present
 * @fisa_hdl: handle to FISA context
 * @flow_idx_hash: Hashed flow index
 * @nbuf: nbuf belonging to the new flow
 * @vdev: Handle to DP vdev to save in the SW flow table
 * @rx_tlv_hdr: Pointer to TLV header
 *
 * Return: pointer to SW FT entry on success, NULL otherwise
 */
static struct dp_fisa_rx_sw_ft *
dp_rx_fisa_add_ft_entry(struct dp_rx_fst *fisa_hdl,
			uint32_t flow_idx_hash,
			qdf_nbuf_t nbuf, struct dp_vdev *vdev,
			uint8_t *rx_tlv_hdr)
{
	struct dp_fisa_rx_sw_ft *sw_ft_entry;
	uint32_t flow_hash;
	uint32_t hashed_flow_idx;
	uint32_t skid_count = 0, max_skid_length;
	struct cdp_rx_flow_tuple_info rx_flow_tuple_info;
	QDF_STATUS status;
	bool is_fst_updated = false;
	bool is_flow_tcp, is_flow_udp, is_flow_ipv6;
	hal_soc_handle_t hal_soc_hdl = fisa_hdl->soc_hdl->hal_soc;
	uint32_t reo_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);

	is_flow_tcp = HAL_RX_TLV_GET_TCP_PROTO(rx_tlv_hdr);
	is_flow_udp = HAL_RX_TLV_GET_UDP_PROTO(rx_tlv_hdr);
	is_flow_ipv6 = HAL_RX_TLV_GET_IPV6(rx_tlv_hdr);

	if (is_flow_ipv6 || !(is_flow_tcp || is_flow_udp)) {
		dp_fisa_debug("Not a TCP or UDP IPv4 flow");
		return NULL;
	}

	/* Get the hash from the TLV. The FSE FT Toeplitz hash is the same
	 * as the common parser hash available in the TLV, since both use
	 * the same Toeplitz key.
	 */
	rx_flow_tuple_info.tuple_populated = false;
	flow_hash = flow_idx_hash;
	hashed_flow_idx = flow_hash & fisa_hdl->hash_mask;
	max_skid_length = fisa_hdl->max_skid_length;

	dp_fisa_debug("flow_hash 0x%x hashed_flow_idx 0x%x", flow_hash,
		      hashed_flow_idx);
	dp_fisa_debug("max_skid_length 0x%x", max_skid_length);
	qdf_spin_lock_bh(&fisa_hdl->dp_rx_fst_lock);
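	/*
	 * Open addressing with linear probing: starting at the hashed slot,
	 * either claim the first unpopulated entry, stop on an entry that
	 * already holds this tuple, or skid to the next slot on a hash
	 * collision, for at most max_skid_length probes.
	 */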
	do {
		sw_ft_entry = &(((struct dp_fisa_rx_sw_ft *)
					fisa_hdl->base)[hashed_flow_idx]);
		if (!sw_ft_entry->is_populated) {
			/* Add SW FT entry */
			dp_rx_fisa_update_sw_ft_entry(sw_ft_entry,
						      flow_hash, vdev,
						      hashed_flow_idx);
			if (!rx_flow_tuple_info.tuple_populated)
				get_flow_tuple_from_nbuf(hal_soc_hdl,
							 &rx_flow_tuple_info,
							 nbuf, rx_tlv_hdr);

			/* Add HW FT entry */
			sw_ft_entry->hw_fse =
				dp_rx_fisa_setup_hw_fse(fisa_hdl,
							hashed_flow_idx,
							&rx_flow_tuple_info,
							reo_id);
			sw_ft_entry->is_populated = true;
			sw_ft_entry->napi_id = reo_id;
			qdf_mem_copy(&sw_ft_entry->rx_flow_tuple_info,
				     &rx_flow_tuple_info,
				     sizeof(struct cdp_rx_flow_tuple_info));

			sw_ft_entry->is_flow_tcp = is_flow_tcp;
			sw_ft_entry->is_flow_udp = is_flow_udp;

			is_fst_updated = true;
			fisa_hdl->add_flow_count++;
			break;
		}
		/* else */
		if (!rx_flow_tuple_info.tuple_populated)
			get_flow_tuple_from_nbuf(hal_soc_hdl,
						 &rx_flow_tuple_info,
						 nbuf, rx_tlv_hdr);

		if (is_same_flow(&sw_ft_entry->rx_flow_tuple_info,
				 &rx_flow_tuple_info)) {
			dp_fisa_debug("It is the same flow, FSE entry idx %d",
				      hashed_flow_idx);
			/* The incoming flow tuple matches the existing
			 * entry; these are subsequent skbs of the same
			 * flow. The entry made earlier is not yet
			 * reflected in the FSE cache.
			 */
			break;
		}
		/* else */
		/* hash collision, move to the next FT entry */
		dp_fisa_debug("Hash collision %d",
			      fisa_hdl->hash_collision_cnt);
		fisa_hdl->hash_collision_cnt++;
#ifdef NOT_YET /* assist flow eviction algorithm */
		/* uint32_t lru_ft_entry_time = 0xffffffff, lru_ft_entry_idx = 0; */
		if (fisa_hdl->hw_ft_entry->timestamp < lru_ft_entry_time) {
			lru_ft_entry_time = fisa_hdl->hw_ft_entry->timestamp;
			lru_ft_entry_idx = hashed_flow_idx;
		}
#endif
		skid_count++;
		hashed_flow_idx++;
		hashed_flow_idx &= fisa_hdl->hash_mask;
	} while (skid_count <= max_skid_length);

	/*
	 * fisa_hdl->flow_eviction_cnt++;
	 * if (skid_count > max_skid_length)
	 *	Remove LRU flow from HW FT
	 *	Remove LRU flow from SW FT
	 */
	qdf_spin_unlock_bh(&fisa_hdl->dp_rx_fst_lock);

	if (skid_count > max_skid_length) {
		dp_fisa_debug("Max skid length reached, flow cannot be added; evict an existing flow");
		return NULL;
	}

	/*
	 * Send an HTT cache invalidation command to firmware to
	 * reflect the flow update.
	 */
	if (is_fst_updated) {
		status = dp_rx_flow_send_htt_operation_cmd(vdev->pdev,
					DP_HTT_FST_CACHE_INVALIDATE_FULL,
					&rx_flow_tuple_info);
		if (QDF_STATUS_SUCCESS != status) {
			dp_err("Failed to send the cache invalidation\n");
			/* TBD: remove the flow from the SW and HW flow
			 * tables. Not a big impact, the cache entry gets
			 * updated later.
			 */
		}
	}
	dp_fisa_debug("sw_ft_entry %pK", sw_ft_entry);
	return sw_ft_entry;
}

/**
 * is_flow_idx_valid() - Function to decide if the flow_idx TLV is valid
 * @flow_invalid: flow invalid TLV value
 * @flow_timeout: flow timeout TLV value, set when the FSE timed out during
 *		  the flow search
 *
 * Return: True if the flow_idx value is valid
 */
static bool is_flow_idx_valid(bool flow_invalid, bool flow_timeout)
{
	if (!flow_invalid && !flow_timeout)
		return true;
	else
		return false;
}

/**
 * dp_rx_get_fisa_flow() - Get the FT entry corresponding to an incoming nbuf
 * @fisa_hdl: handle to FISA context
 * @vdev: handle to DP vdev
 * @nbuf: incoming msdu
 *
 * Return: handle to the SW FT entry for the nbuf flow
 */
static struct dp_fisa_rx_sw_ft *
dp_rx_get_fisa_flow(struct dp_rx_fst *fisa_hdl, struct dp_vdev *vdev,
		    qdf_nbuf_t nbuf)
{
	uint8_t *rx_tlv_hdr;
	uint32_t flow_idx;
	bool flow_invalid, flow_timeout, flow_idx_valid;
	struct dp_fisa_rx_sw_ft *sw_ft_entry = NULL;
	struct dp_fisa_rx_sw_ft *sw_ft_base = (struct dp_fisa_rx_sw_ft *)
								fisa_hdl->base;
	hal_soc_handle_t hal_soc_hdl = fisa_hdl->soc_hdl->hal_soc;

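	/*
	 * TCP frames are not looked up here: sw_ft_entry is still NULL at
	 * this point, so returning it makes TCP bypass FISA, which appears
	 * to be limited to UDP flows in this version of the code.
	 */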
	if (QDF_NBUF_CB_RX_TCP_PROTO(nbuf))
		return sw_ft_entry;

	rx_tlv_hdr = qdf_nbuf_data(nbuf);
	hal_rx_msdu_get_flow_params(hal_soc_hdl, rx_tlv_hdr, &flow_invalid,
				    &flow_timeout, &flow_idx);

	dp_fisa_debug("nbuf %pK fl_idx %d fl_inv %d fl_timeout %d",
		      nbuf, flow_idx, flow_invalid, flow_timeout);

	flow_idx_valid = is_flow_idx_valid(flow_invalid, flow_timeout);
	if (flow_idx_valid) {
		qdf_assert_always(flow_idx < fisa_hdl->max_entries);
		dp_fisa_debug("flow_idx is valid 0x%x", flow_idx);
		return &sw_ft_base[flow_idx];
	}

	/* else it is a new flow, add an entry to the FT */
	sw_ft_entry = dp_rx_fisa_add_ft_entry(fisa_hdl, flow_idx, nbuf, vdev,
					      rx_tlv_hdr);

	return sw_ft_entry;
}

/**
 * dp_add_nbuf_to_fisa_flow() - Aggregate an incoming nbuf
 * @fisa_hdl: handle to FISA context
 * @vdev: handle to DP vdev
 * @nbuf: Incoming nbuf
 * @fisa_flow: Handle to the SW flow entry
 *
 * Return: Success on aggregation
 */
static int dp_add_nbuf_to_fisa_flow(struct dp_rx_fst *fisa_hdl,
				    struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				    struct dp_fisa_rx_sw_ft *fisa_flow)
{
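	/*
	 * SW aggregation is not implemented in this version of the file;
	 * every nbuf is reported as not eligible so the caller delivers
	 * it individually.
	 */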
	return FISA_AGGR_NOT_ELIGIBLE;
}

/**
 * dp_fisa_rx() - Entry function of FISA to handle aggregation
 * @soc: core txrx main context
 * @vdev: Handle to DP vdev
 * @nbuf_list: List of nbufs to be aggregated
 *
 * Return: Success on aggregation
 */
QDF_STATUS dp_fisa_rx(struct dp_soc *soc, struct dp_vdev *vdev,
		      qdf_nbuf_t nbuf_list)
{
	struct dp_rx_fst *dp_fisa_rx_hdl = soc->rx_fst;
	qdf_nbuf_t head_nbuf;
	qdf_nbuf_t next_nbuf;
	struct dp_fisa_rx_sw_ft *fisa_flow;
	int fisa_ret;

	head_nbuf = nbuf_list;

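	/*
	 * For each msdu in the list there are three outcomes: it is handed
	 * to FISA for aggregation, it is a fragmented skb in which case the
	 * flow is flushed and the frame delivered as-is, or no flow entry
	 * exists and the frame is delivered after skipping the Rx TLVs.
	 */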
	while (head_nbuf) {
		next_nbuf = head_nbuf->next;
		qdf_nbuf_set_next(head_nbuf, NULL);

		/* Add a new flow if there is no ongoing flow */
		fisa_flow = dp_rx_get_fisa_flow(dp_fisa_rx_hdl, vdev,
						head_nbuf);

		/* Fragmented skbs are not handled via FISA;
		 * flush the flow and deliver the frame to the rx_thread
		 */
		if (qdf_unlikely(qdf_nbuf_get_ext_list(head_nbuf))) {
			dp_fisa_debug("Fragmented skb, will not be FISAed");
			if (fisa_flow)
				dp_rx_fisa_flush_flow(vdev, fisa_flow);
			goto deliver_nbuf;
		}

		if (!fisa_flow)
			goto pull_nbuf;

		fisa_ret = dp_add_nbuf_to_fisa_flow(dp_fisa_rx_hdl, vdev,
						    head_nbuf, fisa_flow);
		if (fisa_ret == FISA_AGGR_DONE)
			goto next_msdu;
		else
			qdf_assert(0);

pull_nbuf:
		nbuf_skip_rx_pkt_tlv(dp_fisa_rx_hdl->soc_hdl->hal_soc,
				     head_nbuf);

deliver_nbuf: /* Deliver without FISA */
		qdf_nbuf_set_next(head_nbuf, NULL);
		hex_dump_skb_data(head_nbuf, false);
		vdev->osif_rx(vdev->osif_vdev, head_nbuf);
next_msdu:
		head_nbuf = next_nbuf;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_rx_fisa_flush() - Flush aggregated flows at the end of the Rx context
 * @soc: core txrx main context
 * @napi_id: REO number; flows received on this REO are flushed
 *
 * Return: Success on flushing the flows for the REO
 */
QDF_STATUS dp_rx_fisa_flush(struct dp_soc *soc, int napi_id)
{
	struct dp_rx_fst *fisa_hdl = soc->rx_fst;
	struct dp_fisa_rx_sw_ft *sw_ft_entry =
		(struct dp_fisa_rx_sw_ft *)fisa_hdl->base;
	int ft_size = fisa_hdl->max_entries;
	int i;

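	/*
	 * Walk the whole SW flow table and flush only populated flows that
	 * were received on this REO/NAPI ring.
	 */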
	for (i = 0; i < ft_size; i++) {
		if (sw_ft_entry[i].napi_id == napi_id &&
		    sw_ft_entry[i].is_populated) {
			dp_fisa_debug("flushing %d %pK napi_id %d\n", i,
				      &sw_ft_entry[i], napi_id);
			/* Save the ip_len and checksum as the hardware
			 * assist is always relative to its start of
			 * aggregation
			 */
			sw_ft_entry[i].napi_flush_cumulative_l4_checksum =
				sw_ft_entry[i].cumulative_l4_checksum;
			sw_ft_entry[i].napi_flush_cumulative_ip_length =
				sw_ft_entry[i].hal_cumultive_ip_len;
			dp_fisa_debug("napi_flush_cumulative_ip_length 0x%x",
				sw_ft_entry[i].napi_flush_cumulative_ip_length);

			dp_rx_fisa_flush_flow(sw_ft_entry[i].vdev,
					      &sw_ft_entry[i]);
			sw_ft_entry[i].cur_aggr = 0;
		}
	}

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_rx_dump_fisa_stats(struct dp_soc *soc)
{
	struct dp_rx_fst *rx_fst = soc->rx_fst;
	struct dp_fisa_rx_sw_ft *sw_ft_entry =
		&((struct dp_fisa_rx_sw_ft *)rx_fst->base)[0];
	int ft_size = rx_fst->max_entries;
	int i;

	dp_info("Num of flows programmed %d", rx_fst->add_flow_count);
	dp_info("Num of flows evicted %d", rx_fst->del_flow_count);
	dp_info("Hash collision count %d", rx_fst->hash_collision_cnt);

	for (i = 0; i < ft_size; i++, sw_ft_entry++) {
		if (!sw_ft_entry->is_populated)
			continue;

		dp_info("Flow ID %d is %s on napi/ring %d",
			sw_ft_entry->flow_id,
			sw_ft_entry->is_flow_udp ? "udp" : "tcp",
			sw_ft_entry->napi_id);
		dp_info("num msdu aggr %d", sw_ft_entry->aggr_count);
		dp_info("flush count %d", sw_ft_entry->flush_count);
		dp_info("bytes_aggregated %d", sw_ft_entry->bytes_aggregated);
		/* Guard against divide-by-zero for flows never flushed */
		if (sw_ft_entry->flush_count)
			dp_info("avg aggregation %d",
				sw_ft_entry->bytes_aggregated /
				sw_ft_entry->flush_count);
		print_flow_tuple(&sw_ft_entry->rx_flow_tuple_info);
	}
	return QDF_STATUS_SUCCESS;
}