/*
 * Copyright (c) 2011-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <qdf_atomic.h>		/* qdf_atomic_inc, etc. */
#include <qdf_lock.h>		/* qdf_os_spinlock */
#include <qdf_time.h>		/* qdf_system_ticks, etc. */
#include <qdf_nbuf.h>		/* qdf_nbuf_t */
#include <qdf_net_types.h>	/* QDF_NBUF_TX_EXT_TID_INVALID */

#include <cds_queue.h>		/* TAILQ */
#ifdef QCA_COMPUTE_TX_DELAY
#include <enet.h>		/* ethernet_hdr_t, etc. */
#include <ipv6_defs.h>		/* ipv6_traffic_class */
#endif

#include <ol_txrx_api.h>	/* ol_txrx_vdev_handle, etc. */
#include <ol_htt_tx_api.h>	/* htt_tx_compl_desc_id */
#include <ol_txrx_htt_api.h>	/* htt_tx_status */

#include <ol_ctrl_txrx_api.h>
#include <cdp_txrx_tx_delay.h>
#include <ol_txrx_types.h>	/* ol_txrx_vdev_t, etc. */
#include <ol_tx_desc.h>		/* ol_tx_desc_find, ol_tx_desc_frame_free */
#ifdef QCA_COMPUTE_TX_DELAY
#include <ol_tx_classify.h>	/* ol_tx_dest_addr_find */
#endif
#include <ol_txrx_internal.h>	/* OL_TX_DESC_NO_REFS, etc. */
#include <ol_osif_txrx_api.h>
#include <ol_tx.h>		/* ol_tx_reinject */
#include <ol_tx_send.h>

#include <ol_cfg.h>		/* ol_cfg_is_high_latency */
#include <ol_tx_sched.h>
#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
#include <ol_txrx_encap.h>	/* OL_TX_RESTORE_HDR, etc. */
#endif
#include <ol_tx_queue.h>
#include <ol_txrx.h>
#include <pktlog_ac_fmt.h>
#include <cdp_txrx_handle.h>

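/**
 * ol_tx_init_pdev() - initialise the tx side of the pdev
 * @pdev: physical device instance
 *
 * Seeds pdev->target_tx_credit with the configured target tx credit.
 *
 * Return: none
 */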
void ol_tx_init_pdev(ol_txrx_pdev_handle pdev)
{
	qdf_atomic_add(ol_cfg_target_tx_credit(pdev->ctrl_pdev),
		       &pdev->target_tx_credit);
}

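/**
 * ol_tx_reinject() - re-send an msdu through the target
 * @vdev: virtual device transmitting the frame
 * @msdu: network buffer to send
 * @peer_id: ID of the destination peer
 *
 * Allocates a tx descriptor for @msdu, marks the HTT descriptor as
 * postponed, sets the destination peer ID, and hands the frame to
 * ol_tx_send().
 *
 * Return: NULL on success, or @msdu if no tx descriptor was available
 */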
qdf_nbuf_t ol_tx_reinject(struct ol_txrx_vdev_t *vdev,
			  qdf_nbuf_t msdu, uint16_t peer_id)
{
	struct ol_tx_desc_t *tx_desc = NULL;
	struct ol_txrx_msdu_info_t msdu_info;

	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
	msdu_info.htt.info.ext_tid = HTT_TX_EXT_TID_INVALID;
	msdu_info.peer = NULL;
	msdu_info.htt.action.tx_comp_req = 0;
	msdu_info.tso_info.is_tso = 0;

	tx_desc = ol_tx_prepare_ll(vdev, msdu, &msdu_info);
	if (!tx_desc)
		return msdu;

	HTT_TX_DESC_POSTPONED_SET(*((uint32_t *)(tx_desc->htt_tx_desc)), true);

	htt_tx_desc_set_peer_id(tx_desc->htt_tx_desc, peer_id);

	ol_tx_send(vdev->pdev, tx_desc, msdu, vdev->vdev_id);

	return NULL;
}

/*
 * The TXRX module doesn't accept tx frames unless the target has
 * enough descriptors for them.
 * For LL, the TXRX descriptor pool is sized to match the target's
 * descriptor pool. Hence, if the descriptor allocation in TXRX
 * succeeds, that guarantees that the target has room to accept
 * the new tx frame.
 */
struct ol_tx_desc_t *
ol_tx_prepare_ll(ol_txrx_vdev_handle vdev,
		 qdf_nbuf_t msdu,
		 struct ol_txrx_msdu_info_t *msdu_info)
{
	struct ol_tx_desc_t *tx_desc;
	struct ol_txrx_pdev_t *pdev = vdev->pdev;

	(msdu_info)->htt.info.frame_type = pdev->htt_pkt_type;
	tx_desc = ol_tx_desc_ll(pdev, vdev, msdu, msdu_info);
	if (qdf_unlikely(!tx_desc)) {
		/*
		 * If TSO packet, free associated
		 * remaining TSO segment descriptors
		 */
		if (qdf_nbuf_is_tso(msdu))
			ol_free_remaining_tso_segs(
					vdev, msdu_info, true);
		TXRX_STATS_MSDU_LIST_INCR(
				pdev, tx.dropped.host_reject, msdu);
		return NULL;
	}

	return tx_desc;
}

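/**
 * ol_tx_non_std_ll() - send a list of non-standard tx frames on an
 *			LL (low-latency) pdev
 * @vdev: virtual device transmitting the frames
 * @tx_spec: flags describing how the frames should be handled
 *	     (OL_TX_SPEC_NO_FREE, OL_TX_SPEC_TSO, etc.)
 * @msdu_list: list of network buffers to send
 *
 * Return: NULL if all MSDUs were accepted, else the first unaccepted
 *	   MSDU (and, linked to it, all subsequent MSDUs in the list)
 */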
qdf_nbuf_t
ol_tx_non_std_ll(struct ol_txrx_vdev_t *vdev,
		 enum ol_tx_spec tx_spec,
		 qdf_nbuf_t msdu_list)
{
	qdf_nbuf_t msdu = msdu_list;
	htt_pdev_handle htt_pdev = vdev->pdev->htt_pdev;
	struct ol_txrx_msdu_info_t msdu_info;

	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
	msdu_info.htt.action.tx_comp_req = 0;

	/*
	 * The msdu_list variable could be used instead of the msdu var,
	 * but just to clarify which operations are done on a single MSDU
	 * vs. a list of MSDUs, use a distinct variable for single MSDUs
	 * within the list.
	 */
	while (msdu) {
		qdf_nbuf_t next;
		struct ol_tx_desc_t *tx_desc = NULL;

		msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
		msdu_info.peer = NULL;
		msdu_info.tso_info.is_tso = 0;

		tx_desc = ol_tx_prepare_ll(vdev, msdu, &msdu_info);
		if (!tx_desc)
			return msdu;

		/*
		 * The netbuf may get linked into a different list inside the
		 * ol_tx_send function, so store the next pointer before the
		 * tx_send call.
		 */
		next = qdf_nbuf_next(msdu);

		if (tx_spec != OL_TX_SPEC_STD) {
			if (tx_spec & OL_TX_SPEC_NO_FREE) {
				tx_desc->pkt_type = OL_TX_FRM_NO_FREE;
			} else if (tx_spec & OL_TX_SPEC_TSO) {
				tx_desc->pkt_type = OL_TX_FRM_TSO;
			} else if (tx_spec & OL_TX_SPEC_NWIFI_NO_ENCRYPT) {
				uint8_t sub_type =
					ol_txrx_tx_raw_subtype(tx_spec);
				htt_tx_desc_type(htt_pdev, tx_desc->htt_tx_desc,
						 htt_pkt_type_native_wifi,
						 sub_type);
			} else if (ol_txrx_tx_is_raw(tx_spec)) {
				/* different types of raw frames */
				uint8_t sub_type =
					ol_txrx_tx_raw_subtype(tx_spec);
				htt_tx_desc_type(htt_pdev, tx_desc->htt_tx_desc,
						 htt_pkt_type_raw, sub_type);
			}
		}
		/*
		 * If debug display is enabled, show the meta-data being
		 * downloaded to the target via the HTT tx descriptor.
		 */
		htt_tx_desc_display(tx_desc->htt_tx_desc);
		ol_tx_send(vdev->pdev, tx_desc, msdu, vdev->vdev_id);
		msdu = next;
	}
	return NULL;	/* all MSDUs were accepted */
}

#if defined(HELIUMPLUS)
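/**
 * ol_txrx_dump_frag_desc() - dump the HTT fragment descriptor of a tx
 *			      descriptor for debugging
 * @msg: caller-provided label (not printed by this function)
 * @tx_desc: tx descriptor whose fragment descriptor is dumped
 *
 * Return: none
 */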
void ol_txrx_dump_frag_desc(char *msg, struct ol_tx_desc_t *tx_desc)
{
	uint32_t *frag_ptr_i_p;
	int i;

	ol_txrx_err("OL TX Descriptor 0x%pK msdu_id %d\n",
		    tx_desc, tx_desc->id);
	ol_txrx_err("HTT TX Descriptor vaddr: 0x%pK paddr: %pad",
		    tx_desc->htt_tx_desc, &tx_desc->htt_tx_desc_paddr);
	ol_txrx_err("Fragment Descriptor 0x%pK (paddr=%pad)",
		    tx_desc->htt_frag_desc, &tx_desc->htt_frag_desc_paddr);

	/*
	 * From htt_tx_desc_frag() it appears that tx_desc->htt_frag_desc
	 * is already dereferenceable (i.e. in virtual address space).
	 */
	frag_ptr_i_p = tx_desc->htt_frag_desc;

	/* Dump 6 words of TSO flags */
	print_hex_dump(KERN_DEBUG, "MLE Desc:TSO Flags:  ",
		       DUMP_PREFIX_NONE, 8, 4,
		       frag_ptr_i_p, 24, true);

	frag_ptr_i_p += 6; /* Skip 6 words of TSO flags */

	i = 0;
	while (*frag_ptr_i_p) {
		print_hex_dump(KERN_DEBUG, "MLE Desc:Frag Ptr:  ",
			       DUMP_PREFIX_NONE, 8, 4,
			       frag_ptr_i_p, 8, true);
		i++;
		if (i > 5) /* max 6 times: frag_ptr0 to frag_ptr5 */
			break;
		/* jump to next pointer - skip length */
		frag_ptr_i_p += 2;
	}
}
#endif /* HELIUMPLUS */

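/**
 * ol_txrx_mgmt_tx_desc_alloc() - allocate and initialise a tx descriptor
 *				  for a management frame
 * @pdev: physical device transmitting the frame
 * @vdev: virtual device transmitting the frame
 * @tx_mgmt_frm: management frame network buffer
 * @tx_msdu_info: meta data for the management frame
 *
 * Return: pointer to the allocated tx descriptor, or NULL on failure
 */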
struct ol_tx_desc_t *
ol_txrx_mgmt_tx_desc_alloc(
	struct ol_txrx_pdev_t *pdev,
	struct ol_txrx_vdev_t *vdev,
	qdf_nbuf_t tx_mgmt_frm,
	struct ol_txrx_msdu_info_t *tx_msdu_info)
{
	struct ol_tx_desc_t *tx_desc;

	/* For LL, tx_comp_req is not used, so initialize it to 0 */
	tx_msdu_info->htt.action.tx_comp_req = 0;
	tx_desc = ol_tx_desc_ll(pdev, vdev, tx_mgmt_frm, tx_msdu_info);
	/* FIX THIS -
	 * The FW currently has trouble using the host's fragments table
	 * for management frames. Until this is fixed, rather than
	 * specifying the fragment table to the FW, specify just the
	 * address of the initial fragment.
	 */
#if defined(HELIUMPLUS)
	/* ol_txrx_dump_frag_desc("ol_txrx_mgmt_send(): after ol_tx_desc_ll",
	 *			  tx_desc);
	 */
#endif /* defined(HELIUMPLUS) */
	if (tx_desc) {
		/*
		 * Following the call to ol_tx_desc_ll, frag 0 is the
		 * HTT tx HW descriptor, and the frame payload is in
		 * frag 1.
		 */
		htt_tx_desc_frags_table_set(
				pdev->htt_pdev,
				tx_desc->htt_tx_desc,
				qdf_nbuf_get_frag_paddr(tx_mgmt_frm, 1),
				0, 0);
#if defined(HELIUMPLUS) && defined(HELIUMPLUS_DEBUG)
		ol_txrx_dump_frag_desc(
				"after htt_tx_desc_frags_table_set",
				tx_desc);
#endif /* defined(HELIUMPLUS) */
	}

	return tx_desc;
}

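/**
 * ol_txrx_mgmt_send_frame() - send a management frame to the target
 * @vdev: virtual device transmitting the frame
 * @tx_desc: tx descriptor already allocated for the frame
 * @tx_mgmt_frm: management frame network buffer
 * @tx_msdu_info: meta data for the management frame
 * @chanfreq: channel frequency to transmit on
 *
 * Return: 0 on success
 */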
int ol_txrx_mgmt_send_frame(
	struct ol_txrx_vdev_t *vdev,
	struct ol_tx_desc_t *tx_desc,
	qdf_nbuf_t tx_mgmt_frm,
	struct ol_txrx_msdu_info_t *tx_msdu_info,
	uint16_t chanfreq)
{
	struct ol_txrx_pdev_t *pdev = vdev->pdev;

	htt_tx_desc_set_chanfreq(tx_desc->htt_tx_desc, chanfreq);
	QDF_NBUF_CB_TX_PACKET_TRACK(tx_desc->netbuf) =
					QDF_NBUF_TX_PKT_MGMT_TRACK;
	ol_tx_send_nonstd(pdev, tx_desc, tx_mgmt_frm,
			  htt_pkt_type_mgmt);

	return 0;
}

#if defined(FEATURE_TSO)
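/**
 * ol_free_remaining_tso_segs() - free the TSO segments still attached
 *				  to @msdu_info
 * @vdev: virtual device that owns the frame
 * @msdu_info: meta data holding the TSO segment lists
 * @is_tso_seg_mapping_done: true if the segments were already DMA-mapped
 *			     and therefore must be unmapped before freeing
 *
 * Return: none
 */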
void ol_free_remaining_tso_segs(ol_txrx_vdev_handle vdev,
				struct ol_txrx_msdu_info_t *msdu_info,
				bool is_tso_seg_mapping_done)
{
	struct qdf_tso_seg_elem_t *next_seg;
	struct qdf_tso_seg_elem_t *free_seg = msdu_info->tso_info.curr_seg;
	struct ol_txrx_pdev_t *pdev;
	bool is_last_seg = false;

	if (qdf_unlikely(!vdev)) {
		ol_txrx_err("vdev is null");
		return;
	}

	pdev = vdev->pdev;
	if (qdf_unlikely(!pdev)) {
		ol_txrx_err("pdev is null");
		return;
	}

	/*
	 * The TSO segments are already mapped, therefore:
	 * 1. unmap the tso segments,
	 * 2. free the tso num segment if this is the last segment, and
	 * 3. free the tso segments.
	 */

	if (is_tso_seg_mapping_done) {
		struct qdf_tso_num_seg_elem_t *tso_num_desc =
			msdu_info->tso_info.tso_num_seg_list;

		if (qdf_unlikely(!tso_num_desc)) {
			ol_txrx_err("TSO common info is NULL!");
			return;
		}

		while (free_seg) {
			qdf_spin_lock_bh(&pdev->tso_seg_pool.tso_mutex);
			tso_num_desc->num_seg.tso_cmn_num_seg--;

			is_last_seg = (tso_num_desc->num_seg.tso_cmn_num_seg ==
				       0) ? true : false;
			qdf_nbuf_unmap_tso_segment(pdev->osdev, free_seg,
						   is_last_seg);
			qdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);

			if (is_last_seg) {
				ol_tso_num_seg_free(pdev,
						    msdu_info->tso_info.
						    tso_num_seg_list);
				msdu_info->tso_info.tso_num_seg_list = NULL;
			}

			next_seg = free_seg->next;
			free_seg->force_free = 1;
			ol_tso_free_segment(pdev, free_seg);
			free_seg = next_seg;
		}
	} else {
		/*
		 * The TSO segments are not mapped, therefore
		 * free only the tso segments.
		 */
		while (free_seg) {
			next_seg = free_seg->next;
			free_seg->force_free = 1;
			ol_tso_free_segment(pdev, free_seg);
			free_seg = next_seg;
		}
	}
}

/**
 * ol_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO
 * related information in the msdu_info meta data
 * @vdev: virtual device handle
 * @msdu: network buffer
 * @msdu_info: meta data associated with the msdu
 *
 * Return: 0 - success, >0 - error
 */
uint8_t ol_tx_prepare_tso(ol_txrx_vdev_handle vdev,
			  qdf_nbuf_t msdu,
			  struct ol_txrx_msdu_info_t *msdu_info)
{
	msdu_info->tso_info.curr_seg = NULL;
	if (qdf_nbuf_is_tso(msdu)) {
		int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
		struct qdf_tso_num_seg_elem_t *tso_num_seg;

		msdu_info->tso_info.tso_num_seg_list = NULL;
		msdu_info->tso_info.tso_seg_list = NULL;
		msdu_info->tso_info.num_segs = num_seg;
		while (num_seg) {
			struct qdf_tso_seg_elem_t *tso_seg =
				ol_tso_alloc_segment(vdev->pdev);
			if (tso_seg) {
				qdf_tso_seg_dbg_record(tso_seg,
						       TSOSEG_LOC_PREPARETSO);
				tso_seg->next =
					msdu_info->tso_info.tso_seg_list;
				msdu_info->tso_info.tso_seg_list
					= tso_seg;
				num_seg--;
			} else {
				/* Free the TSO segments allocated so far */
				msdu_info->tso_info.curr_seg =
					msdu_info->tso_info.tso_seg_list;
				ol_free_remaining_tso_segs(vdev, msdu_info,
							   false);
				return 1;
			}
		}
		tso_num_seg = ol_tso_num_seg_alloc(vdev->pdev);
		if (tso_num_seg) {
			tso_num_seg->next = msdu_info->tso_info.
						tso_num_seg_list;
			msdu_info->tso_info.tso_num_seg_list = tso_num_seg;
		} else {
			/* Free the already allocated num of segments */
			msdu_info->tso_info.curr_seg =
				msdu_info->tso_info.tso_seg_list;
			ol_free_remaining_tso_segs(vdev, msdu_info, false);
			return 1;
		}

		if (qdf_unlikely(!qdf_nbuf_get_tso_info(vdev->pdev->osdev,
						msdu, &msdu_info->tso_info))) {
			/* Free the already allocated num of segments */
			msdu_info->tso_info.curr_seg =
				msdu_info->tso_info.tso_seg_list;
			ol_free_remaining_tso_segs(vdev, msdu_info, false);
			return 1;
		}

		msdu_info->tso_info.curr_seg =
			msdu_info->tso_info.tso_seg_list;
		num_seg = msdu_info->tso_info.num_segs;
	} else {
		msdu_info->tso_info.is_tso = 0;
		msdu_info->tso_info.num_segs = 1;
	}
	return 0;
}

/**
 * ol_tx_tso_update_stats() - update TSO stats
 * @pdev: pointer to ol_txrx_pdev_t structure
 * @tso_info: tso info of the msdu
 * @msdu: tso msdu for which stats are updated
 * @tso_msdu_idx: stats index in the global TSO stats array where stats will be
 *		  updated
 *
 * Return: None
 */
void ol_tx_tso_update_stats(struct ol_txrx_pdev_t *pdev,
			    struct qdf_tso_info_t *tso_info, qdf_nbuf_t msdu,
			    uint32_t tso_msdu_idx)
{
	TXRX_STATS_TSO_HISTOGRAM(pdev, tso_info->num_segs);
	TXRX_STATS_TSO_GSO_SIZE_UPDATE(pdev, tso_msdu_idx,
				       qdf_nbuf_tcp_tso_size(msdu));
	TXRX_STATS_TSO_TOTAL_LEN_UPDATE(pdev,
					tso_msdu_idx, qdf_nbuf_len(msdu));
	TXRX_STATS_TSO_NUM_FRAGS_UPDATE(pdev, tso_msdu_idx,
					qdf_nbuf_get_nr_frags(msdu));
}

/**
 * ol_tx_tso_get_stats_idx() - retrieve global TSO stats index and increment it
 * @pdev: pointer to ol_txrx_pdev_t structure
 *
 * Retrieve the current value of the global variable and increment it. This is
 * done in a spinlock as the global TSO stats may be accessed in parallel by
 * multiple TX streams.
 *
 * Return: The current value of TSO stats index.
 */
uint32_t ol_tx_tso_get_stats_idx(struct ol_txrx_pdev_t *pdev)
{
	uint32_t msdu_stats_idx = 0;

	qdf_spin_lock_bh(&pdev->stats.pub.tx.tso.tso_stats_lock);
	msdu_stats_idx = pdev->stats.pub.tx.tso.tso_info.tso_msdu_idx;
	pdev->stats.pub.tx.tso.tso_info.tso_msdu_idx++;
	pdev->stats.pub.tx.tso.tso_info.tso_msdu_idx &=
					NUM_MAX_TSO_MSDUS_MASK;
	qdf_spin_unlock_bh(&pdev->stats.pub.tx.tso.tso_stats_lock);

	TXRX_STATS_TSO_RESET_MSDU(pdev, msdu_stats_idx);

	return msdu_stats_idx;
}

/**
 * ol_tso_seg_list_init() - function to initialise the tso seg freelist
 * @pdev: the data physical device sending the data
 * @num_seg: number of segments to be initialised
 *
 * Return: none
 */
void ol_tso_seg_list_init(struct ol_txrx_pdev_t *pdev, uint32_t num_seg)
{
	int i = 0;
	struct qdf_tso_seg_elem_t *c_element;

	/* Host should not allocate any c_element. */
	if (num_seg <= 0) {
		ol_txrx_err("Pool size passed is 0");
		QDF_BUG(0);
		pdev->tso_seg_pool.pool_size = i;
		qdf_spinlock_create(&pdev->tso_seg_pool.tso_mutex);
		return;
	}

	c_element = qdf_mem_malloc(sizeof(struct qdf_tso_seg_elem_t));
	pdev->tso_seg_pool.freelist = c_element;
	for (i = 0; i < (num_seg - 1); i++) {
		if (qdf_unlikely(!c_element)) {
			ol_txrx_err("c_element NULL for seg %d", i);
			QDF_BUG(0);
			pdev->tso_seg_pool.pool_size = i;
			pdev->tso_seg_pool.num_free = i;
			qdf_spinlock_create(&pdev->tso_seg_pool.tso_mutex);
			return;
		}
		/* set the freelist bit and magic cookie */
		c_element->on_freelist = 1;
		c_element->cookie = TSO_SEG_MAGIC_COOKIE;
#ifdef TSOSEG_DEBUG
		c_element->dbg.txdesc = NULL;
		qdf_atomic_init(&c_element->dbg.cur); /* history empty */
		qdf_tso_seg_dbg_record(c_element, TSOSEG_LOC_INIT1);
#endif /* TSOSEG_DEBUG */
		c_element->next =
			qdf_mem_malloc(sizeof(struct qdf_tso_seg_elem_t));
		c_element = c_element->next;
	}
	/*
	 * NULL check for the last c_element of the list or
	 * the first c_element if num_seg is equal to 1.
	 */
	if (qdf_unlikely(!c_element)) {
		ol_txrx_err("c_element NULL for seg %d", i);
		QDF_BUG(0);
		pdev->tso_seg_pool.pool_size = i;
		pdev->tso_seg_pool.num_free = i;
		qdf_spinlock_create(&pdev->tso_seg_pool.tso_mutex);
		return;
	}
	c_element->on_freelist = 1;
	c_element->cookie = TSO_SEG_MAGIC_COOKIE;
#ifdef TSOSEG_DEBUG
	qdf_tso_seg_dbg_init(c_element);
	qdf_tso_seg_dbg_record(c_element, TSOSEG_LOC_INIT2);
#endif /* TSOSEG_DEBUG */
	c_element->next = NULL;
	pdev->tso_seg_pool.pool_size = num_seg;
	pdev->tso_seg_pool.num_free = num_seg;
	qdf_spinlock_create(&pdev->tso_seg_pool.tso_mutex);
}

/**
 * ol_tso_seg_list_deinit() - function to de-initialise the tso seg freelist
 * @pdev: the data physical device sending the data
 *
 * Return: none
 */
void ol_tso_seg_list_deinit(struct ol_txrx_pdev_t *pdev)
{
	int i;
	struct qdf_tso_seg_elem_t *c_element;
	struct qdf_tso_seg_elem_t *temp;

	/* pool size 0 implies that the tso seg list is not initialised */
	if (!pdev->tso_seg_pool.freelist &&
	    pdev->tso_seg_pool.pool_size == 0)
		return;

	qdf_spin_lock_bh(&pdev->tso_seg_pool.tso_mutex);
	c_element = pdev->tso_seg_pool.freelist;
	i = pdev->tso_seg_pool.pool_size;

	pdev->tso_seg_pool.freelist = NULL;
	pdev->tso_seg_pool.num_free = 0;
	pdev->tso_seg_pool.pool_size = 0;

	qdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
	qdf_spinlock_destroy(&pdev->tso_seg_pool.tso_mutex);

	while (i-- > 0 && c_element) {
		temp = c_element->next;
		if (c_element->on_freelist != 1) {
			qdf_tso_seg_dbg_bug("seg already freed (double?)");
			return;
		} else if (c_element->cookie != TSO_SEG_MAGIC_COOKIE) {
			qdf_tso_seg_dbg_bug("seg cookie is bad (corruption?)");
			return;
		}
		/* free this seg, so reset the cookie value */
		c_element->cookie = 0;
		qdf_mem_free(c_element);
		c_element = temp;
	}
}

/**
 * ol_tso_num_seg_list_init() - function to initialise the freelist of elements
 *				used to count the number of tso segments in a
 *				jumbo skb packet
 * @pdev: the data physical device sending the data
 * @num_seg: number of elements to be initialised
 *
 * Return: none
 */
void ol_tso_num_seg_list_init(struct ol_txrx_pdev_t *pdev, uint32_t num_seg)
{
	int i = 0;
	struct qdf_tso_num_seg_elem_t *c_element;

	/* Host should not allocate any c_element. */
	if (num_seg <= 0) {
		ol_txrx_err("Pool size passed is 0");
		QDF_BUG(0);
		pdev->tso_num_seg_pool.num_seg_pool_size = i;
		qdf_spinlock_create(&pdev->tso_num_seg_pool.tso_num_seg_mutex);
		return;
	}

	c_element = qdf_mem_malloc(sizeof(struct qdf_tso_num_seg_elem_t));
	pdev->tso_num_seg_pool.freelist = c_element;
	for (i = 0; i < (num_seg - 1); i++) {
		if (qdf_unlikely(!c_element)) {
			ol_txrx_err("c_element NULL for num of seg %d", i);
			QDF_BUG(0);
			pdev->tso_num_seg_pool.num_seg_pool_size = i;
			pdev->tso_num_seg_pool.num_free = i;
			qdf_spinlock_create(&pdev->tso_num_seg_pool.
						tso_num_seg_mutex);
			return;
		}
		c_element->next =
			qdf_mem_malloc(sizeof(struct qdf_tso_num_seg_elem_t));
		c_element = c_element->next;
	}
	/*
	 * NULL check for the last c_element of the list or
	 * the first c_element if num_seg is equal to 1.
	 */
	if (qdf_unlikely(!c_element)) {
		ol_txrx_err("c_element NULL for num of seg %d", i);
		QDF_BUG(0);
		pdev->tso_num_seg_pool.num_seg_pool_size = i;
		pdev->tso_num_seg_pool.num_free = i;
		qdf_spinlock_create(&pdev->tso_num_seg_pool.tso_num_seg_mutex);
		return;
	}
	c_element->next = NULL;
	pdev->tso_num_seg_pool.num_seg_pool_size = num_seg;
	pdev->tso_num_seg_pool.num_free = num_seg;
	qdf_spinlock_create(&pdev->tso_num_seg_pool.tso_num_seg_mutex);
}

/**
 * ol_tso_num_seg_list_deinit() - function to de-initialise the freelist of
 *				  elements used to count the number of tso
 *				  segments in a jumbo skb packet
 * @pdev: the data physical device sending the data
 *
 * Return: none
 */
void ol_tso_num_seg_list_deinit(struct ol_txrx_pdev_t *pdev)
{
	int i;
	struct qdf_tso_num_seg_elem_t *c_element;
	struct qdf_tso_num_seg_elem_t *temp;

	/* pool size 0 implies that the tso num seg list is not initialised */
	if (!pdev->tso_num_seg_pool.freelist &&
	    pdev->tso_num_seg_pool.num_seg_pool_size == 0)
		return;

	qdf_spin_lock_bh(&pdev->tso_num_seg_pool.tso_num_seg_mutex);
	c_element = pdev->tso_num_seg_pool.freelist;
	i = pdev->tso_num_seg_pool.num_seg_pool_size;

	pdev->tso_num_seg_pool.freelist = NULL;
	pdev->tso_num_seg_pool.num_free = 0;
	pdev->tso_num_seg_pool.num_seg_pool_size = 0;

	qdf_spin_unlock_bh(&pdev->tso_num_seg_pool.tso_num_seg_mutex);
	qdf_spinlock_destroy(&pdev->tso_num_seg_pool.tso_num_seg_mutex);

	while (i-- > 0 && c_element) {
		temp = c_element->next;
		qdf_mem_free(c_element);
		c_element = temp;
	}
}
#endif /* FEATURE_TSO */

#if defined(FEATURE_TSO) && defined(FEATURE_TSO_DEBUG)
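/**
 * ol_txrx_tso_stats_init() - create the lock protecting the TSO stats
 * @pdev: physical device instance
 *
 * Return: none
 */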
void ol_txrx_tso_stats_init(ol_txrx_pdev_handle pdev)
{
	qdf_spinlock_create(&pdev->stats.pub.tx.tso.tso_stats_lock);
}

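/**
 * ol_txrx_tso_stats_deinit() - destroy the lock protecting the TSO stats
 * @pdev: physical device instance
 *
 * Return: none
 */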
void ol_txrx_tso_stats_deinit(ol_txrx_pdev_handle pdev)
{
	qdf_spinlock_destroy(&pdev->stats.pub.tx.tso.tso_stats_lock);
}

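/**
 * ol_txrx_stats_display_tso() - print the accumulated TSO statistics
 * @pdev: physical device instance
 *
 * Prints the TSO packet/byte counters, the segment-count histogram and
 * the per-MSDU TSO history buffer.
 *
 * Return: none
 */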
void ol_txrx_stats_display_tso(ol_txrx_pdev_handle pdev)
{
	int msdu_idx;
	int seg_idx;

	txrx_nofl_info("TSO Statistics:");
	txrx_nofl_info("TSO pkts %lld, bytes %lld\n",
		       pdev->stats.pub.tx.tso.tso_pkts.pkts,
		       pdev->stats.pub.tx.tso.tso_pkts.bytes);

	txrx_nofl_info("TSO Histogram for numbers of segments:\n"
		       "Single segment %d\n"
		       "  2-5 segments %d\n"
		       " 6-10 segments %d\n"
		       "11-15 segments %d\n"
		       "16-20 segments %d\n"
		       "  20+ segments %d\n",
		       pdev->stats.pub.tx.tso.tso_hist.pkts_1,
		       pdev->stats.pub.tx.tso.tso_hist.pkts_2_5,
		       pdev->stats.pub.tx.tso.tso_hist.pkts_6_10,
		       pdev->stats.pub.tx.tso.tso_hist.pkts_11_15,
		       pdev->stats.pub.tx.tso.tso_hist.pkts_16_20,
		       pdev->stats.pub.tx.tso.tso_hist.pkts_20_plus);

	txrx_nofl_info("TSO History Buffer: Total size %d, current_index %d",
		       NUM_MAX_TSO_MSDUS,
		       TXRX_STATS_TSO_MSDU_IDX(pdev));

	for (msdu_idx = 0; msdu_idx < NUM_MAX_TSO_MSDUS; msdu_idx++) {
		if (TXRX_STATS_TSO_MSDU_TOTAL_LEN(pdev, msdu_idx) == 0)
			continue;
		txrx_nofl_info("jumbo pkt idx: %d num segs %d gso_len %d total_len %d nr_frags %d",
			       msdu_idx,
			       TXRX_STATS_TSO_MSDU_NUM_SEG(pdev, msdu_idx),
			       TXRX_STATS_TSO_MSDU_GSO_SIZE(pdev, msdu_idx),
			       TXRX_STATS_TSO_MSDU_TOTAL_LEN(pdev, msdu_idx),
			       TXRX_STATS_TSO_MSDU_NR_FRAGS(pdev, msdu_idx));

		for (seg_idx = 0;
		     ((seg_idx < TXRX_STATS_TSO_MSDU_NUM_SEG(pdev,
		       msdu_idx)) && (seg_idx < NUM_MAX_TSO_SEGS));
		     seg_idx++) {
			struct qdf_tso_seg_t tso_seg =
				TXRX_STATS_TSO_SEG(pdev, msdu_idx, seg_idx);

			txrx_nofl_info("seg idx: %d", seg_idx);
			txrx_nofl_info("tso_enable: %d",
				       tso_seg.tso_flags.tso_enable);
			txrx_nofl_info("fin %d syn %d rst %d psh %d ack %d urg %d ece %d cwr %d ns %d",
				       tso_seg.tso_flags.fin,
				       tso_seg.tso_flags.syn,
				       tso_seg.tso_flags.rst,
				       tso_seg.tso_flags.psh,
				       tso_seg.tso_flags.ack,
				       tso_seg.tso_flags.urg,
				       tso_seg.tso_flags.ece,
				       tso_seg.tso_flags.cwr,
				       tso_seg.tso_flags.ns);
			txrx_nofl_info("tcp_seq_num: 0x%x ip_id: %d",
				       tso_seg.tso_flags.tcp_seq_num,
				       tso_seg.tso_flags.ip_id);
		}
	}
}

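/**
 * ol_txrx_tso_stats_clear() - reset the accumulated TSO statistics
 * @pdev: physical device instance
 *
 * Return: none
 */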
void ol_txrx_tso_stats_clear(ol_txrx_pdev_handle pdev)
{
	qdf_mem_zero(&pdev->stats.pub.tx.tso.tso_pkts,
		     sizeof(struct ol_txrx_stats_elem));
#if defined(FEATURE_TSO)
	qdf_mem_zero(&pdev->stats.pub.tx.tso.tso_info,
		     sizeof(struct ol_txrx_stats_tso_info));
	qdf_mem_zero(&pdev->stats.pub.tx.tso.tso_hist,
		     sizeof(struct ol_txrx_tso_histogram));
#endif
}
#endif /* defined(FEATURE_TSO) && defined(FEATURE_TSO_DEBUG) */