/*
 * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

/**
 * DOC: cdf_nbuf.c
 *
 * Connectivity driver framework (CDF) network buffer management APIs
 */

#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <cdf_types.h>
#include <cdf_nbuf.h>
#include <cdf_memory.h>
#include <cdf_trace.h>
#include <cdf_status.h>
#include <cdf_lock.h>

#if defined(FEATURE_TSO)
#include <net/ipv6.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#endif /* FEATURE_TSO */

/* Packet counters */
static uint32_t nbuf_tx_mgmt[NBUF_TX_PKT_STATE_MAX];
static uint32_t nbuf_tx_data[NBUF_TX_PKT_STATE_MAX];

/**
 * cdf_nbuf_tx_desc_count_display() - Displays the packet counters
 *
 * Return: none
 */
void cdf_nbuf_tx_desc_count_display(void)
{
    cdf_print("Current Snapshot of the Driver:\n");
    cdf_print("Data Packets:\n");
    cdf_print("HDD %d TXRX_Q %d TXRX %d HTT %d",
              nbuf_tx_data[NBUF_TX_PKT_HDD] -
              (nbuf_tx_data[NBUF_TX_PKT_TXRX] +
               nbuf_tx_data[NBUF_TX_PKT_TXRX_ENQUEUE] -
               nbuf_tx_data[NBUF_TX_PKT_TXRX_DEQUEUE]),
              nbuf_tx_data[NBUF_TX_PKT_TXRX_ENQUEUE] -
              nbuf_tx_data[NBUF_TX_PKT_TXRX_DEQUEUE],
              nbuf_tx_data[NBUF_TX_PKT_TXRX] - nbuf_tx_data[NBUF_TX_PKT_HTT],
              nbuf_tx_data[NBUF_TX_PKT_HTT] - nbuf_tx_data[NBUF_TX_PKT_HTC]);
    cdf_print(" HTC %d HIF %d CE %d TX_COMP %d\n",
              nbuf_tx_data[NBUF_TX_PKT_HTC] - nbuf_tx_data[NBUF_TX_PKT_HIF],
              nbuf_tx_data[NBUF_TX_PKT_HIF] - nbuf_tx_data[NBUF_TX_PKT_CE],
              nbuf_tx_data[NBUF_TX_PKT_CE] - nbuf_tx_data[NBUF_TX_PKT_FREE],
              nbuf_tx_data[NBUF_TX_PKT_FREE]);
    cdf_print("Mgmt Packets:\n");
    cdf_print("TXRX_Q %d TXRX %d HTT %d HTC %d HIF %d CE %d TX_COMP %d\n",
              nbuf_tx_mgmt[NBUF_TX_PKT_TXRX_ENQUEUE] -
              nbuf_tx_mgmt[NBUF_TX_PKT_TXRX_DEQUEUE],
              nbuf_tx_mgmt[NBUF_TX_PKT_TXRX] - nbuf_tx_mgmt[NBUF_TX_PKT_HTT],
              nbuf_tx_mgmt[NBUF_TX_PKT_HTT] - nbuf_tx_mgmt[NBUF_TX_PKT_HTC],
              nbuf_tx_mgmt[NBUF_TX_PKT_HTC] - nbuf_tx_mgmt[NBUF_TX_PKT_HIF],
              nbuf_tx_mgmt[NBUF_TX_PKT_HIF] - nbuf_tx_mgmt[NBUF_TX_PKT_CE],
              nbuf_tx_mgmt[NBUF_TX_PKT_CE] - nbuf_tx_mgmt[NBUF_TX_PKT_FREE],
              nbuf_tx_mgmt[NBUF_TX_PKT_FREE]);
}

/**
 * cdf_nbuf_tx_desc_count_update() - Updates the per-layer packet counter
 * @packet_type: packet type, either mgmt or data
 * @current_state: layer at which the packet is currently present
 *
 * Return: none
 */
static inline void cdf_nbuf_tx_desc_count_update(uint8_t packet_type,
                                                 uint8_t current_state)
{
    switch (packet_type) {
    case NBUF_TX_PKT_MGMT_TRACK:
        nbuf_tx_mgmt[current_state]++;
        break;
    case NBUF_TX_PKT_DATA_TRACK:
        nbuf_tx_data[current_state]++;
        break;
    default:
        break;
    }
}

/**
 * cdf_nbuf_tx_desc_count_clear() - Clears the packet counters for both data and mgmt
 *
 * Return: none
 */
void cdf_nbuf_tx_desc_count_clear(void)
{
    memset(nbuf_tx_mgmt, 0, sizeof(nbuf_tx_mgmt));
    memset(nbuf_tx_data, 0, sizeof(nbuf_tx_data));
}

/**
 * cdf_nbuf_set_state() - Updates the packet state
 * @nbuf: network buffer
 * @current_state: layer at which the packet currently is
 *
 * This function updates the packet state to the layer at which the packet
 * currently is.
 *
 * Return: none
 */
void cdf_nbuf_set_state(cdf_nbuf_t nbuf, uint8_t current_state)
{
    /*
     * Only mgmt and data packets are tracked; WMI messages
     * such as scan commands are not tracked.
     */
    uint8_t packet_type;

    packet_type = NBUF_GET_PACKET_TRACK(nbuf);

    if ((packet_type != NBUF_TX_PKT_DATA_TRACK) &&
        (packet_type != NBUF_TX_PKT_MGMT_TRACK))
        return;

    NBUF_SET_PACKET_STATE(nbuf, current_state);
    cdf_nbuf_tx_desc_count_update(packet_type, current_state);
}
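
/*
 * Illustrative usage sketch, not part of the driver: a TX layer is
 * expected to re-tag the nbuf as the packet moves down the stack, e.g.
 * when a data frame is handed from TXRX to HTT. The counters displayed
 * by cdf_nbuf_tx_desc_count_display() are derived from these updates.
 * The CDF_NBUF_USAGE_EXAMPLES guard is hypothetical and exists only so
 * this sketch is never compiled into the driver.
 */
#ifdef CDF_NBUF_USAGE_EXAMPLES
static void example_track_tx_data(cdf_nbuf_t nbuf)
{
    /* mark the data packet as currently held by the HTT layer */
    cdf_nbuf_set_state(nbuf, NBUF_TX_PKT_HTT);
}
#endif /* CDF_NBUF_USAGE_EXAMPLES */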

cdf_nbuf_trace_update_t trace_update_cb = NULL;

/**
 * __cdf_nbuf_alloc() - Allocate nbuf
 * @osdev: Device handle
 * @size: Netbuf requested size
 * @reserve: headroom to reserve
 * @align: alignment for the data pointer
 * @prio: Priority
 *
 * This allocates an nbuf, aligns it if needed, and reserves some space in the
 * front. Since the reserve is done after alignment, an unaligned reserve
 * value will result in an unaligned data pointer.
 *
 * Return: nbuf or %NULL if no memory
 */
struct sk_buff *__cdf_nbuf_alloc(cdf_device_t osdev, size_t size, int reserve,
                                 int align, int prio)
{
    struct sk_buff *skb;
    unsigned long offset;

    if (align)
        size += (align - 1);

    skb = dev_alloc_skb(size);

    if (!skb) {
        pr_err("ERROR:NBUF alloc failed\n");
        return NULL;
    }
    memset(skb->cb, 0x0, sizeof(skb->cb));

    /*
     * The default is for netbuf fragments to be interpreted
     * as wordstreams rather than bytestreams.
     * Set the CVG_NBUF_MAX_EXTRA_FRAGS+1 wordstream_flags bits,
     * to provide this default.
     */
    NBUF_EXTRA_FRAG_WORDSTREAM_FLAGS(skb) =
        (1 << (CVG_NBUF_MAX_EXTRA_FRAGS + 1)) - 1;

    /*
     * XXX: how about we reserve first, then align?
     * Align & make sure that the tail & data are adjusted properly.
     */
    if (align) {
        offset = ((unsigned long)skb->data) % align;
        if (offset)
            skb_reserve(skb, align - offset);
    }

    /*
     * NOTE: alloc doesn't take responsibility if reserve unaligns the
     * data pointer
     */
    skb_reserve(skb, reserve);

    return skb;
}
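
/*
 * Illustrative usage sketch, not part of the driver: allocating an
 * aligned nbuf with headroom reserved for later header pushes. Note
 * that the reserve is applied after alignment, so an unaligned reserve
 * value would leave skb->data unaligned. The CDF_NBUF_USAGE_EXAMPLES
 * guard is hypothetical and keeps this sketch out of the build.
 */
#ifdef CDF_NBUF_USAGE_EXAMPLES
static struct sk_buff *example_alloc_tx_nbuf(cdf_device_t osdev)
{
    /* 1500-byte payload, 64 bytes of headroom, 4-byte alignment,
     * default priority */
    return __cdf_nbuf_alloc(osdev, 1500, 64, 4, 0);
}
#endif /* CDF_NBUF_USAGE_EXAMPLES */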

/**
 * __cdf_nbuf_free() - free the nbuf (interrupt safe)
 * @skb: Pointer to network buffer
 *
 * Return: none
 */
void __cdf_nbuf_free(struct sk_buff *skb)
{
    if ((NBUF_OWNER_ID(skb) == IPA_NBUF_OWNER_ID) && NBUF_CALLBACK_FN(skb))
        NBUF_CALLBACK_FN_EXEC(skb);
    else
        dev_kfree_skb_any(skb);
}

/**
 * __cdf_nbuf_map() - get the dma map of the nbuf
 * @osdev: OS device
 * @skb: Pointer to network buffer
 * @dir: Direction
 *
 * Return: CDF_STATUS
 */
CDF_STATUS
__cdf_nbuf_map(cdf_device_t osdev, struct sk_buff *skb, cdf_dma_dir_t dir)
{
#ifdef CDF_OS_DEBUG
    struct skb_shared_info *sh = skb_shinfo(skb);
#endif
    cdf_assert((dir == CDF_DMA_TO_DEVICE)
               || (dir == CDF_DMA_FROM_DEVICE));

    /*
     * Assume there's only a single fragment.
     * To support multiple fragments, it would be necessary to change
     * cdf_nbuf_t to be a separate object that stores meta-info
     * (including the bus address for each fragment) and a pointer
     * to the underlying sk_buff.
     */
#ifdef CDF_OS_DEBUG
    cdf_assert(sh->nr_frags == 0);
#endif

    return __cdf_nbuf_map_single(osdev, skb, dir);
}

/**
 * __cdf_nbuf_unmap() - to unmap a previously mapped buf
 * @osdev: OS device
 * @skb: Pointer to network buffer
 * @dir: Direction
 *
 * Return: none
 */
void
__cdf_nbuf_unmap(cdf_device_t osdev, struct sk_buff *skb, cdf_dma_dir_t dir)
{
    cdf_assert((dir == CDF_DMA_TO_DEVICE)
               || (dir == CDF_DMA_FROM_DEVICE));

    /*
     * Assume there's a single fragment.
     * If this is not true, the assertion in __cdf_nbuf_map will catch it.
     */
    __cdf_nbuf_unmap_single(osdev, skb, dir);
}
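
/*
 * Illustrative usage sketch, not part of the driver: a DMA map must be
 * paired with an unmap in the same direction, and only single-fragment
 * skbs are supported (see the nr_frags assertion in __cdf_nbuf_map).
 * The CDF_NBUF_USAGE_EXAMPLES guard is hypothetical.
 */
#ifdef CDF_NBUF_USAGE_EXAMPLES
static CDF_STATUS example_dma_cycle(cdf_device_t osdev, struct sk_buff *skb)
{
    CDF_STATUS status;

    status = __cdf_nbuf_map(osdev, skb, CDF_DMA_TO_DEVICE);
    if (status != CDF_STATUS_SUCCESS)
        return status;

    /* ... hand NBUF_MAPPED_PADDR_LO(skb) to the target here ... */

    __cdf_nbuf_unmap(osdev, skb, CDF_DMA_TO_DEVICE);
    return CDF_STATUS_SUCCESS;
}
#endif /* CDF_NBUF_USAGE_EXAMPLES */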

/**
 * __cdf_nbuf_map_single() - dma map of the nbuf
 * @osdev: OS device
 * @buf: Pointer to network buffer
 * @dir: Direction
 *
 * Return: CDF_STATUS
 */
CDF_STATUS
__cdf_nbuf_map_single(cdf_device_t osdev, cdf_nbuf_t buf, cdf_dma_dir_t dir)
{
    uint32_t paddr_lo;

/* temporary hack for simulation */
#ifdef A_SIMOS_DEVHOST
    NBUF_MAPPED_PADDR_LO(buf) = paddr_lo = (uint32_t) buf->data;
    return CDF_STATUS_SUCCESS;
#else
    /* assume that the OS only provides a single fragment */
    NBUF_MAPPED_PADDR_LO(buf) = paddr_lo =
        dma_map_single(osdev->dev, buf->data,
                       skb_end_pointer(buf) - buf->data, dir);
    return dma_mapping_error(osdev->dev, paddr_lo) ?
        CDF_STATUS_E_FAILURE : CDF_STATUS_SUCCESS;
#endif /* #ifdef A_SIMOS_DEVHOST */
}

/**
 * __cdf_nbuf_unmap_single() - dma unmap nbuf
 * @osdev: OS device
 * @buf: Pointer to network buffer
 * @dir: Direction
 *
 * Return: none
 */
void
__cdf_nbuf_unmap_single(cdf_device_t osdev, cdf_nbuf_t buf, cdf_dma_dir_t dir)
{
#if !defined(A_SIMOS_DEVHOST)
    dma_unmap_single(osdev->dev, NBUF_MAPPED_PADDR_LO(buf),
                     skb_end_pointer(buf) - buf->data, dir);
#endif /* #if !defined(A_SIMOS_DEVHOST) */
}

/**
 * __cdf_nbuf_set_rx_cksum() - set rx checksum
 * @skb: Pointer to network buffer
 * @cksum: Pointer to checksum value
 *
 * Return: CDF_STATUS
 */
CDF_STATUS
__cdf_nbuf_set_rx_cksum(struct sk_buff *skb, cdf_nbuf_rx_cksum_t *cksum)
{
    switch (cksum->l4_result) {
    case CDF_NBUF_RX_CKSUM_NONE:
        skb->ip_summed = CHECKSUM_NONE;
        break;
    case CDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY:
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        break;
    case CDF_NBUF_RX_CKSUM_TCP_UDP_HW:
        skb->ip_summed = CHECKSUM_PARTIAL;
        skb->csum = cksum->val;
        break;
    default:
        pr_err("ADF_NET:Unknown checksum type\n");
        cdf_assert(0);
        return CDF_STATUS_E_NOSUPPORT;
    }
    return CDF_STATUS_SUCCESS;
}

/**
 * __cdf_nbuf_get_tx_cksum() - get tx checksum
 * @skb: Pointer to network buffer
 *
 * Return: TX checksum value
 */
cdf_nbuf_tx_cksum_t __cdf_nbuf_get_tx_cksum(struct sk_buff *skb)
{
    switch (skb->ip_summed) {
    case CHECKSUM_NONE:
        return CDF_NBUF_TX_CKSUM_NONE;
    case CHECKSUM_PARTIAL:
        /* XXX: the ADF and Linux checksum types don't map one-to-one,
         * so this is not 100% correct */
        return CDF_NBUF_TX_CKSUM_TCP_UDP;
    case CHECKSUM_COMPLETE:
        return CDF_NBUF_TX_CKSUM_TCP_UDP_IP;
    default:
        return CDF_NBUF_TX_CKSUM_NONE;
    }
}

/**
 * __cdf_nbuf_get_tid() - get tid
 * @skb: Pointer to network buffer
 *
 * Return: tid
 */
uint8_t __cdf_nbuf_get_tid(struct sk_buff *skb)
{
    return skb->priority;
}

/**
 * __cdf_nbuf_set_tid() - set tid
 * @skb: Pointer to network buffer
 * @tid: TID value to set
 *
 * Return: none
 */
void __cdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid)
{
    skb->priority = tid;
}

/**
 * __cdf_nbuf_get_exemption_type() - get exemption type
 * @skb: Pointer to network buffer
 *
 * Return: exemption type
 */
uint8_t __cdf_nbuf_get_exemption_type(struct sk_buff *skb)
{
    return CDF_NBUF_EXEMPT_NO_EXEMPTION;
}

/**
 * __cdf_nbuf_reg_trace_cb() - register trace callback
 * @cb_func_ptr: Pointer to trace callback function
 *
 * Return: none
 */
void __cdf_nbuf_reg_trace_cb(cdf_nbuf_trace_update_t cb_func_ptr)
{
    trace_update_cb = cb_func_ptr;
}

#ifdef QCA_PKT_PROTO_TRACE
/**
 * __cdf_nbuf_trace_update() - update trace event
 * @buf: Pointer to network buffer
 * @event_string: Pointer to the event description string
 *
 * Return: none
 */
void __cdf_nbuf_trace_update(struct sk_buff *buf, char *event_string)
{
    char string_buf[NBUF_PKT_TRAC_MAX_STRING];

    if ((!trace_update_cb) || (!event_string))
        return;

    if (!cdf_nbuf_trace_get_proto_type(buf))
        return;

    /* Guard against string buffer overflow */
    if (NBUF_PKT_TRAC_MAX_STRING <=
        (cdf_str_len(event_string) + NBUF_PKT_TRAC_PROTO_STRING))
        return;

    cdf_mem_zero(string_buf, NBUF_PKT_TRAC_MAX_STRING);
    cdf_mem_copy(string_buf, event_string, cdf_str_len(event_string));
    if (NBUF_PKT_TRAC_TYPE_EAPOL & cdf_nbuf_trace_get_proto_type(buf)) {
        cdf_mem_copy(string_buf + cdf_str_len(event_string),
                     "EPL", NBUF_PKT_TRAC_PROTO_STRING);
    } else if (NBUF_PKT_TRAC_TYPE_DHCP &
               cdf_nbuf_trace_get_proto_type(buf)) {
        cdf_mem_copy(string_buf + cdf_str_len(event_string),
                     "DHC", NBUF_PKT_TRAC_PROTO_STRING);
    } else if (NBUF_PKT_TRAC_TYPE_MGMT_ACTION &
               cdf_nbuf_trace_get_proto_type(buf)) {
        cdf_mem_copy(string_buf + cdf_str_len(event_string),
                     "MACT", NBUF_PKT_TRAC_PROTO_STRING);
    }

    trace_update_cb(string_buf);
}
#endif /* QCA_PKT_PROTO_TRACE */

#ifdef MEMORY_DEBUG
#define CDF_NET_BUF_TRACK_MAX_SIZE (1024)

/**
 * struct cdf_nbuf_track_t - Network buffer track structure
 *
 * @p_next: Pointer to next
 * @net_buf: Pointer to network buffer
 * @file_name: File name
 * @line_num: Line number
 * @size: Size
 */
struct cdf_nbuf_track_t {
    struct cdf_nbuf_track_t *p_next;
    cdf_nbuf_t net_buf;
    uint8_t *file_name;
    uint32_t line_num;
    size_t size;
};

spinlock_t g_cdf_net_buf_track_lock;
typedef struct cdf_nbuf_track_t CDF_NBUF_TRACK;

CDF_NBUF_TRACK *gp_cdf_net_buf_track_tbl[CDF_NET_BUF_TRACK_MAX_SIZE];

/**
 * cdf_net_buf_debug_init() - initialize network buffer debug functionality
 *
 * The CDF network buffer debug feature tracks all SKBs allocated by the WLAN
 * driver in a hash table and reports the leaked SKBs when the driver is
 * unloaded. WLAN driver modules whose allocated SKBs are freed by the network
 * stack are supposed to call cdf_net_buf_debug_release_skb() so that those
 * SKBs are not reported as memory leaks.
 *
 * Return: none
 */
void cdf_net_buf_debug_init(void)
{
    uint32_t i;
    unsigned long irq_flag;

    spin_lock_init(&g_cdf_net_buf_track_lock);

    spin_lock_irqsave(&g_cdf_net_buf_track_lock, irq_flag);

    for (i = 0; i < CDF_NET_BUF_TRACK_MAX_SIZE; i++)
        gp_cdf_net_buf_track_tbl[i] = NULL;

    spin_unlock_irqrestore(&g_cdf_net_buf_track_lock, irq_flag);
}

/**
 * cdf_net_buf_debug_exit() - exit network buffer debug functionality
 *
 * Exit network buffer tracking debug functionality and log SKB memory leaks
 *
 * Return: none
 */
void cdf_net_buf_debug_exit(void)
{
    uint32_t i;
    unsigned long irq_flag;
    CDF_NBUF_TRACK *p_node;
    CDF_NBUF_TRACK *p_prev;

    spin_lock_irqsave(&g_cdf_net_buf_track_lock, irq_flag);

    for (i = 0; i < CDF_NET_BUF_TRACK_MAX_SIZE; i++) {
        p_node = gp_cdf_net_buf_track_tbl[i];
        while (p_node) {
            p_prev = p_node;
            p_node = p_node->p_next;
            CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
                      "SKB buf memory Leak@ File %s, @Line %d, size %zu\n",
                      p_prev->file_name, p_prev->line_num,
                      p_prev->size);
        }
    }

    spin_unlock_irqrestore(&g_cdf_net_buf_track_lock, irq_flag);
}

/**
 * cdf_net_buf_debug_clean() - clean up network buffer debug functionality
 *
 * Return: none
 */
void cdf_net_buf_debug_clean(void)
{
    uint32_t i;
    unsigned long irq_flag;
    CDF_NBUF_TRACK *p_node;
    CDF_NBUF_TRACK *p_prev;

    spin_lock_irqsave(&g_cdf_net_buf_track_lock, irq_flag);

    for (i = 0; i < CDF_NET_BUF_TRACK_MAX_SIZE; i++) {
        p_node = gp_cdf_net_buf_track_tbl[i];
        while (p_node) {
            p_prev = p_node;
            p_node = p_node->p_next;
            cdf_mem_free(p_prev);
        }
    }

    spin_unlock_irqrestore(&g_cdf_net_buf_track_lock, irq_flag);
}

/**
 * cdf_net_buf_debug_hash() - hash network buffer pointer
 * @net_buf: network buffer
 *
 * Return: hash value
 */
uint32_t cdf_net_buf_debug_hash(cdf_nbuf_t net_buf)
{
    uint32_t i;

    i = (uint32_t) ((uintptr_t) net_buf & (CDF_NET_BUF_TRACK_MAX_SIZE - 1));

    return i;
}

/**
 * cdf_net_buf_debug_look_up() - look up network buffer in debug hash table
 * @net_buf: network buffer
 *
 * Return: pointer to the tracking node if the skb is found in the hash
 * table, else %NULL
 */
CDF_NBUF_TRACK *cdf_net_buf_debug_look_up(cdf_nbuf_t net_buf)
{
    uint32_t i;
    CDF_NBUF_TRACK *p_node;

    i = cdf_net_buf_debug_hash(net_buf);
    p_node = gp_cdf_net_buf_track_tbl[i];

    while (p_node) {
        if (p_node->net_buf == net_buf)
            return p_node;
        p_node = p_node->p_next;
    }

    return NULL;
}

/**
 * cdf_net_buf_debug_add_node() - store skb in debug hash table
 * @net_buf: network buffer
 * @size: buffer size
 * @file_name: file from which the buffer was allocated
 * @line_num: line at which the buffer was allocated
 *
 * Return: none
 */
void cdf_net_buf_debug_add_node(cdf_nbuf_t net_buf, size_t size,
                                uint8_t *file_name, uint32_t line_num)
{
    uint32_t i;
    unsigned long irq_flag;
    CDF_NBUF_TRACK *p_node;

    spin_lock_irqsave(&g_cdf_net_buf_track_lock, irq_flag);

    i = cdf_net_buf_debug_hash(net_buf);
    p_node = cdf_net_buf_debug_look_up(net_buf);

    if (p_node) {
        CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
                  "Double allocation of skb ! Already allocated from %s %d",
                  p_node->file_name, p_node->line_num);
        CDF_ASSERT(0);
    } else {
        p_node = (CDF_NBUF_TRACK *) cdf_mem_malloc(sizeof(*p_node));
        if (p_node) {
            p_node->net_buf = net_buf;
            p_node->file_name = file_name;
            p_node->line_num = line_num;
            p_node->size = size;
            p_node->p_next = gp_cdf_net_buf_track_tbl[i];
            gp_cdf_net_buf_track_tbl[i] = p_node;
        } else {
            CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
                      "Mem alloc failed ! Could not track skb from %s %d of size %zu",
                      file_name, line_num, size);
            CDF_ASSERT(0);
        }
    }

    spin_unlock_irqrestore(&g_cdf_net_buf_track_lock, irq_flag);
}

/**
 * cdf_net_buf_debug_delete_node() - remove skb from debug hash table
 * @net_buf: network buffer
 *
 * Return: none
 */
void cdf_net_buf_debug_delete_node(cdf_nbuf_t net_buf)
{
    uint32_t i;
    bool found = false;
    CDF_NBUF_TRACK *p_head;
    CDF_NBUF_TRACK *p_node;
    unsigned long irq_flag;
    CDF_NBUF_TRACK *p_prev;

    spin_lock_irqsave(&g_cdf_net_buf_track_lock, irq_flag);

    i = cdf_net_buf_debug_hash(net_buf);
    p_head = gp_cdf_net_buf_track_tbl[i];

    /* Unallocated SKB */
    if (!p_head)
        goto done;

    p_node = p_head;
    /* Found at head of the table */
    if (p_head->net_buf == net_buf) {
        gp_cdf_net_buf_track_tbl[i] = p_node->p_next;
        cdf_mem_free((void *)p_node);
        found = true;
        goto done;
    }

    /* Search in collision list */
    while (p_node) {
        p_prev = p_node;
        p_node = p_node->p_next;
        if ((NULL != p_node) && (p_node->net_buf == net_buf)) {
            p_prev->p_next = p_node->p_next;
            cdf_mem_free((void *)p_node);
            found = true;
            break;
        }
    }

done:
    if (!found) {
        CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
                  "Unallocated buffer ! Double free of net_buf %p ?",
                  net_buf);
        CDF_ASSERT(0);
    }

    spin_unlock_irqrestore(&g_cdf_net_buf_track_lock, irq_flag);
}

/**
 * cdf_net_buf_debug_release_skb() - stop tracking an skb to avoid a leak report
 * @net_buf: network buffer
 *
 * WLAN driver modules whose allocated SKBs are freed by the network stack
 * are supposed to call this API before handing the SKB to the network stack
 * so that the SKB is not reported as a memory leak.
 *
 * Return: none
 */
void cdf_net_buf_debug_release_skb(cdf_nbuf_t net_buf)
{
    cdf_net_buf_debug_delete_node(net_buf);
}
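
/*
 * Illustrative lifecycle sketch, not part of the driver: the debug table
 * is initialized at driver load, every tracked skb is released before it
 * is handed to the network stack, and remaining entries are reported as
 * leaks at unload. The CDF_NBUF_USAGE_EXAMPLES guard is hypothetical, and
 * netif_rx() merely stands in for whatever delivery path the caller uses.
 */
#ifdef CDF_NBUF_USAGE_EXAMPLES
static void example_debug_lifecycle(cdf_nbuf_t rx_buf)
{
    cdf_net_buf_debug_init();               /* at driver load */

    /* ... rx_buf was allocated and recorded via
     * cdf_net_buf_debug_add_node() by the allocation wrappers ... */

    cdf_net_buf_debug_release_skb(rx_buf);  /* stop tracking */
    netif_rx((struct sk_buff *)rx_buf);     /* the stack now owns it */

    cdf_net_buf_debug_exit();               /* at unload: logs leaks */
}
#endif /* CDF_NBUF_USAGE_EXAMPLES */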

#endif /* MEMORY_DEBUG */
#if defined(FEATURE_TSO)

/**
 * struct cdf_tso_cmn_seg_info_t - TSO information common to all segments
 * @ethproto: ethernet type of the packet
 * @ip_tcp_hdr_len: combined IP and TCP header length
 * @l2_len: L2 (ethernet) header length
 * @eit_hdr: pointer to the ethernet + IP + TCP header
 * @eit_hdr_len: length of the ethernet + IP + TCP header
 * @tcphdr: pointer to the TCP header
 * @ipv4_csum_en: enable IPv4 header checksum offload
 * @tcp_ipv4_csum_en: enable TCP checksum offload for IPv4
 * @tcp_ipv6_csum_en: enable TCP checksum offload for IPv6
 * @ip_id: IP identification number
 * @tcp_seq_num: TCP sequence number
 */
struct cdf_tso_cmn_seg_info_t {
    uint16_t ethproto;
    uint16_t ip_tcp_hdr_len;
    uint16_t l2_len;
    unsigned char *eit_hdr;
    unsigned int eit_hdr_len;
    struct tcphdr *tcphdr;
    uint16_t ipv4_csum_en;
    uint16_t tcp_ipv4_csum_en;
    uint16_t tcp_ipv6_csum_en;
    uint16_t ip_id;
    uint32_t tcp_seq_num;
};

/**
 * __cdf_nbuf_get_tso_cmn_seg_info() - get TSO common segment information
 * @skb: network buffer
 * @tso_info: filled with the TSO information common to all segments
 *
 * Get the TSO information that is common across all the TCP
 * segments of the jumbo packet
 *
 * Return: 0 - success, 1 - failure
 */
uint8_t __cdf_nbuf_get_tso_cmn_seg_info(struct sk_buff *skb,
                                        struct cdf_tso_cmn_seg_info_t *tso_info)
{
    /* Get ethernet type and ethernet header length */
    tso_info->ethproto = vlan_get_protocol(skb);

    /* Determine whether this is an IPv4 or IPv6 packet */
    if (tso_info->ethproto == htons(ETH_P_IP)) { /* IPv4 */
        /* for IPv4, get the IP ID and enable TCP and IP csum */
        struct iphdr *ipv4_hdr = ip_hdr(skb);

        tso_info->ip_id = ntohs(ipv4_hdr->id);
        tso_info->ipv4_csum_en = 1;
        tso_info->tcp_ipv4_csum_en = 1;
        if (cdf_unlikely(ipv4_hdr->protocol != IPPROTO_TCP)) {
            cdf_print("TSO IPV4 proto 0x%x not TCP\n",
                      ipv4_hdr->protocol);
            return 1;
        }
    } else if (tso_info->ethproto == htons(ETH_P_IPV6)) { /* IPv6 */
        /* for IPv6, enable TCP csum. No IP ID or IP csum */
        tso_info->tcp_ipv6_csum_en = 1;
    } else {
        cdf_print("TSO: ethertype 0x%x is not supported!\n",
                  tso_info->ethproto);
        return 1;
    }

    tso_info->l2_len = (skb_network_header(skb) - skb_mac_header(skb));
    tso_info->tcphdr = tcp_hdr(skb);
    tso_info->tcp_seq_num = ntohl(tcp_hdr(skb)->seq);
    /* get pointer to the ethernet + IP + TCP header and their length */
    tso_info->eit_hdr = skb->data;
    tso_info->eit_hdr_len = (skb_transport_header(skb)
                             - skb_mac_header(skb)) + tcp_hdrlen(skb);
    tso_info->ip_tcp_hdr_len = tso_info->eit_hdr_len - tso_info->l2_len;
    return 0;
}

/**
 * __cdf_nbuf_get_tso_info() - function to divide a TSO nbuf
 * into segments
 * @osdev: OS device
 * @skb: network buffer to be segmented
 * @tso_info: This is the output. The information about the
 *            TSO segments will be populated within this.
 *
 * This function fragments a TCP jumbo packet into smaller
 * segments to be transmitted by the driver. It chains the TSO
 * segments created into a list.
 *
 * Return: number of TSO segments
 */
uint32_t __cdf_nbuf_get_tso_info(cdf_device_t osdev, struct sk_buff *skb,
                                 struct cdf_tso_info_t *tso_info)
{
    /* common across all segments */
    struct cdf_tso_cmn_seg_info_t tso_cmn_info;

    /* segment specific */
    char *tso_frag_vaddr;
    uint32_t tso_frag_paddr_32 = 0;
    uint32_t num_seg = 0;
    struct cdf_tso_seg_elem_t *curr_seg;
    const struct skb_frag_struct *frag = NULL;
    uint32_t tso_frag_len = 0; /* tso segment's fragment length */
    uint32_t skb_frag_len = 0; /* skb's fragment length (contiguous memory) */
    uint32_t foffset = 0; /* offset into the skb's fragment */
    uint32_t skb_proc = 0; /* bytes of the skb that have been processed */
    uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;

    memset(&tso_cmn_info, 0x0, sizeof(tso_cmn_info));

    if (cdf_unlikely(__cdf_nbuf_get_tso_cmn_seg_info(skb, &tso_cmn_info))) {
        cdf_print("TSO: error getting common segment info\n");
        return 0;
    }
    curr_seg = tso_info->tso_seg_list;

    /* length of the first chunk of data in the skb */
    skb_proc = skb_frag_len = skb->len - skb->data_len;

    /* the 0th tso segment's 0th fragment always contains the EIT header */
    /* update the remaining skb fragment length and TSO segment length */
    skb_frag_len -= tso_cmn_info.eit_hdr_len;
    skb_proc -= tso_cmn_info.eit_hdr_len;

    /* get the address to the next tso fragment */
    tso_frag_vaddr = skb->data + tso_cmn_info.eit_hdr_len;
    /* get the length of the next tso fragment */
    tso_frag_len = min(skb_frag_len, tso_seg_size);
    tso_frag_paddr_32 = dma_map_single(osdev->dev,
                                       tso_frag_vaddr, tso_frag_len,
                                       DMA_TO_DEVICE);

    num_seg = tso_info->num_segs;
    tso_info->num_segs = 0;
    tso_info->is_tso = 1;

    while (num_seg && curr_seg) {
        int i = 1; /* tso fragment index */
        int j = 0; /* skb fragment index */
        uint8_t more_tso_frags = 1;
        uint8_t from_frag_table = 0;

        /* Initialize the flags to 0 */
        memset(&curr_seg->seg, 0x0, sizeof(curr_seg->seg));
        tso_info->num_segs++;

        /* The following fields remain the same across all segments of
           a jumbo packet */
        curr_seg->seg.tso_flags.tso_enable = 1;
        curr_seg->seg.tso_flags.partial_checksum_en = 0;
        curr_seg->seg.tso_flags.ipv4_checksum_en =
            tso_cmn_info.ipv4_csum_en;
        curr_seg->seg.tso_flags.tcp_ipv6_checksum_en =
            tso_cmn_info.tcp_ipv6_csum_en;
        curr_seg->seg.tso_flags.tcp_ipv4_checksum_en =
            tso_cmn_info.tcp_ipv4_csum_en;
        curr_seg->seg.tso_flags.l2_len = 0;
        curr_seg->seg.tso_flags.tcp_flags_mask = 0x1FF;
        curr_seg->seg.num_frags = 0;

        /* The following fields change for the segments */
        curr_seg->seg.tso_flags.ip_id = tso_cmn_info.ip_id;
        tso_cmn_info.ip_id++;

        curr_seg->seg.tso_flags.syn = tso_cmn_info.tcphdr->syn;
        curr_seg->seg.tso_flags.rst = tso_cmn_info.tcphdr->rst;
        curr_seg->seg.tso_flags.psh = tso_cmn_info.tcphdr->psh;
        curr_seg->seg.tso_flags.ack = tso_cmn_info.tcphdr->ack;
        curr_seg->seg.tso_flags.urg = tso_cmn_info.tcphdr->urg;
        curr_seg->seg.tso_flags.ece = tso_cmn_info.tcphdr->ece;
        curr_seg->seg.tso_flags.cwr = tso_cmn_info.tcphdr->cwr;

        curr_seg->seg.tso_flags.tcp_seq_num = tso_cmn_info.tcp_seq_num;

        /* First fragment for each segment always contains the ethernet,
           IP and TCP header */
        curr_seg->seg.tso_frags[0].vaddr = tso_cmn_info.eit_hdr;
        curr_seg->seg.tso_frags[0].length = tso_cmn_info.eit_hdr_len;
        tso_info->total_len = curr_seg->seg.tso_frags[0].length;
        curr_seg->seg.tso_frags[0].paddr_low_32 =
            dma_map_single(osdev->dev, tso_cmn_info.eit_hdr,
                           tso_cmn_info.eit_hdr_len, DMA_TO_DEVICE);
        curr_seg->seg.tso_flags.ip_len = tso_cmn_info.ip_tcp_hdr_len;
        curr_seg->seg.num_frags++;

        while (more_tso_frags) {
            curr_seg->seg.tso_frags[i].vaddr = tso_frag_vaddr;
            curr_seg->seg.tso_frags[i].length = tso_frag_len;
            tso_info->total_len +=
                curr_seg->seg.tso_frags[i].length;
            curr_seg->seg.tso_flags.ip_len +=
                curr_seg->seg.tso_frags[i].length;
            curr_seg->seg.num_frags++;
            skb_proc = skb_proc - curr_seg->seg.tso_frags[i].length;

            /* increment the TCP sequence number */
            tso_cmn_info.tcp_seq_num += tso_frag_len;
            curr_seg->seg.tso_frags[i].paddr_upper_16 = 0;
            curr_seg->seg.tso_frags[i].paddr_low_32 =
                tso_frag_paddr_32;

            /* if there is no more data left in the skb */
            if (!skb_proc)
                return tso_info->num_segs;

            /* get the next payload fragment information */
            /* check if there are more fragments in this segment */
            if ((tso_seg_size - tso_frag_len)) {
                more_tso_frags = 1;
                i++;
            } else {
                more_tso_frags = 0;
                /* reset i and the tso payload size */
                i = 1;
                tso_seg_size = skb_shinfo(skb)->gso_size;
            }

            /* if the next fragment is contiguous */
            if (tso_frag_len < skb_frag_len) {
                skb_frag_len = skb_frag_len - tso_frag_len;
                tso_frag_len = min(skb_frag_len, tso_seg_size);
                tso_frag_vaddr = tso_frag_vaddr + tso_frag_len;
                if (from_frag_table) {
                    tso_frag_paddr_32 =
                        skb_frag_dma_map(osdev->dev,
                                         frag, foffset,
                                         tso_frag_len,
                                         DMA_TO_DEVICE);
                } else {
                    tso_frag_paddr_32 =
                        dma_map_single(osdev->dev,
                                       tso_frag_vaddr,
                                       tso_frag_len,
                                       DMA_TO_DEVICE);
                }
            } else { /* the next fragment is not contiguous */
                tso_frag_len = min(skb_frag_len, tso_seg_size);
                frag = &skb_shinfo(skb)->frags[j];
                skb_frag_len = skb_frag_size(frag);

                tso_frag_vaddr = skb_frag_address(frag);
                tso_frag_paddr_32 = skb_frag_dma_map(osdev->dev,
                                                     frag, 0, tso_frag_len,
                                                     DMA_TO_DEVICE);
                foffset += tso_frag_len;
                from_frag_table = 1;
                j++;
            }
        }
        num_seg--;
        /* if TCP FIN flag was set, set it in the last segment */
        if (!num_seg)
            curr_seg->seg.tso_flags.fin = tso_cmn_info.tcphdr->fin;

        curr_seg = curr_seg->next;
    }
    return tso_info->num_segs;
}
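
/*
 * Illustrative usage sketch, not part of the driver: a TX path would
 * first size the segment list with __cdf_nbuf_get_tso_num_seg() (defined
 * below, assumed declared in cdf_nbuf.h), attach that many pre-allocated
 * cdf_tso_seg_elem_t entries to tso_info->tso_seg_list, and only then
 * call __cdf_nbuf_get_tso_info(), which consumes tso_info->num_segs.
 * The CDF_NBUF_USAGE_EXAMPLES guard is hypothetical.
 */
#ifdef CDF_NBUF_USAGE_EXAMPLES
static uint32_t example_prepare_tso(cdf_device_t osdev, struct sk_buff *skb,
                                    struct cdf_tso_info_t *tso_info)
{
    tso_info->num_segs = __cdf_nbuf_get_tso_num_seg(skb);

    /* caller must chain tso_info->num_segs elements onto
     * tso_info->tso_seg_list before this call */
    return __cdf_nbuf_get_tso_info(osdev, skb, tso_info);
}
#endif /* CDF_NBUF_USAGE_EXAMPLES */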

/**
 * __cdf_nbuf_get_tso_num_seg() - get the number of TSO segments
 * @skb: network buffer to inspect
 *
 * This function counts how many TCP segments the jumbo packet will be
 * divided into, based on the TCP payload length and gso_size.
 *
 * Return: number of TSO segments
 */
uint32_t __cdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
{
    uint32_t gso_size, tmp_len, num_segs = 0;

    gso_size = skb_shinfo(skb)->gso_size;
    tmp_len = skb->len - ((skb_transport_header(skb) - skb_mac_header(skb))
                          + tcp_hdrlen(skb));
    while (tmp_len) {
        num_segs++;
        if (tmp_len > gso_size)
            tmp_len -= gso_size;
        else
            break;
    }
    return num_segs;
}
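
/*
 * Worked example (illustrative): for a 4380-byte TCP payload with a
 * gso_size of 1460, the loop above counts 4380 -> 2920 -> 1460 and then
 * stops, i.e. three segments of one MSS each.
 */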

/**
 * __cdf_nbuf_inc_users() - increment the nbuf reference count
 * @skb: Pointer to network buffer
 *
 * Return: the network buffer, with its user count incremented
 */
struct sk_buff *__cdf_nbuf_inc_users(struct sk_buff *skb)
{
    atomic_inc(&skb->users);
    return skb;
}

#endif /* FEATURE_TSO */