/* ar-skbuff.c: socket buffer destruction handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

#define select_skb_count(op) (op >= rxrpc_skb_tx_cleaned ? &rxrpc_n_tx_skbs : &rxrpc_n_rx_skbs)
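
/* The macro above relies on enum rxrpc_skb_trace grouping all Tx-side ops at
 * or above rxrpc_skb_tx_cleaned; it picks the matching outstanding-skb
 * counter (rxrpc_n_tx_skbs or rxrpc_n_rx_skbs, presumably declared in
 * ar-internal.h) so Tx and Rx buffers can be leak-checked independently.
 */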

/*
 * Note the allocation or reception of a socket buffer.
 */
void rxrpc_new_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
{
	const void *here = __builtin_return_address(0);
	int n = atomic_inc_return(select_skb_count(op));

	trace_rxrpc_skb(skb, op, atomic_read(&skb->users), n, here);
}

/*
 * Note the re-emergence of a socket buffer from a queue or buffer.
 */
void rxrpc_see_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
{
	const void *here = __builtin_return_address(0);

	if (skb) {
		int n = atomic_read(select_skb_count(op));

		/* Ownership isn't changing, so the count is only read. */
		trace_rxrpc_skb(skb, op, atomic_read(&skb->users), n, here);
	}
}

/*
 * Note the addition of a ref on a socket buffer.
 */
void rxrpc_get_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
{
	const void *here = __builtin_return_address(0);
	int n = atomic_inc_return(select_skb_count(op));

	/* The trace records the usage count before skb_get() bumps it. */
	trace_rxrpc_skb(skb, op, atomic_read(&skb->users), n, here);
	skb_get(skb);
}

/*
 * Note the destruction of a socket buffer.
 */
void rxrpc_free_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
{
	const void *here = __builtin_return_address(0);

	if (skb) {
		int n;

		/* Trap skbs that have already been freed back to the slab. */
		CHECK_SLAB_OKAY(&skb->users);
		n = atomic_dec_return(select_skb_count(op));
		trace_rxrpc_skb(skb, op, atomic_read(&skb->users), n, here);
		kfree_skb(skb);
	}
}

/*
 * Note the injected loss of a socket buffer.
 */
void rxrpc_lose_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
{
	const void *here = __builtin_return_address(0);

	if (skb) {
		int n;

		CHECK_SLAB_OKAY(&skb->users);
		if (op == rxrpc_skb_tx_lost) {
			/* A Tx packet "lost" by the injector stays queued
			 * for retransmission, so only trace it; don't
			 * adjust the count or free it.
			 */
			n = atomic_read(select_skb_count(op));
			trace_rxrpc_skb(skb, op, atomic_read(&skb->users), n, here);
		} else {
			n = atomic_dec_return(select_skb_count(op));
			trace_rxrpc_skb(skb, op, atomic_read(&skb->users), n, here);
			kfree_skb(skb);
		}
	}
}
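
/* A plausible Tx-side caller (an illustrative sketch, not from this file):
 * loss injection would typically be gated on a debug option such as
 * CONFIG_AF_RXRPC_INJECT_LOSS, e.g.
 *
 *	if (IS_ENABLED(CONFIG_AF_RXRPC_INJECT_LOSS) &&
 *	    (prandom_u32() & 7) == 0) {
 *		rxrpc_lose_skb(skb, rxrpc_skb_tx_lost);
 *		return 0;	// pretend the packet went out
 *	}
 */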

/*
 * Clear a queue of socket buffers.
 */
void rxrpc_purge_queue(struct sk_buff_head *list)
{
	const void *here = __builtin_return_address(0);
	struct sk_buff *skb;

	while ((skb = skb_dequeue(list)) != NULL) {
		int n = atomic_dec_return(select_skb_count(rxrpc_skb_rx_purged));

		trace_rxrpc_skb(skb, rxrpc_skb_rx_purged,
				atomic_read(&skb->users), n, here);
		kfree_skb(skb);
	}
}
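
/* Example Rx-side lifecycle (an illustrative sketch; the rxrpc_skb_rx_*
 * values and the local->event_queue field are assumed to exist elsewhere in
 * rxrpc, alongside the enum used in this file):
 *
 *	rxrpc_new_skb(skb, rxrpc_skb_rx_received);	// count incremented
 *	skb_queue_tail(&local->event_queue, skb);
 *	...
 *	skb = skb_dequeue(&local->event_queue);
 *	rxrpc_see_skb(skb, rxrpc_skb_rx_seen);		// count unchanged
 *	...
 *	rxrpc_free_skb(skb, rxrpc_skb_rx_freed);	// count decremented
 */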