/* Peer event handling, typically ICMP messages.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"

static void rxrpc_store_error(struct rxrpc_peer *, struct sock_exterr_skb *);

/*
 * Find the peer associated with an ICMP packet.
 */
static struct rxrpc_peer *rxrpc_lookup_peer_icmp_rcu(struct rxrpc_local *local,
						     const struct sk_buff *skb)
{
	struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);
	struct sockaddr_rxrpc srx;

	_enter("");

	memset(&srx, 0, sizeof(srx));
	srx.transport_type = local->srx.transport_type;
	srx.transport.family = local->srx.transport.family;
	/* Can we see an ICMP4 packet on an ICMP6 listening socket? And vice
	 * versa?
	 */
	switch (srx.transport.family) {
	case AF_INET:
		srx.transport.sin.sin_port = serr->port;
		srx.transport_len = sizeof(struct sockaddr_in);
		switch (serr->ee.ee_origin) {
		case SO_EE_ORIGIN_ICMP:
			_net("Rx ICMP");
			memcpy(&srx.transport.sin.sin_addr,
			       skb_network_header(skb) + serr->addr_offset,
			       sizeof(struct in_addr));
			break;
		case SO_EE_ORIGIN_ICMP6:
			_net("Rx ICMP6 on v4 sock");
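			/* The reported address is presumably a v4-mapped
			 * IPv6 address (::ffff:a.b.c.d); the IPv4 part
			 * occupies the last four bytes, hence the offset
			 * of 12.
			 */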
			memcpy(&srx.transport.sin.sin_addr,
			       skb_network_header(skb) + serr->addr_offset + 12,
			       sizeof(struct in_addr));
			break;
		default:
			memcpy(&srx.transport.sin.sin_addr, &ip_hdr(skb)->saddr,
			       sizeof(struct in_addr));
			break;
		}
		break;

#ifdef CONFIG_AF_RXRPC_IPV6
	case AF_INET6:
		srx.transport.sin6.sin6_port = serr->port;
		srx.transport_len = sizeof(struct sockaddr_in6);
		switch (serr->ee.ee_origin) {
		case SO_EE_ORIGIN_ICMP6:
			_net("Rx ICMP6");
			memcpy(&srx.transport.sin6.sin6_addr,
			       skb_network_header(skb) + serr->addr_offset,
			       sizeof(struct in6_addr));
			break;
		case SO_EE_ORIGIN_ICMP:
			_net("Rx ICMP on v6 sock");
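			/* Drop the IPv4 source address into the last four
			 * bytes of sin6_addr; the rest of the address was
			 * zeroed by the memset() above.
			 */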
			memcpy(srx.transport.sin6.sin6_addr.s6_addr + 12,
			       skb_network_header(skb) + serr->addr_offset,
			       sizeof(struct in_addr));
			break;
		default:
			memcpy(&srx.transport.sin6.sin6_addr,
			       &ipv6_hdr(skb)->saddr,
			       sizeof(struct in6_addr));
			break;
		}
		break;
#endif

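	/* The transport family is fixed when the local endpoint is set up,
	 * so anything else here should be unreachable.
	 */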
	default:
		BUG();
	}

	return rxrpc_lookup_peer_rcu(local, &srx);
}

/*
 * Handle an MTU/fragmentation problem.
 */
static void rxrpc_adjust_mtu(struct rxrpc_peer *peer, struct sock_exterr_skb *serr)
{
	u32 mtu = serr->ee.ee_info;

	_net("Rx ICMP Fragmentation Needed (%d)", mtu);

	/* wind down the local interface MTU */
	if (mtu > 0 && peer->if_mtu == 65535 && mtu < peer->if_mtu) {
		peer->if_mtu = mtu;
		_net("I/F MTU %u", mtu);
	}

	if (mtu == 0) {
		/* they didn't give us a size, estimate one */
		mtu = peer->if_mtu;
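		/* Above the standard Ethernet MTU, halve towards 1500;
		 * below it, back off by 100 bytes at a time, but never go
		 * below the size needed to carry our own headers.
		 */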
		if (mtu > 1500) {
			mtu >>= 1;
			if (mtu < 1500)
				mtu = 1500;
		} else {
			mtu -= 100;
			if (mtu < peer->hdrsize)
				mtu = peer->hdrsize + 4;
		}
	}

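	/* Only ever shrink the path MTU.  mtu and maxdata are updated
	 * together under the peer lock, presumably so that readers see a
	 * consistent pair.
	 */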
	if (mtu < peer->mtu) {
		spin_lock_bh(&peer->lock);
		peer->mtu = mtu;
		peer->maxdata = peer->mtu - peer->hdrsize;
		spin_unlock_bh(&peer->lock);
		_net("Net MTU %u (maxdata %u)",
		     peer->mtu, peer->maxdata);
	}
}

/*
 * Handle an error received on the local endpoint.
 */
void rxrpc_error_report(struct sock *sk)
{
	struct sock_exterr_skb *serr;
	struct rxrpc_local *local = sk->sk_user_data;
	struct rxrpc_peer *peer;
	struct sk_buff *skb;

	_enter("%p{%d}", sk, local->debug_id);

	skb = sock_dequeue_err_skb(sk);
	if (!skb) {
		_leave("UDP socket errqueue empty");
		return;
	}
	rxrpc_new_skb(skb, rxrpc_skb_rx_received);
	serr = SKB_EXT_ERR(skb);
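	/* Zero-length messages of timestamping origin carry no error to
	 * report; just discard them.
	 */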
	if (!skb->len && serr->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) {
		_leave("UDP empty message");
		rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
		return;
	}

	rcu_read_lock();
	peer = rxrpc_lookup_peer_icmp_rcu(local, skb);
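	/* The peer may already be heading for destruction; only use it if
	 * we can still get a reference on it.
	 */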
	if (peer && !rxrpc_get_peer_maybe(peer))
		peer = NULL;
	if (!peer) {
		rcu_read_unlock();
		rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
		_leave(" [no peer]");
		return;
	}

	if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP &&
	    serr->ee.ee_type == ICMP_DEST_UNREACH &&
	    serr->ee.ee_code == ICMP_FRAG_NEEDED) {
		rxrpc_adjust_mtu(peer, serr);
		rcu_read_unlock();
		rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
		rxrpc_put_peer(peer);
		_leave(" [MTU update]");
		return;
	}

	rxrpc_store_error(peer, serr);
	rcu_read_unlock();
	rxrpc_free_skb(skb, rxrpc_skb_rx_freed);

	/* The ref we obtained is passed off to the work item */
	rxrpc_queue_work(&peer->error_distributor);
	_leave("");
}

/*
 * Map an error report to error codes on the peer record.
 */
static void rxrpc_store_error(struct rxrpc_peer *peer,
			      struct sock_exterr_skb *serr)
{
	struct sock_extended_err *ee;
	int err;

	_enter("");

	ee = &serr->ee;

	_net("Rx Error o=%d t=%d c=%d e=%d",
	     ee->ee_origin, ee->ee_type, ee->ee_code, ee->ee_errno);

	err = ee->ee_errno;

	switch (ee->ee_origin) {
	case SO_EE_ORIGIN_ICMP:
		switch (ee->ee_type) {
		case ICMP_DEST_UNREACH:
			switch (ee->ee_code) {
			case ICMP_NET_UNREACH:
				_net("Rx Received ICMP Network Unreachable");
				break;
			case ICMP_HOST_UNREACH:
				_net("Rx Received ICMP Host Unreachable");
				break;
			case ICMP_PORT_UNREACH:
				_net("Rx Received ICMP Port Unreachable");
				break;
			case ICMP_NET_UNKNOWN:
				_net("Rx Received ICMP Unknown Network");
				break;
			case ICMP_HOST_UNKNOWN:
				_net("Rx Received ICMP Unknown Host");
				break;
			default:
				_net("Rx Received ICMP DestUnreach code=%u",
				     ee->ee_code);
				break;
			}
			break;

		case ICMP_TIME_EXCEEDED:
			_net("Rx Received ICMP TTL Exceeded");
			break;

		default:
			_proto("Rx Received ICMP error { type=%u code=%u }",
			       ee->ee_type, ee->ee_code);
			break;
		}
		break;

	case SO_EE_ORIGIN_NONE:
	case SO_EE_ORIGIN_LOCAL:
		_proto("Rx Received local error { error=%d }", err);
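		/* Bias local errors upwards by RXRPC_LOCAL_ERROR_OFFSET so
		 * that the error distributor can tell them apart from
		 * network errors (it subtracts the offset again).
		 */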
		err += RXRPC_LOCAL_ERROR_OFFSET;
		break;

	case SO_EE_ORIGIN_ICMP6:
	default:
		_proto("Rx Received error report { orig=%u }", ee->ee_origin);
		break;
	}

	peer->error_report = err;
}

/*
 * Distribute an error that occurred on a peer.
 */
void rxrpc_peer_error_distributor(struct work_struct *work)
{
	struct rxrpc_peer *peer =
		container_of(work, struct rxrpc_peer, error_distributor);
	struct rxrpc_call *call;
	enum rxrpc_call_completion compl;
	int error;

	_enter("");

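	/* Decode the stored error: values at or above
	 * RXRPC_LOCAL_ERROR_OFFSET were stamped by rxrpc_store_error() as
	 * locally-originated errors.
	 */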
	error = READ_ONCE(peer->error_report);
	if (error < RXRPC_LOCAL_ERROR_OFFSET) {
		compl = RXRPC_CALL_NETWORK_ERROR;
	} else {
		compl = RXRPC_CALL_LOCAL_ERROR;
		error -= RXRPC_LOCAL_ERROR_OFFSET;
	}

	_debug("ISSUE ERROR %s %d", rxrpc_call_completions[compl], error);

	spin_lock_bh(&peer->lock);

	while (!hlist_empty(&peer->error_targets)) {
		call = hlist_entry(peer->error_targets.first,
				   struct rxrpc_call, error_link);
		hlist_del_init(&call->error_link);
		rxrpc_see_call(call);

		if (rxrpc_set_call_completion(call, compl, 0, error))
			rxrpc_notify_socket(call);
	}

	spin_unlock_bh(&peer->lock);

	rxrpc_put_peer(peer);
	_leave("");
}

/*
 * Add RTT information to the cache.  This is called in softirq mode and has
 * exclusive access to the peer RTT data.
 */
void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
			rxrpc_serial_t send_serial, rxrpc_serial_t resp_serial,
			ktime_t send_time, ktime_t resp_time)
{
	struct rxrpc_peer *peer = call->peer;
	s64 rtt;
	u64 sum = peer->rtt_sum, avg;
	u8 cursor = peer->rtt_cursor, usage = peer->rtt_usage;

	rtt = ktime_to_ns(ktime_sub(resp_time, send_time));
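	/* A negative interval means the response apparently predates the
	 * request (probably clock trouble); discard the sample.
	 */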
	if (rtt < 0)
		return;

	/* Replace the oldest datum in the RTT buffer */
	sum -= peer->rtt_cache[cursor];
	sum += rtt;
	peer->rtt_cache[cursor] = rtt;
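	/* Masking the advanced cursor like this relies on
	 * RXRPC_RTT_CACHE_SIZE being a power of two.
	 */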
	peer->rtt_cursor = (cursor + 1) & (RXRPC_RTT_CACHE_SIZE - 1);
	peer->rtt_sum = sum;
	if (usage < RXRPC_RTT_CACHE_SIZE) {
		usage++;
		peer->rtt_usage = usage;
	}

	/* Now recalculate the average */
	if (usage == RXRPC_RTT_CACHE_SIZE) {
		avg = sum / RXRPC_RTT_CACHE_SIZE;
	} else {
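		/* Dividing by the constant power-of-two size above reduces
		 * to a shift; dividing by a variable count needs do_div()
		 * so it also links on 32-bit architectures.
		 */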
		avg = sum;
		do_div(avg, usage);
	}

	peer->rtt = avg;
	trace_rxrpc_rtt_rx(call, why, send_serial, resp_serial, rtt,
			   usage, avg);
}