/* Error message handling (ICMP)
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"

/*
 * Find the peer associated with an ICMP packet.
 */
static struct rxrpc_peer *rxrpc_lookup_peer_icmp_rcu(struct rxrpc_local *local,
						     const struct sk_buff *skb)
{
	struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);
	struct sockaddr_rxrpc srx;

	_enter("");

	memset(&srx, 0, sizeof(srx));
	srx.transport_type = local->srx.transport_type;
	srx.transport.family = local->srx.transport.family;

	/* Can we see an ICMP4 packet on an ICMP6 listening socket?  And vice
	 * versa?
	 */
	switch (srx.transport.family) {
	case AF_INET:
		srx.transport.sin.sin_port = serr->port;
		srx.transport_len = sizeof(struct sockaddr_in);
		switch (serr->ee.ee_origin) {
		case SO_EE_ORIGIN_ICMP:
			_net("Rx ICMP");
			memcpy(&srx.transport.sin.sin_addr,
			       skb_network_header(skb) + serr->addr_offset,
			       sizeof(struct in_addr));
			break;
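		/* Presumably a v4-mapped IPv6 address: the IPv4 address
		 * occupies the last four bytes of the 16-byte IPv6 address,
		 * hence the extra offset of 12 below.
		 */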
		case SO_EE_ORIGIN_ICMP6:
			_net("Rx ICMP6 on v4 sock");
			memcpy(&srx.transport.sin.sin_addr,
			       skb_network_header(skb) + serr->addr_offset + 12,
			       sizeof(struct in_addr));
			break;
		default:
			memcpy(&srx.transport.sin.sin_addr, &ip_hdr(skb)->saddr,
			       sizeof(struct in_addr));
			break;
		}
		break;

	default:
		BUG();
	}

	return rxrpc_lookup_peer_rcu(local, &srx);
}

/*
 * Handle an MTU/fragmentation problem.
 */
static void rxrpc_adjust_mtu(struct rxrpc_peer *peer, struct sock_exterr_skb *serr)
{
	u32 mtu = serr->ee.ee_info;

	_net("Rx ICMP Fragmentation Needed (%d)", mtu);

	/* wind down the local interface MTU */
	if (mtu > 0 && peer->if_mtu == 65535 && mtu < peer->if_mtu) {
		peer->if_mtu = mtu;
		_net("I/F MTU %u", mtu);
	}

	if (mtu == 0) {
		/* They didn't give us a size - estimate one by halving large
		 * interface MTUs down towards the common 1500-byte Ethernet
		 * MTU, or backing smaller ones off by 100 bytes, clamping to
		 * just above the header size.
		 */
		mtu = peer->if_mtu;
		if (mtu > 1500) {
			mtu >>= 1;
			if (mtu < 1500)
				mtu = 1500;
		} else {
			mtu -= 100;
			if (mtu < peer->hdrsize)
				mtu = peer->hdrsize + 4;
		}
	}

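	/* Only ever reduce the path MTU; update it and the derived maximum
	 * data size together under the peer lock.
	 */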
	if (mtu < peer->mtu) {
		spin_lock_bh(&peer->lock);
		peer->mtu = mtu;
		peer->maxdata = peer->mtu - peer->hdrsize;
		spin_unlock_bh(&peer->lock);
		_net("Net MTU %u (maxdata %u)",
		     peer->mtu, peer->maxdata);
	}
}

/*
 * handle an error received on the local endpoint
 */
void rxrpc_error_report(struct sock *sk)
{
	struct sock_exterr_skb *serr;
	struct rxrpc_transport *trans;
	struct rxrpc_local *local = sk->sk_user_data;
	struct rxrpc_peer *peer;
	struct sk_buff *skb;

	_enter("%p{%d}", sk, local->debug_id);

	skb = sock_dequeue_err_skb(sk);
	if (!skb) {
		_leave("UDP socket errqueue empty");
		return;
	}
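	/* Zero-length messages of timestamping origin are TX timestamps
	 * queued on the error queue rather than network errors; just discard
	 * them.
	 */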
	serr = SKB_EXT_ERR(skb);
	if (!skb->len && serr->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) {
		_leave("UDP empty message");
		kfree_skb(skb);
		return;
	}

	rxrpc_new_skb(skb);

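	/* Look up the peer under RCU; rxrpc_get_peer_maybe() only takes a
	 * ref if the peer's refcount hasn't already fallen to zero.
	 */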
	rcu_read_lock();
	peer = rxrpc_lookup_peer_icmp_rcu(local, skb);
	if (peer && !rxrpc_get_peer_maybe(peer))
		peer = NULL;
	if (!peer) {
		rcu_read_unlock();
		rxrpc_free_skb(skb);
		_leave(" [no peer]");
		return;
	}

	trans = rxrpc_find_transport(local, peer);
	if (!trans) {
		rcu_read_unlock();
		rxrpc_put_peer(peer);
		rxrpc_free_skb(skb);
		_leave(" [no trans]");
		return;
	}

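	/* For an ICMP Fragmentation Needed report, ee_info carries the
	 * next-hop MTU supplied by the reporting router.
	 */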
	if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP &&
	    serr->ee.ee_type == ICMP_DEST_UNREACH &&
	    serr->ee.ee_code == ICMP_FRAG_NEEDED) {
		rxrpc_adjust_mtu(peer, serr);
		rxrpc_free_skb(skb);
		skb = NULL;
		goto out;
	}

out:
	rcu_read_unlock();
	rxrpc_put_peer(peer);

	if (skb) {
		/* pass the transport ref to error_handler to release */
		skb_queue_tail(&trans->error_queue, skb);
		rxrpc_queue_work(&trans->error_handler);
	} else {
		rxrpc_put_transport(trans);
	}
	_leave("");
}

/*
 * deal with UDP error messages
 */
void rxrpc_UDP_error_handler(struct work_struct *work)
{
	struct sock_extended_err *ee;
	struct sock_exterr_skb *serr;
	struct rxrpc_transport *trans =
		container_of(work, struct rxrpc_transport, error_handler);
	struct sk_buff *skb;
	int err;

	_enter("");

	skb = skb_dequeue(&trans->error_queue);
	if (!skb)
		return;

	serr = SKB_EXT_ERR(skb);
	ee = &serr->ee;

	_net("Rx Error o=%d t=%d c=%d e=%d",
	     ee->ee_origin, ee->ee_type, ee->ee_code, ee->ee_errno);

	err = ee->ee_errno;

	switch (ee->ee_origin) {
	case SO_EE_ORIGIN_ICMP:
		switch (ee->ee_type) {
		case ICMP_DEST_UNREACH:
			switch (ee->ee_code) {
			case ICMP_NET_UNREACH:
				_net("Rx Received ICMP Network Unreachable");
				break;
			case ICMP_HOST_UNREACH:
				_net("Rx Received ICMP Host Unreachable");
				break;
			case ICMP_PORT_UNREACH:
				_net("Rx Received ICMP Port Unreachable");
				break;
			case ICMP_NET_UNKNOWN:
				_net("Rx Received ICMP Unknown Network");
				break;
			case ICMP_HOST_UNKNOWN:
				_net("Rx Received ICMP Unknown Host");
				break;
			default:
				_net("Rx Received ICMP DestUnreach code=%u",
				     ee->ee_code);
				break;
			}
			break;

		case ICMP_TIME_EXCEEDED:
			_net("Rx Received ICMP TTL Exceeded");
			break;

		default:
			_proto("Rx Received ICMP error { type=%u code=%u }",
			       ee->ee_type, ee->ee_code);
			break;
		}
		break;

	case SO_EE_ORIGIN_LOCAL:
		_proto("Rx Received local error { error=%d }", err);
		break;

	case SO_EE_ORIGIN_NONE:
	case SO_EE_ORIGIN_ICMP6:
	default:
		_proto("Rx Received error report { orig=%u }", ee->ee_origin);
		break;
	}

	/* terminate all the affected calls if there's an unrecoverable
	 * error */
	if (err) {
		struct rxrpc_call *call, *_n;

		_debug("ISSUE ERROR %d", err);

		spin_lock_bh(&trans->peer->lock);
		trans->peer->net_error = err;

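		/* Flag every call on the peer's error_targets list and queue
		 * it so that its processor can pick up peer->net_error and
		 * act on it.
		 */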
		list_for_each_entry_safe(call, _n, &trans->peer->error_targets,
					 error_link) {
			write_lock(&call->state_lock);
			if (call->state != RXRPC_CALL_COMPLETE &&
			    call->state < RXRPC_CALL_NETWORK_ERROR) {
				call->state = RXRPC_CALL_NETWORK_ERROR;
				set_bit(RXRPC_CALL_EV_RCVD_ERROR, &call->events);
				rxrpc_queue_call(call);
			}
			write_unlock(&call->state_lock);
			list_del_init(&call->error_link);
		}

		spin_unlock_bh(&trans->peer->lock);
	}

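	/* More reports may have been queued whilst we were busy; if so, run
	 * again.
	 */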
	if (!skb_queue_empty(&trans->error_queue))
		rxrpc_queue_work(&trans->error_handler);

	rxrpc_free_skb(skb);
	rxrpc_put_transport(trans);
	_leave("");
}