/* Error message handling (ICMP)
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"
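
/*
 * Note: this report handler is assumed to be installed on the local
 * endpoint's UDP socket when the endpoint is set up (see ar-local.c),
 * along the lines of:
 *
 *	sock->sk->sk_user_data	  = local;
 *	sock->sk->sk_error_report = rxrpc_UDP_error_report;
 *
 * with IP_RECVERR enabled on that socket so that ICMP errors are queued
 * on sk->sk_error_queue rather than being discarded.
 */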

/*
 * handle an error received on the local endpoint
 */
void rxrpc_UDP_error_report(struct sock *sk)
{
	struct sock_exterr_skb *serr;
	struct rxrpc_transport *trans;
	struct rxrpc_local *local = sk->sk_user_data;
	struct rxrpc_peer *peer;
	struct sk_buff *skb;
	__be32 addr;
	__be16 port;

	_enter("%p{%d}", sk, local->debug_id);

	skb = skb_dequeue(&sk->sk_error_queue);
	if (!skb) {
		_leave("UDP socket errqueue empty");
		return;
	}
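
	/* note the new skb in rxrpc's skb accounting */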
	rxrpc_new_skb(skb);
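
	/* the error-queueing code stashes the offending packet's destination
	 * address in the error skb at serr->addr_offset */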
	serr = SKB_EXT_ERR(skb);
	addr = *(__be32 *)(skb_network_header(skb) + serr->addr_offset);
	port = serr->port;

	_net("Rx UDP Error from %pI4:%hu", &addr, ntohs(port));
	_debug("Msg l:%d d:%d", skb->len, skb->data_len);
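
	/* find the peer the report pertains to; the lookup returns with a
	 * reference held that must be put when we're done */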
	peer = rxrpc_find_peer(local, addr, port);
	if (IS_ERR(peer)) {
		rxrpc_free_skb(skb);
		_leave(" [no peer]");
		return;
	}

	trans = rxrpc_find_transport(local, peer);
	if (!trans) {
		rxrpc_put_peer(peer);
		rxrpc_free_skb(skb);
		_leave(" [no trans]");
		return;
	}
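
	/* ICMP Fragmentation Needed is how path-MTU discovery tells us to use
	 * smaller packets; ee_info carries the next-hop MTU if the reporting
	 * router supplied one */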
	if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP &&
	    serr->ee.ee_type == ICMP_DEST_UNREACH &&
	    serr->ee.ee_code == ICMP_FRAG_NEEDED
	    ) {
		u32 mtu = serr->ee.ee_info;

		_net("Rx Received ICMP Fragmentation Needed (%d)", mtu);

		/* wind down the local interface MTU */
		if (mtu > 0 && peer->if_mtu == 65535 && mtu < peer->if_mtu) {
			peer->if_mtu = mtu;
			_net("I/F MTU %u", mtu);
		}

		/* ip_rt_frag_needed() may have eaten the info */
		if (mtu == 0)
			mtu = ntohs(icmp_hdr(skb)->un.frag.mtu);

		if (mtu == 0) {
			/* they didn't give us a size, estimate one from the
			 * interface MTU (without this seed, the arithmetic
			 * below would operate on zero and wrap) */
			mtu = peer->if_mtu;
			if (mtu > 1500) {
				mtu >>= 1;
				if (mtu < 1500)
					mtu = 1500;
			} else {
				mtu -= 100;
				if (mtu < peer->hdrsize)
					mtu = peer->hdrsize + 4;
			}
		}

		if (mtu < peer->mtu) {
			spin_lock_bh(&peer->lock);
			peer->mtu = mtu;
			peer->maxdata = peer->mtu - peer->hdrsize;
			spin_unlock_bh(&peer->lock);
			_net("Net MTU %u (maxdata %u)",
			     peer->mtu, peer->maxdata);
		}
	}
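
	/* we're done with the peer record itself; the transport we hand to
	 * the work item keeps its own reference to the peer */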
	rxrpc_put_peer(peer);

	/* pass the transport ref to error_handler to release */
	skb_queue_tail(&trans->error_queue, skb);
	rxrpc_queue_work(&trans->error_handler);

	/* reset and regenerate the socket error; dequeueing the report
	 * doesn't clear sk_err, so resynchronise it with whatever is now at
	 * the head of the error queue */
	spin_lock_bh(&sk->sk_error_queue.lock);
	sk->sk_err = 0;
	skb = skb_peek(&sk->sk_error_queue);
	if (skb) {
		sk->sk_err = SKB_EXT_ERR(skb)->ee.ee_errno;
		spin_unlock_bh(&sk->sk_error_queue.lock);
		sk->sk_error_report(sk);
	} else {
		spin_unlock_bh(&sk->sk_error_queue.lock);
	}

	_leave("");
}

/*
 * deal with UDP error messages queued for us by rxrpc_UDP_error_report();
 * run in workqueue context
 */
void rxrpc_UDP_error_handler(struct work_struct *work)
{
	struct sock_extended_err *ee;
	struct sock_exterr_skb *serr;
	struct rxrpc_transport *trans =
		container_of(work, struct rxrpc_transport, error_handler);
	struct sk_buff *skb;
	int local, err;

	_enter("");

	skb = skb_dequeue(&trans->error_queue);
	if (!skb)
		return;

	serr = SKB_EXT_ERR(skb);
	ee = &serr->ee;

	_net("Rx Error o=%d t=%d c=%d e=%d",
	     ee->ee_origin, ee->ee_type, ee->ee_code, ee->ee_errno);

	err = ee->ee_errno;
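
	/* translate the ICMP report into the error number that will be
	 * issued to the calls affected by this peer */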
	switch (ee->ee_origin) {
	case SO_EE_ORIGIN_ICMP:
		local = 0;
		switch (ee->ee_type) {
		case ICMP_DEST_UNREACH:
			switch (ee->ee_code) {
			case ICMP_NET_UNREACH:
				_net("Rx Received ICMP Network Unreachable");
				err = ENETUNREACH;
				break;
			case ICMP_HOST_UNREACH:
				_net("Rx Received ICMP Host Unreachable");
				err = EHOSTUNREACH;
				break;
			case ICMP_PORT_UNREACH:
				_net("Rx Received ICMP Port Unreachable");
				err = ECONNREFUSED;
				break;
			case ICMP_FRAG_NEEDED:
				_net("Rx Received ICMP Fragmentation Needed (%d)",
				     ee->ee_info);
				err = 0; /* dealt with elsewhere */
				break;
			case ICMP_NET_UNKNOWN:
				_net("Rx Received ICMP Unknown Network");
				err = ENETUNREACH;
				break;
			case ICMP_HOST_UNKNOWN:
				_net("Rx Received ICMP Unknown Host");
				err = EHOSTUNREACH;
				break;
			default:
				_net("Rx Received ICMP DestUnreach code=%u",
				     ee->ee_code);
				break;
			}
			break;

		case ICMP_TIME_EXCEEDED:
			_net("Rx Received ICMP TTL Exceeded");
			break;

		default:
			_proto("Rx Received ICMP error { type=%u code=%u }",
			       ee->ee_type, ee->ee_code);
			break;
		}
		break;

	case SO_EE_ORIGIN_LOCAL:
		_proto("Rx Received local error { error=%d }",
		       ee->ee_errno);
		local = 1;
		break;

	case SO_EE_ORIGIN_NONE:
	case SO_EE_ORIGIN_ICMP6:
	default:
		_proto("Rx Received error report { orig=%u }",
		       ee->ee_origin);
		local = 0;
		break;
	}

	/* terminate all the affected calls if there's an unrecoverable
	 * error */
	if (err) {
		struct rxrpc_call *call, *_n;

		_debug("ISSUE ERROR %d", err);

		spin_lock_bh(&trans->peer->lock);
		trans->peer->net_error = err;

		list_for_each_entry_safe(call, _n, &trans->peer->error_targets,
					 error_link) {
			write_lock(&call->state_lock);
			if (call->state != RXRPC_CALL_COMPLETE &&
			    call->state < RXRPC_CALL_NETWORK_ERROR) {
				call->state = RXRPC_CALL_NETWORK_ERROR;
				set_bit(RXRPC_CALL_RCVD_ERROR, &call->events);
				rxrpc_queue_call(call);
			}
			write_unlock(&call->state_lock);
			list_del_init(&call->error_link);
		}

		spin_unlock_bh(&trans->peer->lock);
	}
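
	/* more reports may have been queued whilst we were busy; if so,
	 * reschedule ourselves to deal with them */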
	if (!skb_queue_empty(&trans->error_queue))
		rxrpc_queue_work(&trans->error_handler);

	rxrpc_free_skb(skb);
	rxrpc_put_transport(trans);
	_leave("");
}