/* connection.c: Rx connection routines
 *
 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <rxrpc/rxrpc.h>
#include <rxrpc/transport.h>
#include <rxrpc/peer.h>
#include <rxrpc/connection.h>
#include <rxrpc/call.h>
#include <rxrpc/message.h>
#include <linux/udp.h>
#include <linux/ip.h>
#include <net/sock.h>
#include <asm/uaccess.h>
#include "internal.h"

__RXACCT_DECL(atomic_t rxrpc_connection_count);

LIST_HEAD(rxrpc_conns);
DECLARE_RWSEM(rxrpc_conns_sem);
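/* time (in seconds) that an unused connection may linger in a peer's
 * graveyard before it is finally destroyed (one hour by default)
 */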
unsigned long rxrpc_conn_timeout = 60 * 60;

static void rxrpc_conn_do_timeout(struct rxrpc_connection *conn);

static void __rxrpc_conn_timeout(rxrpc_timer_t *timer)
{
	struct rxrpc_connection *conn =
		list_entry(timer, struct rxrpc_connection, timeout);

	_debug("Rx CONN TIMEOUT [%p{u=%d}]", conn, atomic_read(&conn->usage));

	rxrpc_conn_do_timeout(conn);
}

static const struct rxrpc_timer_ops rxrpc_conn_timer_ops = {
	.timed_out	= __rxrpc_conn_timeout,
};
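
/* Note on connection lifetime: rxrpc_put_connection() does not free a record
 * whose usage count has reached zero; it parks it on the owning peer's
 * graveyard list and arms the timer above for rxrpc_conn_timeout seconds.
 * Unless something resurrects the connection first, __rxrpc_conn_timeout()
 * then hands it to rxrpc_conn_do_timeout() for destruction.
 */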

/*****************************************************************************/
/*
 * create a new connection record
 */
static inline int __rxrpc_create_connection(struct rxrpc_peer *peer,
					    struct rxrpc_connection **_conn)
{
	struct rxrpc_connection *conn;

	_enter("%p", peer);

	/* allocate and initialise a connection record */
	conn = kmalloc(sizeof(struct rxrpc_connection), GFP_KERNEL);
	if (!conn) {
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}

	memset(conn, 0, sizeof(struct rxrpc_connection));
	atomic_set(&conn->usage, 1);

	INIT_LIST_HEAD(&conn->link);
	INIT_LIST_HEAD(&conn->id_link);
	init_waitqueue_head(&conn->chanwait);
	spin_lock_init(&conn->lock);
	rxrpc_timer_init(&conn->timeout, &rxrpc_conn_timer_ops);

	do_gettimeofday(&conn->atime);
	conn->mtu_size = 1024;
	conn->peer = peer;
	conn->trans = peer->trans;

	__RXACCT(atomic_inc(&rxrpc_connection_count));
	*_conn = conn;
	_leave(" = 0 (%p)", conn);

	return 0;
} /* end __rxrpc_create_connection() */

/*****************************************************************************/
/*
 * create a new connection record for outgoing connections
 */
int rxrpc_create_connection(struct rxrpc_transport *trans,
			    __be16 port,
			    __be32 addr,
			    uint16_t service_id,
			    void *security,
			    struct rxrpc_connection **_conn)
{
	struct rxrpc_connection *candidate, *conn;
	struct rxrpc_peer *peer;
	struct list_head *_p;
	__be32 connid;
	int ret;

	_enter("%p{%hu},%u,%hu", trans, trans->port, ntohs(port), service_id);

	/* get a peer record */
	ret = rxrpc_peer_lookup(trans, addr, &peer);
	if (ret < 0) {
		_leave(" = %d", ret);
		return ret;
	}

	/* allocate and initialise a connection record */
	ret = __rxrpc_create_connection(peer, &candidate);
	if (ret < 0) {
		rxrpc_put_peer(peer);
		_leave(" = %d", ret);
		return ret;
	}

	/* fill in the specific bits */
	candidate->addr.sin_family = AF_INET;
	candidate->addr.sin_port = port;
	candidate->addr.sin_addr.s_addr = addr;

	candidate->in_epoch = rxrpc_epoch;
	candidate->out_epoch = rxrpc_epoch;
	candidate->in_clientflag = 0;
	candidate->out_clientflag = RXRPC_CLIENT_INITIATED;
	candidate->service_id = htons(service_id);

	/* invent a unique connection ID */
	write_lock(&peer->conn_idlock);

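	/* IDs are handed out RXRPC_MAXCALLS at a time, presumably so that the
	 * bottom bits of the CID remain free to carry the channel number (see
	 * rxrpc_conn_newmsg(), which ORs call->chan_ix into conn_id); if the
	 * counter wraps onto an ID that is still in the peer's list, we just
	 * move on and try the next one
	 */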
 try_next_id:
	connid = htonl(peer->conn_idcounter & RXRPC_CIDMASK);
	peer->conn_idcounter += RXRPC_MAXCALLS;

	list_for_each(_p, &peer->conn_idlist) {
		conn = list_entry(_p, struct rxrpc_connection, id_link);
		if (connid == conn->conn_id)
			goto try_next_id;
		if (connid > conn->conn_id)
			break;
	}

	_debug("selected candidate conn ID %x.%u",
	       ntohl(peer->addr.s_addr), ntohl(connid));

	candidate->conn_id = connid;
	list_add_tail(&candidate->id_link, _p);

	write_unlock(&peer->conn_idlock);

	/* attach to peer */
	candidate->peer = peer;

	write_lock(&peer->conn_lock);

	/* search the peer's transport graveyard list */
	spin_lock(&peer->conn_gylock);
	list_for_each(_p, &peer->conn_graveyard) {
		conn = list_entry(_p, struct rxrpc_connection, link);
		if (conn->addr.sin_port == candidate->addr.sin_port &&
		    conn->security_ix == candidate->security_ix &&
		    conn->service_id == candidate->service_id &&
		    conn->in_clientflag == 0)
			goto found_in_graveyard;
	}
	spin_unlock(&peer->conn_gylock);

	/* pick the new candidate */
	_debug("created connection: {%08x} [out]", ntohl(candidate->conn_id));
	atomic_inc(&peer->conn_count);
	conn = candidate;
	candidate = NULL;

 make_active:
	list_add_tail(&conn->link, &peer->conn_active);
	write_unlock(&peer->conn_lock);

	if (candidate) {
		write_lock(&peer->conn_idlock);
		list_del(&candidate->id_link);
		write_unlock(&peer->conn_idlock);

		__RXACCT(atomic_dec(&rxrpc_connection_count));
		kfree(candidate);
	} else {
		down_write(&rxrpc_conns_sem);
		list_add_tail(&conn->proc_link, &rxrpc_conns);
		up_write(&rxrpc_conns_sem);
	}

	*_conn = conn;
	_leave(" = 0 (%p)", conn);

	return 0;

	/* handle resurrecting a connection from the graveyard */
 found_in_graveyard:
	_debug("resurrecting connection: {%08x} [out]", ntohl(conn->conn_id));
	rxrpc_get_connection(conn);
	rxrpc_krxtimod_del_timer(&conn->timeout);
	list_del_init(&conn->link);
	spin_unlock(&peer->conn_gylock);
	goto make_active;
} /* end rxrpc_create_connection() */
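
/* For reference, an outgoing connection would be obtained along these lines
 * (an illustrative sketch only; the port, address and service ID shown are
 * made up):
 *
 *	struct rxrpc_connection *conn;
 *	int ret;
 *
 *	ret = rxrpc_create_connection(trans, htons(7000), htonl(0x7f000001),
 *				      2001, NULL, &conn);
 *	if (ret < 0)
 *		return ret;
 *	...
 *	rxrpc_put_connection(conn);
 */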

/*****************************************************************************/
/*
 * lookup the connection for an incoming packet
 * - create a new connection record for unrecorded incoming connections
 */
int rxrpc_connection_lookup(struct rxrpc_peer *peer,
			    struct rxrpc_message *msg,
			    struct rxrpc_connection **_conn)
{
	struct rxrpc_connection *conn, *candidate = NULL;
	struct list_head *_p;
	struct sk_buff *pkt = msg->pkt;
	int ret, fresh = 0;
	__be32 x_epoch, x_connid;
	__be16 x_port, x_servid;
	__u32 x_secix;
	u8 x_clflag;

	_enter("%p{{%hu}},%u,%hu",
	       peer,
	       peer->trans->port,
	       ntohs(pkt->h.uh->source),
	       ntohs(msg->hdr.serviceId));

	x_port = pkt->h.uh->source;
	x_epoch = msg->hdr.epoch;
	x_clflag = msg->hdr.flags & RXRPC_CLIENT_INITIATED;
	x_connid = htonl(ntohl(msg->hdr.cid) & RXRPC_CIDMASK);
	x_servid = msg->hdr.serviceId;
	x_secix = msg->hdr.securityIndex;

	/* [common case] search the transport's active list first */
	read_lock(&peer->conn_lock);
	list_for_each(_p, &peer->conn_active) {
		conn = list_entry(_p, struct rxrpc_connection, link);
		if (conn->addr.sin_port == x_port &&
		    conn->in_epoch == x_epoch &&
		    conn->conn_id == x_connid &&
		    conn->security_ix == x_secix &&
		    conn->service_id == x_servid &&
		    conn->in_clientflag == x_clflag)
			goto found_active;
	}
	read_unlock(&peer->conn_lock);

	/* [uncommon case] not active
	 * - create a candidate for a new record if an inbound connection
	 * - only examine the graveyard for an outbound connection
	 */
	if (x_clflag) {
		ret = __rxrpc_create_connection(peer, &candidate);
		if (ret < 0) {
			_leave(" = %d", ret);
			return ret;
		}

		/* fill in the specifics */
		candidate->addr.sin_family = AF_INET;
		candidate->addr.sin_port = x_port;
		candidate->addr.sin_addr.s_addr = pkt->nh.iph->saddr;
		candidate->in_epoch = x_epoch;
		candidate->out_epoch = x_epoch;
		candidate->in_clientflag = RXRPC_CLIENT_INITIATED;
		candidate->out_clientflag = 0;
		candidate->conn_id = x_connid;
		candidate->service_id = x_servid;
		candidate->security_ix = x_secix;
	}

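	/* (the candidate had to be allocated before peer->conn_lock was
	 * taken: __rxrpc_create_connection() allocates with GFP_KERNEL and so
	 * may sleep, which isn't allowed under the lock - the price is that
	 * someone else may have installed the connection in the meantime,
	 * hence the re-check)
	 */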
	/* search the active list again, just in case it appeared whilst we
	 * were busy */
	write_lock(&peer->conn_lock);
	list_for_each(_p, &peer->conn_active) {
		conn = list_entry(_p, struct rxrpc_connection, link);
		if (conn->addr.sin_port == x_port &&
		    conn->in_epoch == x_epoch &&
		    conn->conn_id == x_connid &&
		    conn->security_ix == x_secix &&
		    conn->service_id == x_servid &&
		    conn->in_clientflag == x_clflag)
			goto found_active_second_chance;
	}

	/* search the transport's graveyard list */
	spin_lock(&peer->conn_gylock);
	list_for_each(_p, &peer->conn_graveyard) {
		conn = list_entry(_p, struct rxrpc_connection, link);
		if (conn->addr.sin_port == x_port &&
		    conn->in_epoch == x_epoch &&
		    conn->conn_id == x_connid &&
		    conn->security_ix == x_secix &&
		    conn->service_id == x_servid &&
		    conn->in_clientflag == x_clflag)
			goto found_in_graveyard;
	}
	spin_unlock(&peer->conn_gylock);

	/* outbound connections aren't created here */
	if (!x_clflag) {
		write_unlock(&peer->conn_lock);
		_leave(" = -ENOENT");
		return -ENOENT;
	}

	/* we can now add the new candidate to the list */
	_debug("created connection: {%08x} [in]", ntohl(candidate->conn_id));
	rxrpc_get_peer(peer);
	conn = candidate;
	candidate = NULL;
	atomic_inc(&peer->conn_count);
	fresh = 1;

 make_active:
	list_add_tail(&conn->link, &peer->conn_active);

 success_uwfree:
	write_unlock(&peer->conn_lock);

	if (candidate) {
		write_lock(&peer->conn_idlock);
		list_del(&candidate->id_link);
		write_unlock(&peer->conn_idlock);

		__RXACCT(atomic_dec(&rxrpc_connection_count));
		kfree(candidate);
	}

	if (fresh) {
		down_write(&rxrpc_conns_sem);
		list_add_tail(&conn->proc_link, &rxrpc_conns);
		up_write(&rxrpc_conns_sem);
	}

 success:
	*_conn = conn;
	_leave(" = 0 (%p)", conn);
	return 0;

	/* handle the connection being found in the active list straight off */
 found_active:
	rxrpc_get_connection(conn);
	read_unlock(&peer->conn_lock);
	goto success;

	/* handle resurrecting a connection from the graveyard */
 found_in_graveyard:
	_debug("resurrecting connection: {%08x} [in]", ntohl(conn->conn_id));
	rxrpc_get_peer(peer);
	rxrpc_get_connection(conn);
	rxrpc_krxtimod_del_timer(&conn->timeout);
	list_del_init(&conn->link);
	spin_unlock(&peer->conn_gylock);
	goto make_active;

	/* handle finding the connection on the second time through the active
	 * list */
 found_active_second_chance:
	rxrpc_get_connection(conn);
	goto success_uwfree;

} /* end rxrpc_connection_lookup() */

/*****************************************************************************/
/*
 * finish using a connection record
 * - it will be transferred to the peer's connection graveyard when its
 *   refcount reaches 0
 */
void rxrpc_put_connection(struct rxrpc_connection *conn)
{
	struct rxrpc_peer *peer;

	if (!conn)
		return;

	_enter("%p{u=%d p=%hu}",
	       conn, atomic_read(&conn->usage), ntohs(conn->addr.sin_port));

	peer = conn->peer;
	spin_lock(&peer->conn_gylock);

	/* sanity check */
	if (atomic_read(&conn->usage) <= 0)
		BUG();

	if (likely(!atomic_dec_and_test(&conn->usage))) {
		spin_unlock(&peer->conn_gylock);
		_leave("");
		return;
	}

	/* move to graveyard queue */
	_debug("burying connection: {%08x}", ntohl(conn->conn_id));
	list_del(&conn->link);
	list_add_tail(&conn->link, &peer->conn_graveyard);

	rxrpc_krxtimod_add_timer(&conn->timeout, rxrpc_conn_timeout * HZ);
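	/* the connection remains findable in the graveyard until the timer
	 * expires, so rxrpc_create_connection() / rxrpc_connection_lookup()
	 * can still resurrect it rather than building a fresh record
	 */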

	spin_unlock(&peer->conn_gylock);

	rxrpc_put_peer(conn->peer);

	_leave(" [killed]");
} /* end rxrpc_put_connection() */

/*****************************************************************************/
/*
 * free a connection record
 */
static void rxrpc_conn_do_timeout(struct rxrpc_connection *conn)
{
	struct rxrpc_peer *peer;

	_enter("%p{u=%d p=%hu}",
	       conn, atomic_read(&conn->usage), ntohs(conn->addr.sin_port));

	peer = conn->peer;

	if (atomic_read(&conn->usage) < 0)
		BUG();

	/* remove from graveyard if still dead */
	spin_lock(&peer->conn_gylock);
	if (atomic_read(&conn->usage) == 0)
		list_del_init(&conn->link);
	else
		conn = NULL;
	spin_unlock(&peer->conn_gylock);

	if (!conn) {
		_leave("");
		return; /* resurrected */
	}

	_debug("--- Destroying Connection %p{%08x} ---",
	       conn, ntohl(conn->conn_id));

	down_write(&rxrpc_conns_sem);
	list_del(&conn->proc_link);
	up_write(&rxrpc_conns_sem);

	write_lock(&peer->conn_idlock);
	list_del(&conn->id_link);
	write_unlock(&peer->conn_idlock);

	__RXACCT(atomic_dec(&rxrpc_connection_count));
	kfree(conn);

	/* if the graveyard is now empty, wake up anyone waiting for that */
	if (atomic_dec_and_test(&peer->conn_count))
		wake_up(&peer->conn_gy_waitq);

	_leave(" [destroyed]");
} /* end rxrpc_conn_do_timeout() */

/*****************************************************************************/
/*
 * clear all connection records from a peer endpoint
 */
void rxrpc_conn_clearall(struct rxrpc_peer *peer)
{
	DECLARE_WAITQUEUE(myself, current);

	struct rxrpc_connection *conn;
	int err;

	_enter("%p", peer);

	/* there shouldn't be any active conns remaining */
	if (!list_empty(&peer->conn_active))
		BUG();

	/* manually timeout all conns in the graveyard */
	spin_lock(&peer->conn_gylock);
	while (!list_empty(&peer->conn_graveyard)) {
		conn = list_entry(peer->conn_graveyard.next,
				  struct rxrpc_connection, link);
		err = rxrpc_krxtimod_del_timer(&conn->timeout);
		spin_unlock(&peer->conn_gylock);

		if (err == 0)
			rxrpc_conn_do_timeout(conn);

		spin_lock(&peer->conn_gylock);
	}
	spin_unlock(&peer->conn_gylock);

	/* wait for the conn graveyard to be completely cleared */
	set_current_state(TASK_UNINTERRUPTIBLE);
	add_wait_queue(&peer->conn_gy_waitq, &myself);

	while (atomic_read(&peer->conn_count) != 0) {
		schedule();
		set_current_state(TASK_UNINTERRUPTIBLE);
	}

	remove_wait_queue(&peer->conn_gy_waitq, &myself);
	set_current_state(TASK_RUNNING);

	_leave("");
} /* end rxrpc_conn_clearall() */

/*****************************************************************************/
/*
 * allocate and prepare a message for sending out through the transport
 * endpoint
 */
int rxrpc_conn_newmsg(struct rxrpc_connection *conn,
		      struct rxrpc_call *call,
		      uint8_t type,
		      int dcount,
		      struct kvec diov[],
		      gfp_t alloc_flags,
		      struct rxrpc_message **_msg)
{
	struct rxrpc_message *msg;
	int loop;

	_enter("%p{%d},%p,%u", conn, ntohs(conn->addr.sin_port), call, type);

	if (dcount > 3) {
		_leave(" = -EINVAL");
		return -EINVAL;
	}

	msg = kmalloc(sizeof(struct rxrpc_message), alloc_flags);
	if (!msg) {
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}

	memset(msg, 0, sizeof(*msg));
	atomic_set(&msg->usage, 1);

	INIT_LIST_HEAD(&msg->link);

	msg->state = RXRPC_MSG_PREPARED;

	msg->hdr.epoch = conn->out_epoch;
	msg->hdr.cid = conn->conn_id | (call ? call->chan_ix : 0);
	msg->hdr.callNumber = call ? call->call_id : 0;
	msg->hdr.type = type;
	msg->hdr.flags = conn->out_clientflag;
	msg->hdr.securityIndex = conn->security_ix;
	msg->hdr.serviceId = conn->service_id;

	/* generate sequence numbers for data packets */
	if (call) {
		switch (type) {
		case RXRPC_PACKET_TYPE_DATA:
			msg->seq = ++call->snd_seq_count;
			msg->hdr.seq = htonl(msg->seq);
			break;
		case RXRPC_PACKET_TYPE_ACK:
			/* ACK sequence numbers are complicated. The following
			 * may be wrong:
			 * - jumbo packet ACKs should have a seq number
			 * - normal ACKs should not
			 */
		default:
			break;
		}
	}

	msg->dcount = dcount + 1;
	msg->dsize = sizeof(msg->hdr);
	msg->data[0].iov_len = sizeof(msg->hdr);
	msg->data[0].iov_base = &msg->hdr;

	for (loop = 0; loop < dcount; loop++) {
		msg->dsize += diov[loop].iov_len;
		msg->data[loop + 1].iov_len = diov[loop].iov_len;
		msg->data[loop + 1].iov_base = diov[loop].iov_base;
	}

	__RXACCT(atomic_inc(&rxrpc_message_count));
	*_msg = msg;
	_leave(" = 0 (%p) #%d", msg, atomic_read(&rxrpc_message_count));
	return 0;
} /* end rxrpc_conn_newmsg() */
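
/* A transmission would typically pair the above with rxrpc_conn_sendmsg()
 * below - an illustrative sketch only, in which buf and len are hypothetical:
 *
 *	struct rxrpc_message *msg;
 *	struct kvec iov = { .iov_base = buf, .iov_len = len };
 *	int ret;
 *
 *	ret = rxrpc_conn_newmsg(conn, call, RXRPC_PACKET_TYPE_DATA,
 *				1, &iov, GFP_KERNEL, &msg);
 *	if (ret == 0)
 *		ret = rxrpc_conn_sendmsg(conn, msg);
 *
 * (the caller still holds the reference taken out by rxrpc_conn_newmsg() and
 * must drop it when the message is finished with)
 */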

/*****************************************************************************/
/*
 * free a message
 */
void __rxrpc_put_message(struct rxrpc_message *msg)
{
	int loop;

	_enter("%p #%d", msg, atomic_read(&rxrpc_message_count));

	if (msg->pkt)
		kfree_skb(msg->pkt);
	rxrpc_put_connection(msg->conn);

	for (loop = 0; loop < 8; loop++)
		if (test_bit(loop, &msg->dfree))
			kfree(msg->data[loop].iov_base);

	__RXACCT(atomic_dec(&rxrpc_message_count));
	kfree(msg);

	_leave("");
} /* end __rxrpc_put_message() */

/*****************************************************************************/
/*
 * send a message out through the transport endpoint
 */
int rxrpc_conn_sendmsg(struct rxrpc_connection *conn,
		       struct rxrpc_message *msg)
{
	struct msghdr msghdr;
	int ret;

	_enter("%p{%d}", conn, ntohs(conn->addr.sin_port));

	/* fill in some fields in the header */
	spin_lock(&conn->lock);
	msg->hdr.serial = htonl(++conn->serial_counter);
	msg->rttdone = 0;
	spin_unlock(&conn->lock);

	/* set up the message to be transmitted */
	msghdr.msg_name = &conn->addr;
	msghdr.msg_namelen = sizeof(conn->addr);
	msghdr.msg_control = NULL;
	msghdr.msg_controllen = 0;
	msghdr.msg_flags = MSG_CONFIRM | MSG_DONTWAIT;
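	/* (MSG_DONTWAIT keeps the UDP send from blocking; MSG_CONFIRM tells
	 * the neighbour layer that the destination appears to be making
	 * forward progress, so it needn't be re-probed)
	 */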

	_net("Sending message type %d of %Zd bytes to %08x:%d",
	     msg->hdr.type,
	     msg->dsize,
	     ntohl(conn->addr.sin_addr.s_addr),
	     ntohs(conn->addr.sin_port));

	/* send the message */
	ret = kernel_sendmsg(conn->trans->socket, &msghdr,
			     msg->data, msg->dcount, msg->dsize);
	if (ret < 0) {
		msg->state = RXRPC_MSG_ERROR;
	} else {
		msg->state = RXRPC_MSG_SENT;
		ret = 0;

		spin_lock(&conn->lock);
		do_gettimeofday(&conn->atime);
		msg->stamp = conn->atime;
		spin_unlock(&conn->lock);
	}

	_leave(" = %d", ret);

	return ret;
} /* end rxrpc_conn_sendmsg() */

/*****************************************************************************/
/*
 * deal with a subsequent call packet
 */
int rxrpc_conn_receive_call_packet(struct rxrpc_connection *conn,
				   struct rxrpc_call *call,
				   struct rxrpc_message *msg)
{
	struct rxrpc_message *pmsg;
	struct dst_entry *dst;
	struct list_head *_p;
	unsigned cix, seq;
	int ret = 0;

	_enter("%p,%p,%p", conn, call, msg);

	if (!call) {
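		/* the low CHANNELMASK bits of the CID select one of the call
		 * channels multiplexed over this connection */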
		cix = ntohl(msg->hdr.cid) & RXRPC_CHANNELMASK;

		spin_lock(&conn->lock);
		call = conn->channels[cix];

		if (!call || call->call_id != msg->hdr.callNumber) {
			spin_unlock(&conn->lock);
			rxrpc_trans_immediate_abort(conn->trans, msg, -ENOENT);
			goto out;
		} else {
			rxrpc_get_call(call);
			spin_unlock(&conn->lock);
		}
	} else {
		rxrpc_get_call(call);
	}

	_proto("Received packet %%%u [%u] on call %hu:%u:%u",
	       ntohl(msg->hdr.serial),
	       ntohl(msg->hdr.seq),
	       ntohs(msg->hdr.serviceId),
	       ntohl(conn->conn_id),
	       ntohl(call->call_id));

	call->pkt_rcv_count++;

	dst = msg->pkt->dst;
	if (dst && dst->dev)
		conn->peer->if_mtu =
			dst->dev->mtu - dst->dev->hard_header_len;

	/* queue on the call in seq order */
	rxrpc_get_message(msg);
	seq = msg->seq;

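	/* the receive queue is kept in ascending seq order: walk to the first
	 * queued message with a greater seq and insert in front of it
	 * (list_add_tail() on the cursor adds before that entry)
	 */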
	spin_lock(&call->lock);
	list_for_each(_p, &call->rcv_receiveq) {
		pmsg = list_entry(_p, struct rxrpc_message, link);
		if (pmsg->seq > seq)
			break;
	}
	list_add_tail(&msg->link, _p);

	/* reset the activity timeout */
	call->flags |= RXRPC_CALL_RCV_PKT;
	mod_timer(&call->rcv_timeout, jiffies + rxrpc_call_rcv_timeout * HZ);

	spin_unlock(&call->lock);

	rxrpc_krxiod_queue_call(call);

	rxrpc_put_call(call);
 out:
	_leave(" = %d", ret);
	return ret;
} /* end rxrpc_conn_receive_call_packet() */

/*****************************************************************************/
/*
 * handle an ICMP error being applied to a connection
 */
void rxrpc_conn_handle_error(struct rxrpc_connection *conn,
			     int local, int errno)
{
	struct rxrpc_call *calls[4];
	int loop;

	_enter("%p{%d},%d", conn, ntohs(conn->addr.sin_port), errno);

	/* get a ref to all my calls in one go */
	memset(calls, 0, sizeof(calls));
	spin_lock(&conn->lock);

	for (loop = 3; loop >= 0; loop--) {
		if (conn->channels[loop]) {
			calls[loop] = conn->channels[loop];
			rxrpc_get_call(calls[loop]);
		}
	}

	spin_unlock(&conn->lock);

	/* now kick them all */
	for (loop = 3; loop >= 0; loop--) {
		if (calls[loop]) {
			rxrpc_call_handle_error(calls[loop], local, errno);
			rxrpc_put_call(calls[loop]);
		}
	}

	_leave("");
} /* end rxrpc_conn_handle_error() */