/*
 * net/tipc/link.c: TIPC link code
 *
 * Copyright (c) 1996-2007, 2012-2015, Ericsson AB
 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "subscr.h"
#include "link.h"
#include "bcast.h"
#include "socket.h"
#include "name_distr.h"
#include "discover.h"
#include "netlink.h"

#include <linux/pkt_sched.h>

/*
 * Error message prefixes
 */
static const char *link_co_err = "Link tunneling error, ";
static const char *link_rst_msg = "Resetting link ";

static const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
	[TIPC_NLA_LINK_UNSPEC]		= { .type = NLA_UNSPEC },
	[TIPC_NLA_LINK_NAME] = {
		.type = NLA_STRING,
		.len = TIPC_MAX_LINK_NAME
	},
	[TIPC_NLA_LINK_MTU]		= { .type = NLA_U32 },
	[TIPC_NLA_LINK_BROADCAST]	= { .type = NLA_FLAG },
	[TIPC_NLA_LINK_UP]		= { .type = NLA_FLAG },
	[TIPC_NLA_LINK_ACTIVE]		= { .type = NLA_FLAG },
	[TIPC_NLA_LINK_PROP]		= { .type = NLA_NESTED },
	[TIPC_NLA_LINK_STATS]		= { .type = NLA_NESTED },
	[TIPC_NLA_LINK_RX]		= { .type = NLA_U32 },
	[TIPC_NLA_LINK_TX]		= { .type = NLA_U32 }
};

/* Properties valid for media, bearer and link */
static const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
	[TIPC_NLA_PROP_UNSPEC]		= { .type = NLA_UNSPEC },
	[TIPC_NLA_PROP_PRIO]		= { .type = NLA_U32 },
	[TIPC_NLA_PROP_TOL]		= { .type = NLA_U32 },
	[TIPC_NLA_PROP_WIN]		= { .type = NLA_U32 }
};

/*
 * Interval between NACKs when packets arrive out of order
 */
#define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2)
/*
 * Out-of-range value for link session numbers
 */
#define WILDCARD_SESSION 0x10000

/* Link FSM states:
 */
enum {
	LINK_ESTABLISHED	= 0xe,
	LINK_ESTABLISHING	= 0xe  << 4,
	LINK_RESET		= 0x1  << 8,
	LINK_RESETTING		= 0x2  << 12,
	LINK_PEER_RESET		= 0xd  << 16,
	LINK_FAILINGOVER	= 0xf  << 20,
	LINK_SYNCHING		= 0xc  << 24
};

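/* Note (added for clarity): each state value above occupies its own bit
 * range, so a link's current state can be matched against a mask of several
 * states in a single operation, as done by link_is_up(), tipc_link_is_reset()
 * and tipc_link_is_blocked() below.
 */
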
/* Link FSM state checking routines
 */
static int link_is_up(struct tipc_link *l)
{
	return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
}

static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
			       struct sk_buff_head *xmitq);
static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
				      u16 rcvgap, int tolerance, int priority,
				      struct sk_buff_head *xmitq);
static void link_reset_statistics(struct tipc_link *l_ptr);
static void link_print(struct tipc_link *l_ptr, const char *str);
static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf);

/*
 * Simple non-static link routines (i.e. referenced outside this file)
 */
bool tipc_link_is_up(struct tipc_link *l)
{
	return link_is_up(l);
}

bool tipc_link_is_reset(struct tipc_link *l)
{
	return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING);
}

bool tipc_link_is_synching(struct tipc_link *l)
{
	return l->state == LINK_SYNCHING;
}

bool tipc_link_is_failingover(struct tipc_link *l)
{
	return l->state == LINK_FAILINGOVER;
}

bool tipc_link_is_blocked(struct tipc_link *l)
{
	return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER);
}

int tipc_link_is_active(struct tipc_link *l)
{
	struct tipc_node *n = l->owner;

	return (node_active_link(n, 0) == l) || (node_active_link(n, 1) == l);
}

static u32 link_own_addr(struct tipc_link *l)
{
	return msg_prevnode(l->pmsg);
}

/**
 * tipc_link_create - create a new link
 * @n: pointer to associated node
 * @b: pointer to associated bearer
 * @ownnode: identity of own node
 * @peer: identity of peer node
 * @maddr: media address to be used
 * @inputq: queue to put messages ready for delivery
 * @namedq: queue to put binding table update messages ready for delivery
 * @link: return value, pointer to put the created link
 *
 * Returns true if link was created, otherwise false
 */
bool tipc_link_create(struct tipc_node *n, struct tipc_bearer *b, u32 session,
		      u32 ownnode, u32 peer, struct tipc_media_addr *maddr,
		      struct sk_buff_head *inputq, struct sk_buff_head *namedq,
		      struct tipc_link **link)
{
	struct tipc_link *l;
	struct tipc_msg *hdr;
	char *if_name;

	l = kzalloc(sizeof(*l), GFP_ATOMIC);
	if (!l)
		return false;
	*link = l;

	/* Note: peer i/f name is completed by reset/activate message */
	if_name = strchr(b->name, ':') + 1;
	sprintf(l->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
		tipc_zone(ownnode), tipc_cluster(ownnode), tipc_node(ownnode),
		if_name, tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));

	l->addr = peer;
	l->media_addr = maddr;
	l->owner = n;
	l->peer_session = WILDCARD_SESSION;
	l->bearer_id = b->identity;
	l->tolerance = b->tolerance;
	l->net_plane = b->net_plane;
	l->advertised_mtu = b->mtu;
	l->mtu = b->mtu;
	l->priority = b->priority;
	tipc_link_set_queue_limits(l, b->window);
	l->inputq = inputq;
	l->namedq = namedq;
	l->state = LINK_RESETTING;
	l->pmsg = (struct tipc_msg *)&l->proto_msg;
	hdr = l->pmsg;
	tipc_msg_init(ownnode, hdr, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, peer);
	msg_set_size(hdr, sizeof(l->proto_msg));
	msg_set_session(hdr, session);
	msg_set_bearer_id(hdr, l->bearer_id);
	strcpy((char *)msg_data(hdr), if_name);
	__skb_queue_head_init(&l->transmq);
	__skb_queue_head_init(&l->backlogq);
	__skb_queue_head_init(&l->deferdq);
	skb_queue_head_init(&l->wakeupq);
	skb_queue_head_init(l->inputq);
	return true;
}

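/* Illustrative usage sketch (not part of the original file): the node layer
 * is assumed to create one link endpoint per bearer roughly as follows; the
 * local variables shown are assumptions for the example only.
 *
 *	struct tipc_link *l = NULL;
 *
 *	if (!tipc_link_create(n, b, session, ownnode, peer, maddr,
 *			      inputq, namedq, &l))
 *		return false;
 *
 * The new endpoint starts out in LINK_RESETTING state and is driven from
 * there by the FSM events handled in tipc_link_fsm_evt() below.
 */
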
/* tipc_link_build_bcast_sync_msg() - synchronize broadcast link endpoints.
 *
 * Give a newly added peer node the sequence number where it should
 * start receiving and acking broadcast packets.
 */
void tipc_link_build_bcast_sync_msg(struct tipc_link *l,
				    struct sk_buff_head *xmitq)
{
	struct sk_buff *skb;
	struct sk_buff_head list;
	u16 last_sent;

	skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
			      0, l->addr, link_own_addr(l), 0, 0, 0);
	if (!skb)
		return;
	last_sent = tipc_bclink_get_last_sent(l->owner->net);
	msg_set_last_bcast(buf_msg(skb), last_sent);
	__skb_queue_head_init(&list);
	__skb_queue_tail(&list, skb);
	tipc_link_xmit(l, &list, xmitq);
}

/**
 * tipc_link_fsm_evt - link finite state machine
 * @l: pointer to link
 * @evt: state machine event to be processed
 */
int tipc_link_fsm_evt(struct tipc_link *l, int evt)
{
	int rc = 0;

	switch (l->state) {
	case LINK_RESETTING:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_FAILURE_EVT:
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILOVER_END_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_RESET:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_ESTABLISHING;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
			l->state = LINK_FAILINGOVER;
		case LINK_FAILURE_EVT:
		case LINK_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILOVER_END_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_PEER_RESET:
		switch (evt) {
		case LINK_RESET_EVT:
			l->state = LINK_ESTABLISHING;
			break;
		case LINK_PEER_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILURE_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_FAILINGOVER:
		switch (evt) {
		case LINK_FAILOVER_END_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_PEER_RESET_EVT:
		case LINK_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILURE_EVT:
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_ESTABLISHING:
		switch (evt) {
		case LINK_ESTABLISH_EVT:
			l->state = LINK_ESTABLISHED;
			rc |= TIPC_LINK_UP_EVT;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
			l->state = LINK_FAILINGOVER;
			break;
		case LINK_PEER_RESET_EVT:
		case LINK_RESET_EVT:
		case LINK_FAILURE_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
			break;
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_ESTABLISHED:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_FAILURE_EVT:
			l->state = LINK_RESETTING;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_ESTABLISH_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
			l->state = LINK_SYNCHING;
			break;
		case LINK_SYNCH_END_EVT:
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_SYNCHING:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_FAILURE_EVT:
			l->state = LINK_RESETTING;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_ESTABLISH_EVT:
		case LINK_SYNCH_BEGIN_EVT:
			break;
		case LINK_SYNCH_END_EVT:
			l->state = LINK_ESTABLISHED;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	default:
		pr_err("Unknown FSM state %x in %s\n", l->state, l->name);
	}
	return rc;
illegal_evt:
	pr_err("Illegal FSM event %x in state %x on link %s\n",
	       evt, l->state, l->name);
	return rc;
}

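/* Illustrative sketch (not part of the original file): a caller is expected
 * to check the event bits accumulated in the return value and act on them at
 * the node level; the handler names used here are assumptions.
 *
 *	rc = tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT);
 *	if (rc & TIPC_LINK_UP_EVT)
 *		node_handle_link_up(n, l);
 *	if (rc & TIPC_LINK_DOWN_EVT)
 *		node_handle_link_down(n, l);
 */
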
/* link_profile_stats - update statistical profiling of traffic
 */
static void link_profile_stats(struct tipc_link *l)
{
	struct sk_buff *skb;
	struct tipc_msg *msg;
	int length;

	/* Update counters used in statistical profiling of send traffic */
	l->stats.accu_queue_sz += skb_queue_len(&l->transmq);
	l->stats.queue_sz_counts++;

	skb = skb_peek(&l->transmq);
	if (!skb)
		return;
	msg = buf_msg(skb);
	length = msg_size(msg);

	if (msg_user(msg) == MSG_FRAGMENTER) {
		if (msg_type(msg) != FIRST_FRAGMENT)
			return;
		length = msg_size(msg_get_wrapped(msg));
	}
	l->stats.msg_lengths_total += length;
	l->stats.msg_length_counts++;
	if (length <= 64)
		l->stats.msg_length_profile[0]++;
	else if (length <= 256)
		l->stats.msg_length_profile[1]++;
	else if (length <= 1024)
		l->stats.msg_length_profile[2]++;
	else if (length <= 4096)
		l->stats.msg_length_profile[3]++;
	else if (length <= 16384)
		l->stats.msg_length_profile[4]++;
	else if (length <= 32768)
		l->stats.msg_length_profile[5]++;
	else
		l->stats.msg_length_profile[6]++;
}

/* tipc_link_timeout - perform periodic task as instructed from node timeout
 */
int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	int rc = 0;
	int mtyp = STATE_MSG;
	bool xmit = false;
	bool prb = false;

	link_profile_stats(l);

	switch (l->state) {
	case LINK_ESTABLISHED:
	case LINK_SYNCHING:
		if (!l->silent_intv_cnt) {
			if (tipc_bclink_acks_missing(l->owner))
				xmit = true;
		} else if (l->silent_intv_cnt <= l->abort_limit) {
			xmit = true;
			prb = true;
		} else {
			rc |= tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		}
		l->silent_intv_cnt++;
		break;
	case LINK_RESET:
		xmit = true;
		mtyp = RESET_MSG;
		break;
	case LINK_ESTABLISHING:
		xmit = true;
		mtyp = ACTIVATE_MSG;
		break;
	case LINK_PEER_RESET:
	case LINK_RESETTING:
	case LINK_FAILINGOVER:
		break;
	default:
		break;
	}

	if (xmit)
		tipc_link_build_proto_msg(l, mtyp, prb, 0, 0, 0, xmitq);

	return rc;
}

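/* Illustrative sketch (not part of the original file): the per-link timer in
 * the node layer is assumed to call tipc_link_timeout() roughly like this,
 * then hand the queued protocol messages to the bearer and act on the
 * returned event bits.
 *
 *	struct sk_buff_head xmitq;
 *	int rc;
 *
 *	__skb_queue_head_init(&xmitq);
 *	rc = tipc_link_timeout(l, &xmitq);
 *
 * The caller then transmits the packets on &xmitq via the bearer, and a
 * TIPC_LINK_DOWN_EVT bit in rc would typically lead to a link reset.
 */
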
/**
 * link_schedule_user - schedule a message sender for wakeup after congestion
 * @link: congested link
 * @list: message that was attempted sent
 * Create pseudo msg to send back to user when congestion abates
 * Does not consume buffer list
 */
static int link_schedule_user(struct tipc_link *link, struct sk_buff_head *list)
{
	struct tipc_msg *msg = buf_msg(skb_peek(list));
	int imp = msg_importance(msg);
	u32 oport = msg_origport(msg);
	u32 addr = link_own_addr(link);
	struct sk_buff *skb;

	/* This really cannot happen... */
	if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) {
		pr_warn("%s<%s>, send queue full", link_rst_msg, link->name);
		return -ENOBUFS;
	}
	/* Non-blocking sender: */
	if (TIPC_SKB_CB(skb_peek(list))->wakeup_pending)
		return -ELINKCONG;

	/* Create and schedule wakeup pseudo message */
	skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
			      addr, addr, oport, 0, 0);
	if (!skb)
		return -ENOBUFS;
	TIPC_SKB_CB(skb)->chain_sz = skb_queue_len(list);
	TIPC_SKB_CB(skb)->chain_imp = imp;
	skb_queue_tail(&link->wakeupq, skb);
	link->stats.link_congs++;
	return -ELINKCONG;
}

/**
 * link_prepare_wakeup - prepare users for wakeup after congestion
 * @link: congested link
 * Move a number of waiting users, as permitted by available space in
 * the send queue, from link wait queue to node wait queue for wakeup
 */
void link_prepare_wakeup(struct tipc_link *l)
{
	int pnd[TIPC_SYSTEM_IMPORTANCE + 1] = {0,};
	int imp, lim;
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&l->wakeupq, skb, tmp) {
		imp = TIPC_SKB_CB(skb)->chain_imp;
		lim = l->window + l->backlog[imp].limit;
		pnd[imp] += TIPC_SKB_CB(skb)->chain_sz;
		if ((pnd[imp] + l->backlog[imp].len) >= lim)
			break;
		skb_unlink(skb, &l->wakeupq);
		skb_queue_tail(l->inputq, skb);
	}
}

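/* Note (added for clarity): link_schedule_user() and link_prepare_wakeup()
 * together form the link-level flow control handshake: a sender that hits a
 * backlog limit gets -ELINKCONG back while a SOCK_WAKEUP pseudo message is
 * parked on the link's wakeupq; once enough send space is available again,
 * link_prepare_wakeup() moves those pseudo messages to the link's inputq,
 * from where the socket layer wakes the blocked senders.
 */
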
/**
 * tipc_link_reset_fragments - purge link's inbound message fragments queue
 * @l_ptr: pointer to link
 */
void tipc_link_reset_fragments(struct tipc_link *l_ptr)
{
	kfree_skb(l_ptr->reasm_buf);
	l_ptr->reasm_buf = NULL;
}

void tipc_link_purge_backlog(struct tipc_link *l)
{
	__skb_queue_purge(&l->backlogq);
	l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
	l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
	l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
	l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
	l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
}

/**
 * tipc_link_purge_queues - purge all pkt queues associated with link
 * @l_ptr: pointer to link
 */
void tipc_link_purge_queues(struct tipc_link *l_ptr)
{
	__skb_queue_purge(&l_ptr->deferdq);
	__skb_queue_purge(&l_ptr->transmq);
	tipc_link_purge_backlog(l_ptr);
	tipc_link_reset_fragments(l_ptr);
}

void tipc_link_reset(struct tipc_link *l)
{
	tipc_link_fsm_evt(l, LINK_RESET_EVT);

	/* Link is down, accept any session */
	l->peer_session = WILDCARD_SESSION;

	/* If peer is up, it only accepts an incremented session number */
	msg_set_session(l->pmsg, msg_session(l->pmsg) + 1);

	/* Prepare for renewed mtu size negotiation */
	l->mtu = l->advertised_mtu;

	/* Clean up all queues: */
	__skb_queue_purge(&l->transmq);
	__skb_queue_purge(&l->deferdq);
	skb_queue_splice_init(&l->wakeupq, l->inputq);

	tipc_link_purge_backlog(l);
	kfree_skb(l->reasm_buf);
	kfree_skb(l->failover_reasm_skb);
	l->reasm_buf = NULL;
	l->failover_reasm_skb = NULL;
	l->rcv_unacked = 0;
	l->snd_nxt = 1;
	l->rcv_nxt = 1;
	l->silent_intv_cnt = 0;
	l->stats.recv_info = 0;
	l->stale_count = 0;
	link_reset_statistics(l);
}

/**
 * __tipc_link_xmit(): same as tipc_link_xmit, but destlink is known & locked
 * @link: link to use
 * @list: chain of buffers containing message
 *
 * Consumes the buffer chain, except when returning an error code,
 * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
 */
int __tipc_link_xmit(struct net *net, struct tipc_link *link,
		     struct sk_buff_head *list)
{
	struct tipc_msg *msg = buf_msg(skb_peek(list));
	unsigned int maxwin = link->window;
	unsigned int i, imp = msg_importance(msg);
	uint mtu = link->mtu;
	u16 ack = mod(link->rcv_nxt - 1);
	u16 seqno = link->snd_nxt;
	u16 bc_last_in = link->owner->bclink.last_in;
	struct tipc_media_addr *addr = link->media_addr;
	struct sk_buff_head *transmq = &link->transmq;
	struct sk_buff_head *backlogq = &link->backlogq;
	struct sk_buff *skb, *bskb;

	/* Match msg importance against this and all higher backlog limits: */
	for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) {
		if (unlikely(link->backlog[i].len >= link->backlog[i].limit))
			return link_schedule_user(link, list);
	}
	if (unlikely(msg_size(msg) > mtu))
		return -EMSGSIZE;

	/* Prepare each packet for sending, and add to relevant queue: */
	while (skb_queue_len(list)) {
		skb = skb_peek(list);
		msg = buf_msg(skb);
		msg_set_seqno(msg, seqno);
		msg_set_ack(msg, ack);
		msg_set_bcast_ack(msg, bc_last_in);

		if (likely(skb_queue_len(transmq) < maxwin)) {
			__skb_dequeue(list);
			__skb_queue_tail(transmq, skb);
			tipc_bearer_send(net, link->bearer_id, skb, addr);
			link->rcv_unacked = 0;
			seqno++;
			continue;
		}
		if (tipc_msg_bundle(skb_peek_tail(backlogq), msg, mtu)) {
			kfree_skb(__skb_dequeue(list));
			link->stats.sent_bundled++;
			continue;
		}
		if (tipc_msg_make_bundle(&bskb, msg, mtu, link->addr)) {
			kfree_skb(__skb_dequeue(list));
			__skb_queue_tail(backlogq, bskb);
			link->backlog[msg_importance(buf_msg(bskb))].len++;
			link->stats.sent_bundled++;
			link->stats.sent_bundles++;
			continue;
		}
		link->backlog[imp].len += skb_queue_len(list);
		skb_queue_splice_tail_init(list, backlogq);
	}
	link->snd_nxt = seqno;
	return 0;
}

/**
 * tipc_link_xmit(): enqueue buffer list according to queue situation
 * @link: link to use
 * @list: chain of buffers containing message
 * @xmitq: returned list of packets to be sent by caller
 *
 * Consumes the buffer chain, except when returning -ELINKCONG,
 * since the caller then may want to make more send attempts.
 * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
 */
int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
		   struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb_peek(list));
	unsigned int maxwin = l->window;
	unsigned int i, imp = msg_importance(hdr);
	unsigned int mtu = l->mtu;
	u16 ack = l->rcv_nxt - 1;
	u16 seqno = l->snd_nxt;
	u16 bc_last_in = l->owner->bclink.last_in;
	struct sk_buff_head *transmq = &l->transmq;
	struct sk_buff_head *backlogq = &l->backlogq;
	struct sk_buff *skb, *_skb, *bskb;

	/* Match msg importance against this and all higher backlog limits: */
	for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) {
		if (unlikely(l->backlog[i].len >= l->backlog[i].limit))
			return link_schedule_user(l, list);
	}
	if (unlikely(msg_size(hdr) > mtu))
		return -EMSGSIZE;

	/* Prepare each packet for sending, and add to relevant queue: */
	while (skb_queue_len(list)) {
		skb = skb_peek(list);
		hdr = buf_msg(skb);
		msg_set_seqno(hdr, seqno);
		msg_set_ack(hdr, ack);
		msg_set_bcast_ack(hdr, bc_last_in);

		if (likely(skb_queue_len(transmq) < maxwin)) {
			_skb = skb_clone(skb, GFP_ATOMIC);
			if (!_skb)
				return -ENOBUFS;
			__skb_dequeue(list);
			__skb_queue_tail(transmq, skb);
			__skb_queue_tail(xmitq, _skb);
			l->rcv_unacked = 0;
			seqno++;
			continue;
		}
		if (tipc_msg_bundle(skb_peek_tail(backlogq), hdr, mtu)) {
			kfree_skb(__skb_dequeue(list));
			l->stats.sent_bundled++;
			continue;
		}
		if (tipc_msg_make_bundle(&bskb, hdr, mtu, l->addr)) {
			kfree_skb(__skb_dequeue(list));
			__skb_queue_tail(backlogq, bskb);
			l->backlog[msg_importance(buf_msg(bskb))].len++;
			l->stats.sent_bundled++;
			l->stats.sent_bundles++;
			continue;
		}
		l->backlog[imp].len += skb_queue_len(list);
		skb_queue_splice_tail_init(list, backlogq);
	}
	l->snd_nxt = seqno;
	return 0;
}

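/* Illustrative sketch (not part of the original file): a typical caller wraps
 * a message chain in a list and collects the resulting packet clones on an
 * xmitq for the bearer layer, just as tipc_link_build_bcast_sync_msg() above
 * does:
 *
 *	struct sk_buff_head list, xmitq;
 *
 *	__skb_queue_head_init(&list);
 *	__skb_queue_head_init(&xmitq);
 *	__skb_queue_tail(&list, skb);
 *	rc = tipc_link_xmit(l, &list, &xmitq);
 *
 * On success the caller transmits the buffers queued on &xmitq via the
 * bearer; -ELINKCONG means the chain was not consumed and may be retried.
 */
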
/*
 * tipc_link_sync_rcv - synchronize broadcast link endpoints.
 * Receive the sequence number where we should start receiving and
 * acking broadcast packets from a newly added peer node, and open
 * up for reception of such packets.
 *
 * Called with node locked
 */
static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);

	n->bclink.last_sent = n->bclink.last_in = msg_last_bcast(msg);
	n->bclink.recv_permitted = true;
	kfree_skb(buf);
}

/*
 * tipc_link_push_packets - push unsent packets to bearer
 *
 * Push out the unsent messages of a link where congestion
 * has abated. Node is locked.
 *
 * Called with node locked
 */
void tipc_link_push_packets(struct tipc_link *link)
{
	struct sk_buff *skb;
	struct tipc_msg *msg;
	u16 seqno = link->snd_nxt;
	u16 ack = mod(link->rcv_nxt - 1);

	while (skb_queue_len(&link->transmq) < link->window) {
		skb = __skb_dequeue(&link->backlogq);
		if (!skb)
			break;
		msg = buf_msg(skb);
		link->backlog[msg_importance(msg)].len--;
		msg_set_ack(msg, ack);
		msg_set_seqno(msg, seqno);
		seqno = mod(seqno + 1);
		msg_set_bcast_ack(msg, link->owner->bclink.last_in);
		link->rcv_unacked = 0;
		__skb_queue_tail(&link->transmq, skb);
		tipc_bearer_send(link->owner->net, link->bearer_id,
				 skb, link->media_addr);
	}
	link->snd_nxt = seqno;
}

void tipc_link_advance_backlog(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	struct sk_buff *skb, *_skb;
	struct tipc_msg *hdr;
	u16 seqno = l->snd_nxt;
	u16 ack = l->rcv_nxt - 1;

	while (skb_queue_len(&l->transmq) < l->window) {
		skb = skb_peek(&l->backlogq);
		if (!skb)
			break;
		_skb = skb_clone(skb, GFP_ATOMIC);
		if (!_skb)
			break;
		__skb_dequeue(&l->backlogq);
		hdr = buf_msg(skb);
		l->backlog[msg_importance(hdr)].len--;
		__skb_queue_tail(&l->transmq, skb);
		__skb_queue_tail(xmitq, _skb);
		msg_set_ack(hdr, ack);
		msg_set_seqno(hdr, seqno);
		msg_set_bcast_ack(hdr, l->owner->bclink.last_in);
		l->rcv_unacked = 0;
		seqno++;
	}
	l->snd_nxt = seqno;
}

static void link_retransmit_failure(struct tipc_link *l_ptr,
				    struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);
	struct net *net = l_ptr->owner->net;

	pr_warn("Retransmission failure on link <%s>\n", l_ptr->name);

	if (l_ptr->addr) {
		/* Handle failure on standard link */
		link_print(l_ptr, "Resetting link ");
		pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
			msg_user(msg), msg_type(msg), msg_size(msg),
			msg_errcode(msg));
		pr_info("sqno %u, prev: %x, src: %x\n",
			msg_seqno(msg), msg_prevnode(msg), msg_orignode(msg));
	} else {
		/* Handle failure on broadcast link */
		struct tipc_node *n_ptr;
		char addr_string[16];

		pr_info("Msg seq number: %u, ", msg_seqno(msg));
		pr_cont("Outstanding acks: %lu\n",
			(unsigned long) TIPC_SKB_CB(buf)->handle);

		n_ptr = tipc_bclink_retransmit_to(net);

		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_info("Broadcast link info for %s\n", addr_string);
		pr_info("Reception permitted: %d, Acked: %u\n",
			n_ptr->bclink.recv_permitted,
			n_ptr->bclink.acked);
		pr_info("Last in: %u, Oos state: %u, Last sent: %u\n",
			n_ptr->bclink.last_in,
			n_ptr->bclink.oos_state,
			n_ptr->bclink.last_sent);

		n_ptr->action_flags |= TIPC_BCAST_RESET;
		l_ptr->stale_count = 0;
	}
}

void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *skb,
			  u32 retransmits)
{
	struct tipc_msg *msg;

	if (!skb)
		return;

	msg = buf_msg(skb);

	/* Detect repeated retransmit failures */
	if (l_ptr->last_retransm == msg_seqno(msg)) {
		if (++l_ptr->stale_count > 100) {
			link_retransmit_failure(l_ptr, skb);
			return;
		}
	} else {
		l_ptr->last_retransm = msg_seqno(msg);
		l_ptr->stale_count = 1;
	}

	skb_queue_walk_from(&l_ptr->transmq, skb) {
		if (!retransmits)
			break;
		msg = buf_msg(skb);
		msg_set_ack(msg, mod(l_ptr->rcv_nxt - 1));
		msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
		tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, skb,
				 l_ptr->media_addr);
		retransmits--;
		l_ptr->stats.retransmitted++;
	}
}

static int tipc_link_retransm(struct tipc_link *l, int retransm,
			      struct sk_buff_head *xmitq)
{
	struct sk_buff *_skb, *skb = skb_peek(&l->transmq);
	struct tipc_msg *hdr;

	if (!skb)
		return 0;

	/* Detect repeated retransmit failures on same packet */
	if (likely(l->last_retransm != buf_seqno(skb))) {
		l->last_retransm = buf_seqno(skb);
		l->stale_count = 1;
	} else if (++l->stale_count > 100) {
		link_retransmit_failure(l, skb);
		return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
	}
	skb_queue_walk(&l->transmq, skb) {
		if (!retransm)
			return 0;
		hdr = buf_msg(skb);
		_skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC);
		if (!_skb)
			return 0;
		hdr = buf_msg(_skb);
		msg_set_ack(hdr, l->rcv_nxt - 1);
		msg_set_bcast_ack(hdr, l->owner->bclink.last_in);
		_skb->priority = TC_PRIO_CONTROL;
		__skb_queue_tail(xmitq, _skb);
		retransm--;
		l->stats.retransmitted++;
	}
	return 0;
}

/* tipc_data_input - deliver data and name distr msgs to upper layer
 *
 * Consumes buffer if message is of right type
 * Node lock must be held
 */
static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb,
			    struct sk_buff_head *inputq)
{
	struct tipc_node *node = link->owner;

	switch (msg_user(buf_msg(skb))) {
	case TIPC_LOW_IMPORTANCE:
	case TIPC_MEDIUM_IMPORTANCE:
	case TIPC_HIGH_IMPORTANCE:
	case TIPC_CRITICAL_IMPORTANCE:
	case CONN_MANAGER:
		__skb_queue_tail(inputq, skb);
		return true;
	case NAME_DISTRIBUTOR:
		node->bclink.recv_permitted = true;
		skb_queue_tail(link->namedq, skb);
		return true;
	case MSG_BUNDLER:
	case TUNNEL_PROTOCOL:
	case MSG_FRAGMENTER:
	case BCAST_PROTOCOL:
		return false;
	default:
		pr_warn("Dropping received illegal msg type\n");
		kfree_skb(skb);
		return false;
	};
}

/* tipc_link_input - process packet that has passed link protocol check
 *
 * Consumes buffer
 */
static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
			   struct sk_buff_head *inputq)
{
	struct tipc_node *node = l->owner;
	struct tipc_msg *hdr = buf_msg(skb);
	struct sk_buff **reasm_skb = &l->reasm_buf;
	struct sk_buff *iskb;
	int usr = msg_user(hdr);
	int rc = 0;
	int pos = 0;
	int ipos = 0;

	if (unlikely(usr == TUNNEL_PROTOCOL)) {
		if (msg_type(hdr) == SYNCH_MSG) {
			__skb_queue_purge(&l->deferdq);
			goto drop;
		}
		if (!tipc_msg_extract(skb, &iskb, &ipos))
			return rc;
		kfree_skb(skb);
		skb = iskb;
		hdr = buf_msg(skb);
		if (less(msg_seqno(hdr), l->drop_point))
			goto drop;
		if (tipc_data_input(l, skb, inputq))
			return rc;
		usr = msg_user(hdr);
		reasm_skb = &l->failover_reasm_skb;
	}

	if (usr == MSG_BUNDLER) {
		l->stats.recv_bundles++;
		l->stats.recv_bundled += msg_msgcnt(hdr);
		while (tipc_msg_extract(skb, &iskb, &pos))
			tipc_data_input(l, iskb, inputq);
		return 0;
	} else if (usr == MSG_FRAGMENTER) {
		l->stats.recv_fragments++;
		if (tipc_buf_append(reasm_skb, &skb)) {
			l->stats.recv_fragmented++;
			tipc_data_input(l, skb, inputq);
		} else if (!*reasm_skb) {
			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		}
		return 0;
	} else if (usr == BCAST_PROTOCOL) {
		tipc_link_sync_rcv(node, skb);
		return 0;
	}
drop:
	kfree_skb(skb);
	return 0;
}

static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked)
{
	bool released = false;
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&l->transmq, skb, tmp) {
		if (more(buf_seqno(skb), acked))
			break;
		__skb_unlink(skb, &l->transmq);
		kfree_skb(skb);
		released = true;
	}
	return released;
}

/* tipc_link_rcv - process TIPC packets/messages arriving from off-node
 * @link: the link that should handle the message
 * @skb: TIPC packet
 * @xmitq: queue to place packets to be sent after this call
 */
int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
		  struct sk_buff_head *xmitq)
{
	struct sk_buff_head *arrvq = &l->deferdq;
	struct sk_buff_head tmpq;
	struct tipc_msg *hdr;
	u16 seqno, rcv_nxt;
	int rc = 0;

	__skb_queue_head_init(&tmpq);

	if (unlikely(!__tipc_skb_queue_sorted(arrvq, skb))) {
		if (!(skb_queue_len(arrvq) % TIPC_NACK_INTV))
			tipc_link_build_proto_msg(l, STATE_MSG, 0,
						  0, 0, 0, xmitq);
		return rc;
	}

	while ((skb = skb_peek(arrvq))) {
		hdr = buf_msg(skb);

		/* Verify and update link state */
		if (unlikely(msg_user(hdr) == LINK_PROTOCOL)) {
			__skb_dequeue(arrvq);
			rc = tipc_link_proto_rcv(l, skb, xmitq);
			continue;
		}

		if (unlikely(!link_is_up(l))) {
			rc = tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT);
			if (!link_is_up(l)) {
				kfree_skb(__skb_dequeue(arrvq));
				goto exit;
			}
		}

		l->silent_intv_cnt = 0;

		/* Forward queues and wake up waiting users */
		if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) {
			tipc_link_advance_backlog(l, xmitq);
			if (unlikely(!skb_queue_empty(&l->wakeupq)))
				link_prepare_wakeup(l);
		}

		/* Defer reception if there is a gap in the sequence */
		seqno = msg_seqno(hdr);
		rcv_nxt = l->rcv_nxt;
		if (unlikely(less(rcv_nxt, seqno))) {
			l->stats.deferred_recv++;
			goto exit;
		}

		__skb_dequeue(arrvq);

		/* Drop if packet already received */
		if (unlikely(more(rcv_nxt, seqno))) {
			l->stats.duplicates++;
			kfree_skb(skb);
			goto exit;
		}

		/* Packet can be delivered */
		l->rcv_nxt++;
		l->stats.recv_info++;
		if (unlikely(!tipc_data_input(l, skb, &tmpq)))
			rc = tipc_link_input(l, skb, &tmpq);

		/* Ack at regular intervals */
		if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN)) {
			l->rcv_unacked = 0;
			l->stats.sent_acks++;
			tipc_link_build_proto_msg(l, STATE_MSG,
						  0, 0, 0, 0, xmitq);
		}
	}
exit:
	tipc_skb_queue_splice_tail(&tmpq, l->inputq);
	return rc;
}

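/* Illustrative sketch (not part of the original file): the node layer is
 * assumed to feed every arriving packet through tipc_link_rcv() and then
 * send whatever the link queued for transmission:
 *
 *	struct sk_buff_head xmitq;
 *	int rc;
 *
 *	__skb_queue_head_init(&xmitq);
 *	rc = tipc_link_rcv(l, skb, &xmitq);
 *
 * The caller then transmits the packets on &xmitq via the bearer and acts
 * on any TIPC_LINK_UP_EVT/TIPC_LINK_DOWN_EVT bits set in rc.
 */
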
/**
 * tipc_link_defer_pkt - Add out-of-sequence message to deferred reception queue
 *
 * Returns increase in queue length (i.e. 0 or 1)
 */
u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *skb)
{
	struct sk_buff *skb1;
	u16 seq_no = buf_seqno(skb);

	/* Empty queue ? */
	if (skb_queue_empty(list)) {
		__skb_queue_tail(list, skb);
		return 1;
	}

	/* Last ? */
	if (less(buf_seqno(skb_peek_tail(list)), seq_no)) {
		__skb_queue_tail(list, skb);
		return 1;
	}

	/* Locate insertion point in queue, then insert; discard if duplicate */
	skb_queue_walk(list, skb1) {
		u16 curr_seqno = buf_seqno(skb1);

		if (seq_no == curr_seqno) {
			kfree_skb(skb);
			return 0;
		}

		if (less(seq_no, curr_seqno))
			break;
	}

	__skb_queue_before(list, skb1, skb);
	return 1;
}

/*
 * Send protocol message to the other endpoint.
 */
void tipc_link_proto_xmit(struct tipc_link *l, u32 msg_typ, int probe_msg,
			  u32 gap, u32 tolerance, u32 priority)
{
	struct sk_buff *skb = NULL;
	struct sk_buff_head xmitq;

	__skb_queue_head_init(&xmitq);
	tipc_link_build_proto_msg(l, msg_typ, probe_msg, gap,
				  tolerance, priority, &xmitq);
	skb = __skb_dequeue(&xmitq);
	if (!skb)
		return;
	tipc_bearer_send(l->owner->net, l->bearer_id, skb, l->media_addr);
	l->rcv_unacked = 0;
	kfree_skb(skb);
}

/* tipc_link_build_proto_msg: prepare link protocol message for transmission
 */
static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
				      u16 rcvgap, int tolerance, int priority,
				      struct sk_buff_head *xmitq)
{
	struct sk_buff *skb = NULL;
	struct tipc_msg *hdr = l->pmsg;
	u16 snd_nxt = l->snd_nxt;
	u16 rcv_nxt = l->rcv_nxt;
	u16 rcv_last = rcv_nxt - 1;
	int node_up = l->owner->bclink.recv_permitted;

	/* Don't send protocol message during reset or link failover */
	if (tipc_link_is_blocked(l))
		return;

	msg_set_type(hdr, mtyp);
	msg_set_net_plane(hdr, l->net_plane);
	msg_set_bcast_ack(hdr, l->owner->bclink.last_in);
	msg_set_last_bcast(hdr, tipc_bclink_get_last_sent(l->owner->net));
	msg_set_link_tolerance(hdr, tolerance);
	msg_set_linkprio(hdr, priority);
	msg_set_redundant_link(hdr, node_up);
	msg_set_seq_gap(hdr, 0);

	/* Compatibility: created msg must not be in sequence with pkt flow */
	msg_set_seqno(hdr, snd_nxt + U16_MAX / 2);

	if (mtyp == STATE_MSG) {
		if (!tipc_link_is_up(l))
			return;
		msg_set_next_sent(hdr, snd_nxt);

		/* Override rcvgap if there are packets in deferred queue */
		if (!skb_queue_empty(&l->deferdq))
			rcvgap = buf_seqno(skb_peek(&l->deferdq)) - rcv_nxt;
		if (rcvgap) {
			msg_set_seq_gap(hdr, rcvgap);
			l->stats.sent_nacks++;
		}
		msg_set_ack(hdr, rcv_last);
		msg_set_probe(hdr, probe);
		if (probe)
			l->stats.sent_probes++;
		l->stats.sent_states++;
	} else {
		/* RESET_MSG or ACTIVATE_MSG */
		msg_set_max_pkt(hdr, l->advertised_mtu);
		msg_set_ack(hdr, l->rcv_nxt - 1);
		msg_set_next_sent(hdr, 1);
	}
	skb = tipc_buf_acquire(msg_size(hdr));
	if (!skb)
		return;
	skb_copy_to_linear_data(skb, hdr, msg_size(hdr));
	skb->priority = TC_PRIO_CONTROL;
	__skb_queue_tail(xmitq, skb);
}

Jon Paul Maloy6e498152015-07-30 18:24:19 -04001252/* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
1253 * with contents of the link's transmit and backlog queues.
Per Lidenb97bf3f2006-01-02 19:04:38 +01001254 */
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001255void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
1256 int mtyp, struct sk_buff_head *xmitq)
Per Lidenb97bf3f2006-01-02 19:04:38 +01001257{
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001258 struct sk_buff *skb, *tnlskb;
1259 struct tipc_msg *hdr, tnlhdr;
1260 struct sk_buff_head *queue = &l->transmq;
1261 struct sk_buff_head tmpxq, tnlq;
1262 u16 pktlen, pktcnt, seqno = l->snd_nxt;
Per Lidenb97bf3f2006-01-02 19:04:38 +01001263
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001264 if (!tnl)
Per Lidenb97bf3f2006-01-02 19:04:38 +01001265 return;
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001266
1267 skb_queue_head_init(&tnlq);
1268 skb_queue_head_init(&tmpxq);
1269
1270 /* At least one packet required for safe algorithm => add dummy */
1271 skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
1272 BASIC_H_SIZE, 0, l->addr, link_own_addr(l),
1273 0, 0, TIPC_ERR_NO_PORT);
Ying Xuea6ca1092014-11-26 11:41:55 +08001274 if (!skb) {
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001275 pr_warn("%sunable to create tunnel packet\n", link_co_err);
Per Lidenb97bf3f2006-01-02 19:04:38 +01001276 return;
Allan Stephens5392d642006-06-25 23:52:50 -07001277 }
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001278 skb_queue_tail(&tnlq, skb);
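	/* Pass the dummy through tipc_link_xmit() so that it ends up on the
	 * transmit queue like an ordinary packet; tmpxq only catches what
	 * would have gone out on the bearer and is discarded, since nothing
	 * more should be sent on this link.
	 */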
1279 tipc_link_xmit(l, &tnlq, &tmpxq);
1280 __skb_queue_purge(&tmpxq);
Per Lidenb97bf3f2006-01-02 19:04:38 +01001281
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001282 /* Initialize reusable tunnel packet header */
1283 tipc_msg_init(link_own_addr(l), &tnlhdr, TUNNEL_PROTOCOL,
1284 mtyp, INT_H_SIZE, l->addr);
1285 pktcnt = skb_queue_len(&l->transmq) + skb_queue_len(&l->backlogq);
1286 msg_set_msgcnt(&tnlhdr, pktcnt);
1287 msg_set_bearer_id(&tnlhdr, l->peer_bearer_id);
1288tnl:
1289 /* Wrap each packet into a tunnel packet */
Jon Paul Maloy05dcc5a2015-03-13 16:08:10 -04001290 skb_queue_walk(queue, skb) {
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001291 hdr = buf_msg(skb);
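		/* Backlog packets have not been assigned sequence numbers yet;
		 * number them here so they follow directly after the contents
		 * of the transmit queue.
		 */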
1292 if (queue == &l->backlogq)
1293 msg_set_seqno(hdr, seqno++);
1294 pktlen = msg_size(hdr);
1295 msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
1296 tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE);
1297 if (!tnlskb) {
1298 pr_warn("%sunable to send packet\n", link_co_err);
Per Lidenb97bf3f2006-01-02 19:04:38 +01001299 return;
1300 }
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001301 skb_copy_to_linear_data(tnlskb, &tnlhdr, INT_H_SIZE);
1302 skb_copy_to_linear_data_offset(tnlskb, INT_H_SIZE, hdr, pktlen);
1303 __skb_queue_tail(&tnlq, tnlskb);
Per Lidenb97bf3f2006-01-02 19:04:38 +01001304 }
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001305 if (queue != &l->backlogq) {
1306 queue = &l->backlogq;
1307 goto tnl;
Jon Paul Maloydd3f9e72015-05-14 10:46:18 -04001308 }
Per Lidenb97bf3f2006-01-02 19:04:38 +01001309
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001310 tipc_link_xmit(tnl, &tnlq, xmitq);
Jon Paul Maloyf006c9c2014-02-13 17:29:11 -05001311
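	/* For failover, receive state is handed over as well: tunneled packets
	 * from the peer carrying an original sequence number below the failed
	 * link's rcv_nxt have already been delivered and will be dropped, and
	 * reassembly of a partly received fragmented message, if any, is
	 * continued on the new link.
	 */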
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001312 if (mtyp == FAILOVER_MSG) {
1313 tnl->drop_point = l->rcv_nxt;
1314 tnl->failover_reasm_skb = l->reasm_buf;
1315 l->reasm_buf = NULL;
Jon Paul Maloyf006c9c2014-02-13 17:29:11 -05001316 }
Per Lidenb97bf3f2006-01-02 19:04:38 +01001317}
1318
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001319/* tipc_link_proto_rcv(): receive link level protocol message.
1320 * Note that network plane id propagates through the network, and may
1321 * change at any time. The node with lowest numerical id determines
1322 * network plane
1323 */
1324static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
1325 struct sk_buff_head *xmitq)
1326{
1327 struct tipc_msg *hdr = buf_msg(skb);
1328 u16 rcvgap = 0;
1329 u16 nacked_gap = msg_seq_gap(hdr);
1330 u16 peers_snd_nxt = msg_next_sent(hdr);
1331 u16 peers_tol = msg_link_tolerance(hdr);
1332 u16 peers_prio = msg_linkprio(hdr);
1333 char *if_name;
1334 int rc = 0;
1335
Jon Paul Maloy662921c2015-07-30 18:24:21 -04001336 if (tipc_link_is_blocked(l))
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001337 goto exit;
1338
1339 if (link_own_addr(l) > msg_prevnode(hdr))
1340 l->net_plane = msg_net_plane(hdr);
1341
1342 switch (msg_type(hdr)) {
1343 case RESET_MSG:
1344
1345 /* Ignore duplicate RESET with old session number */
1346 if ((less_eq(msg_session(hdr), l->peer_session)) &&
1347 (l->peer_session != WILDCARD_SESSION))
1348 break;
1349 /* fall thru' */
Jon Paul Maloy662921c2015-07-30 18:24:21 -04001350
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001351 case ACTIVATE_MSG:
1352
1353 /* Complete own link name with peer's interface name */
1354 if_name = strrchr(l->name, ':') + 1;
1355 if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
1356 break;
1357 if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
1358 break;
1359 strncpy(if_name, msg_data(hdr), TIPC_MAX_IF_NAME);
1360
1361 /* Update own tolerance if peer indicates a non-zero value */
1362 if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
1363 l->tolerance = peers_tol;
1364
1365 /* Update own priority if peer's priority is higher */
1366 if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
1367 l->priority = peers_prio;
1368
Jon Paul Maloy662921c2015-07-30 18:24:21 -04001369 if (msg_type(hdr) == RESET_MSG) {
1370 rc |= tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
1371 } else if (!link_is_up(l)) {
1372 tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
1373 rc |= tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT);
1374 }
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001375 l->peer_session = msg_session(hdr);
1376 l->peer_bearer_id = msg_bearer_id(hdr);
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001377 if (l->mtu > msg_max_pkt(hdr))
1378 l->mtu = msg_max_pkt(hdr);
1379 break;
Jon Paul Maloy662921c2015-07-30 18:24:21 -04001380
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001381 case STATE_MSG:
Jon Paul Maloy662921c2015-07-30 18:24:21 -04001382
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001383 /* Update own tolerance if peer indicates a non-zero value */
1384 if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
1385 l->tolerance = peers_tol;
1386
1387 l->silent_intv_cnt = 0;
1388 l->stats.recv_states++;
1389 if (msg_probe(hdr))
1390 l->stats.recv_probes++;
Jon Paul Maloy662921c2015-07-30 18:24:21 -04001391 rc = tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT);
1392 if (!link_is_up(l))
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001393 break;
1394
Jon Paul Maloy662921c2015-07-30 18:24:21 -04001395 /* Send NACK if peer has sent pkts we haven't received yet */
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001396 if (more(peers_snd_nxt, l->rcv_nxt))
1397 rcvgap = peers_snd_nxt - l->rcv_nxt;
1398 if (rcvgap || (msg_probe(hdr)))
1399 tipc_link_build_proto_msg(l, STATE_MSG, 0, rcvgap,
Jon Paul Maloy16040892015-07-21 06:42:28 -04001400 0, 0, xmitq);
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001401 tipc_link_release_pkts(l, msg_ack(hdr));
1402
1403 /* If NACK, retransmit will now start at right position */
1404 if (nacked_gap) {
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001405 rc = tipc_link_retransm(l, nacked_gap, xmitq);
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001406 l->stats.recv_nacks++;
1407 }
Jon Paul Maloy662921c2015-07-30 18:24:21 -04001408
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001409 tipc_link_advance_backlog(l, xmitq);
1410 if (unlikely(!skb_queue_empty(&l->wakeupq)))
1411 link_prepare_wakeup(l);
1412 }
1413exit:
1414 kfree_skb(skb);
1415 return rc;
1416}
1417
Jon Paul Maloye3eea1e2015-03-13 16:08:11 -04001418void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
Per Lidenb97bf3f2006-01-02 19:04:38 +01001419{
Jon Paul Maloyed193ec2015-04-02 09:33:02 -04001420 int max_bulk = TIPC_MAX_PUBLICATIONS / (l->mtu / ITEM_SIZE);
Jon Paul Maloy05dcc5a2015-03-13 16:08:10 -04001421
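	/* The per-importance backlog limits below scale with the window. As a
	 * sketch, assuming the default link window of 50 packets, this gives
	 * 25 / 50 / 75 / 100 packets for low, medium, high and critical
	 * importance, while system importance is bounded by max_bulk, i.e.
	 * the number of packets needed for a full name table bulk
	 * distribution at the current link MTU.
	 */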
Jon Paul Maloye3eea1e2015-03-13 16:08:11 -04001422 l->window = win;
Jon Paul Maloy1f66d162015-03-25 12:07:24 -04001423 l->backlog[TIPC_LOW_IMPORTANCE].limit = win / 2;
1424 l->backlog[TIPC_MEDIUM_IMPORTANCE].limit = win;
1425 l->backlog[TIPC_HIGH_IMPORTANCE].limit = win / 2 * 3;
1426 l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = win * 2;
1427 l->backlog[TIPC_SYSTEM_IMPORTANCE].limit = max_bulk;
Per Lidenb97bf3f2006-01-02 19:04:38 +01001428}
1429
Jon Paul Maloye099e862014-02-13 17:29:18 -05001430/* tipc_link_find_owner - locate owner node of link by link's name
Ying Xuef2f98002015-01-09 15:27:05 +08001431 * @net: the applicable net namespace
Jon Paul Maloye099e862014-02-13 17:29:18 -05001432 * @link_name: pointer to link name string
1433 * @bearer_id: pointer to index in 'node->links' array where the link was found.
YOSHIFUJI Hideakic4307282007-02-09 23:25:21 +09001434 *
Jon Paul Maloye099e862014-02-13 17:29:18 -05001435 * Returns pointer to node owning the link, or NULL if no matching link is found.
Per Lidenb97bf3f2006-01-02 19:04:38 +01001436 */
Ying Xuef2f98002015-01-09 15:27:05 +08001437static struct tipc_node *tipc_link_find_owner(struct net *net,
1438 const char *link_name,
Jon Paul Maloye099e862014-02-13 17:29:18 -05001439 unsigned int *bearer_id)
Per Lidenb97bf3f2006-01-02 19:04:38 +01001440{
Ying Xuef2f98002015-01-09 15:27:05 +08001441 struct tipc_net *tn = net_generic(net, tipc_net_id);
Paul Gortmakera18c4bc2011-12-29 20:58:42 -05001442 struct tipc_link *l_ptr;
Erik Hugnebbfbe472013-10-18 07:23:21 +02001443 struct tipc_node *n_ptr;
Fabian Frederick886eaa12014-12-25 12:05:50 +01001444 struct tipc_node *found_node = NULL;
Erik Hugnebbfbe472013-10-18 07:23:21 +02001445 int i;
Per Lidenb97bf3f2006-01-02 19:04:38 +01001446
Jon Paul Maloye099e862014-02-13 17:29:18 -05001447 *bearer_id = 0;
Ying Xue6c7a7622014-03-27 12:54:37 +08001448 rcu_read_lock();
Ying Xuef2f98002015-01-09 15:27:05 +08001449 list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
Jon Paul Maloya11607f2014-02-14 16:40:44 -05001450 tipc_node_lock(n_ptr);
Erik Hugnebbfbe472013-10-18 07:23:21 +02001451 for (i = 0; i < MAX_BEARERS; i++) {
Jon Paul Maloy9d13ec62015-07-16 16:54:19 -04001452 l_ptr = n_ptr->links[i].link;
Jon Paul Maloye099e862014-02-13 17:29:18 -05001453 if (l_ptr && !strcmp(l_ptr->name, link_name)) {
1454 *bearer_id = i;
1455 found_node = n_ptr;
1456 break;
1457 }
Erik Hugnebbfbe472013-10-18 07:23:21 +02001458 }
Jon Paul Maloya11607f2014-02-14 16:40:44 -05001459 tipc_node_unlock(n_ptr);
Jon Paul Maloye099e862014-02-13 17:29:18 -05001460 if (found_node)
1461 break;
Erik Hugnebbfbe472013-10-18 07:23:21 +02001462 }
Ying Xue6c7a7622014-03-27 12:54:37 +08001463 rcu_read_unlock();
1464
Jon Paul Maloye099e862014-02-13 17:29:18 -05001465 return found_node;
Per Lidenb97bf3f2006-01-02 19:04:38 +01001466}
1467
Allan Stephens5c216e12011-10-18 11:34:29 -04001468/**
Per Lidenb97bf3f2006-01-02 19:04:38 +01001469 * link_reset_statistics - reset link statistics
1470 * @l_ptr: pointer to link
1471 */
Paul Gortmakera18c4bc2011-12-29 20:58:42 -05001472static void link_reset_statistics(struct tipc_link *l_ptr)
Per Lidenb97bf3f2006-01-02 19:04:38 +01001473{
1474 memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
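	/* Seed the info counters with the current next-sequence numbers, so
	 * that statistics gathered after the reset are counted relative to
	 * this point rather than from zero.
	 */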
Jon Paul Maloya97b9d32015-05-14 10:46:15 -04001475 l_ptr->stats.sent_info = l_ptr->snd_nxt;
1476 l_ptr->stats.recv_info = l_ptr->rcv_nxt;
Per Lidenb97bf3f2006-01-02 19:04:38 +01001477}
1478
Jon Paul Maloy1a20cc22015-07-16 16:54:30 -04001479static void link_print(struct tipc_link *l, const char *str)
Per Lidenb97bf3f2006-01-02 19:04:38 +01001480{
Jon Paul Maloy1a20cc22015-07-16 16:54:30 -04001481 struct sk_buff *hskb = skb_peek(&l->transmq);
1482 u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt;
1483 u16 tail = l->snd_nxt - 1;
Ying Xue7a2f7d12014-04-21 10:55:46 +08001484
Jon Paul Maloy662921c2015-07-30 18:24:21 -04001485 pr_info("%s Link <%s> state %x\n", str, l->name, l->state);
Jon Paul Maloy1a20cc22015-07-16 16:54:30 -04001486 pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n",
1487 skb_queue_len(&l->transmq), head, tail,
1488 skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt);
Per Lidenb97bf3f2006-01-02 19:04:38 +01001489}
Richard Alpe0655f6a2014-11-20 10:29:07 +01001490
1491/* Parse and validate nested (link) properties valid for media, bearer and link
1492 */
1493int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
1494{
1495 int err;
1496
1497 err = nla_parse_nested(props, TIPC_NLA_PROP_MAX, prop,
1498 tipc_nl_prop_policy);
1499 if (err)
1500 return err;
1501
1502 if (props[TIPC_NLA_PROP_PRIO]) {
1503 u32 prio;
1504
1505 prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
1506 if (prio > TIPC_MAX_LINK_PRI)
1507 return -EINVAL;
1508 }
1509
1510 if (props[TIPC_NLA_PROP_TOL]) {
1511 u32 tol;
1512
1513 tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
1514 if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
1515 return -EINVAL;
1516 }
1517
1518 if (props[TIPC_NLA_PROP_WIN]) {
1519 u32 win;
1520
1521 win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
1522 if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN))
1523 return -EINVAL;
1524 }
1525
1526 return 0;
1527}
Richard Alpe7be57fc2014-11-20 10:29:12 +01001528
Richard Alpef96ce7a2014-11-20 10:29:13 +01001529int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)
1530{
1531 int err;
1532 int res = 0;
1533 int bearer_id;
1534 char *name;
1535 struct tipc_link *link;
1536 struct tipc_node *node;
1537 struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
Richard Alpe37e2d482015-02-09 09:50:08 +01001538 struct net *net = sock_net(skb->sk);
Richard Alpef96ce7a2014-11-20 10:29:13 +01001539
1540 if (!info->attrs[TIPC_NLA_LINK])
1541 return -EINVAL;
1542
1543 err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
1544 info->attrs[TIPC_NLA_LINK],
1545 tipc_nl_link_policy);
1546 if (err)
1547 return err;
1548
1549 if (!attrs[TIPC_NLA_LINK_NAME])
1550 return -EINVAL;
1551
1552 name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
1553
Richard Alpe670f4f82015-05-06 13:58:55 +02001554 if (strcmp(name, tipc_bclink_name) == 0)
1555 return tipc_nl_bc_link_set(net, attrs);
1556
Ying Xuef2f98002015-01-09 15:27:05 +08001557 node = tipc_link_find_owner(net, name, &bearer_id);
Richard Alpef96ce7a2014-11-20 10:29:13 +01001558 if (!node)
1559 return -EINVAL;
1560
1561 tipc_node_lock(node);
1562
Jon Paul Maloy9d13ec62015-07-16 16:54:19 -04001563 link = node->links[bearer_id].link;
Richard Alpef96ce7a2014-11-20 10:29:13 +01001564 if (!link) {
1565 res = -EINVAL;
1566 goto out;
1567 }
1568
1569 if (attrs[TIPC_NLA_LINK_PROP]) {
1570 struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
1571
1572 err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP],
1573 props);
1574 if (err) {
1575 res = err;
1576 goto out;
1577 }
1578
1579 if (props[TIPC_NLA_PROP_TOL]) {
1580 u32 tol;
1581
1582 tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
Jon Paul Maloy8a1577c2015-07-16 16:54:29 -04001583 link->tolerance = tol;
Jon Paul Maloyed193ec2015-04-02 09:33:02 -04001584 tipc_link_proto_xmit(link, STATE_MSG, 0, 0, tol, 0);
Richard Alpef96ce7a2014-11-20 10:29:13 +01001585 }
1586 if (props[TIPC_NLA_PROP_PRIO]) {
1587 u32 prio;
1588
1589 prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
1590 link->priority = prio;
Jon Paul Maloyed193ec2015-04-02 09:33:02 -04001591 tipc_link_proto_xmit(link, STATE_MSG, 0, 0, 0, prio);
Richard Alpef96ce7a2014-11-20 10:29:13 +01001592 }
1593 if (props[TIPC_NLA_PROP_WIN]) {
1594 u32 win;
1595
1596 win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
1597 tipc_link_set_queue_limits(link, win);
1598 }
1599 }
1600
1601out:
1602 tipc_node_unlock(node);
1603
1604 return res;
1605}
Richard Alped8182802014-11-24 11:10:29 +01001606
1607static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
Richard Alpe7be57fc2014-11-20 10:29:12 +01001608{
1609 int i;
1610 struct nlattr *stats;
1611
1612 struct nla_map {
1613 u32 key;
1614 u32 val;
1615 };
1616
1617 struct nla_map map[] = {
1618 {TIPC_NLA_STATS_RX_INFO, s->recv_info},
1619 {TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
1620 {TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
1621 {TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
1622 {TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
1623 {TIPC_NLA_STATS_TX_INFO, s->sent_info},
1624 {TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
1625 {TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
1626 {TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
1627 {TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
1628 {TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
1629 s->msg_length_counts : 1},
1630 {TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
1631 {TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
1632 {TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
1633 {TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
1634 {TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
1635 {TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
1636 {TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
1637 {TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
1638 {TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
1639 {TIPC_NLA_STATS_RX_STATES, s->recv_states},
1640 {TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
1641 {TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
1642 {TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
1643 {TIPC_NLA_STATS_TX_STATES, s->sent_states},
1644 {TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
1645 {TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
1646 {TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
1647 {TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
1648 {TIPC_NLA_STATS_DUPLICATES, s->duplicates},
1649 {TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
1650 {TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
1651 {TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
1652 (s->accu_queue_sz / s->queue_sz_counts) : 0}
1653 };
1654
1655 stats = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
1656 if (!stats)
1657 return -EMSGSIZE;
1658
1659 for (i = 0; i < ARRAY_SIZE(map); i++)
1660 if (nla_put_u32(skb, map[i].key, map[i].val))
1661 goto msg_full;
1662
1663 nla_nest_end(skb, stats);
1664
1665 return 0;
1666msg_full:
1667 nla_nest_cancel(skb, stats);
1668
1669 return -EMSGSIZE;
1670}
1671
1672/* Caller should hold appropriate locks to protect the link */
Ying Xue34747532015-01-09 15:27:10 +08001673static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
Nicolas Dichtelf2f67392015-04-28 18:33:50 +02001674 struct tipc_link *link, int nlflags)
Richard Alpe7be57fc2014-11-20 10:29:12 +01001675{
1676 int err;
1677 void *hdr;
1678 struct nlattr *attrs;
1679 struct nlattr *prop;
Ying Xue34747532015-01-09 15:27:10 +08001680 struct tipc_net *tn = net_generic(net, tipc_net_id);
Richard Alpe7be57fc2014-11-20 10:29:12 +01001681
Richard Alpebfb3e5d2015-02-09 09:50:03 +01001682 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
Nicolas Dichtelf2f67392015-04-28 18:33:50 +02001683 nlflags, TIPC_NL_LINK_GET);
Richard Alpe7be57fc2014-11-20 10:29:12 +01001684 if (!hdr)
1685 return -EMSGSIZE;
1686
1687 attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
1688 if (!attrs)
1689 goto msg_full;
1690
1691 if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
1692 goto attr_msg_full;
1693 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST,
Ying Xue34747532015-01-09 15:27:10 +08001694 tipc_cluster_mask(tn->own_addr)))
Richard Alpe7be57fc2014-11-20 10:29:12 +01001695 goto attr_msg_full;
Jon Paul Maloyed193ec2015-04-02 09:33:02 -04001696 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
Richard Alpe7be57fc2014-11-20 10:29:12 +01001697 goto attr_msg_full;
Jon Paul Maloya97b9d32015-05-14 10:46:15 -04001698 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->rcv_nxt))
Richard Alpe7be57fc2014-11-20 10:29:12 +01001699 goto attr_msg_full;
Jon Paul Maloya97b9d32015-05-14 10:46:15 -04001700 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->snd_nxt))
Richard Alpe7be57fc2014-11-20 10:29:12 +01001701 goto attr_msg_full;
1702
1703 if (tipc_link_is_up(link))
1704 if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
1705 goto attr_msg_full;
1706 if (tipc_link_is_active(link))
1707 if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
1708 goto attr_msg_full;
1709
1710 prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
1711 if (!prop)
1712 goto attr_msg_full;
1713 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
1714 goto prop_msg_full;
1715 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
1716 goto prop_msg_full;
1717 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN,
Jon Paul Maloy1f66d162015-03-25 12:07:24 -04001718 link->window))
Richard Alpe7be57fc2014-11-20 10:29:12 +01001719 goto prop_msg_full;
1722 nla_nest_end(msg->skb, prop);
1723
1724 err = __tipc_nl_add_stats(msg->skb, &link->stats);
1725 if (err)
1726 goto attr_msg_full;
1727
1728 nla_nest_end(msg->skb, attrs);
1729 genlmsg_end(msg->skb, hdr);
1730
1731 return 0;
1732
1733prop_msg_full:
1734 nla_nest_cancel(msg->skb, prop);
1735attr_msg_full:
1736 nla_nest_cancel(msg->skb, attrs);
1737msg_full:
1738 genlmsg_cancel(msg->skb, hdr);
1739
1740 return -EMSGSIZE;
1741}
1742
1743/* Caller should hold node lock */
Ying Xue34747532015-01-09 15:27:10 +08001744static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
1745 struct tipc_node *node, u32 *prev_link)
Richard Alpe7be57fc2014-11-20 10:29:12 +01001746{
1747 u32 i;
1748 int err;
1749
1750 for (i = *prev_link; i < MAX_BEARERS; i++) {
1751 *prev_link = i;
1752
Jon Paul Maloy9d13ec62015-07-16 16:54:19 -04001753 if (!node->links[i].link)
Richard Alpe7be57fc2014-11-20 10:29:12 +01001754 continue;
1755
Jon Paul Maloy9d13ec62015-07-16 16:54:19 -04001756 err = __tipc_nl_add_link(net, msg,
1757 node->links[i].link, NLM_F_MULTI);
Richard Alpe7be57fc2014-11-20 10:29:12 +01001758 if (err)
1759 return err;
1760 }
1761 *prev_link = 0;
1762
1763 return 0;
1764}
1765
1766int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb)
1767{
Ying Xuef2f98002015-01-09 15:27:05 +08001768 struct net *net = sock_net(skb->sk);
1769 struct tipc_net *tn = net_generic(net, tipc_net_id);
Richard Alpe7be57fc2014-11-20 10:29:12 +01001770 struct tipc_node *node;
1771 struct tipc_nl_msg msg;
1772 u32 prev_node = cb->args[0];
1773 u32 prev_link = cb->args[1];
1774 int done = cb->args[2];
1775 int err;
1776
1777 if (done)
1778 return 0;
1779
1780 msg.skb = skb;
1781 msg.portid = NETLINK_CB(cb->skb).portid;
1782 msg.seq = cb->nlh->nlmsg_seq;
1783
1784 rcu_read_lock();
Richard Alpe7be57fc2014-11-20 10:29:12 +01001785 if (prev_node) {
Ying Xuef2f98002015-01-09 15:27:05 +08001786 node = tipc_node_find(net, prev_node);
Richard Alpe7be57fc2014-11-20 10:29:12 +01001787 if (!node) {
1788 /* We never set seq or call nl_dump_check_consistent()
1789 * this means that setting prev_seq here will cause the
1790 * consistency check to fail in the netlink callback
1791 * handler. Resulting in the last NLMSG_DONE message
1792 * having the NLM_F_DUMP_INTR flag set.
1793 */
1794 cb->prev_seq = 1;
1795 goto out;
1796 }
Ying Xue8a0f6eb2015-03-26 18:10:24 +08001797 tipc_node_put(node);
Richard Alpe7be57fc2014-11-20 10:29:12 +01001798
Ying Xuef2f98002015-01-09 15:27:05 +08001799 list_for_each_entry_continue_rcu(node, &tn->node_list,
1800 list) {
Richard Alpe7be57fc2014-11-20 10:29:12 +01001801 tipc_node_lock(node);
Ying Xue34747532015-01-09 15:27:10 +08001802 err = __tipc_nl_add_node_links(net, &msg, node,
1803 &prev_link);
Richard Alpe7be57fc2014-11-20 10:29:12 +01001804 tipc_node_unlock(node);
1805 if (err)
1806 goto out;
1807
1808 prev_node = node->addr;
1809 }
1810 } else {
Ying Xue1da46562015-01-09 15:27:07 +08001811 err = tipc_nl_add_bc_link(net, &msg);
Richard Alpe7be57fc2014-11-20 10:29:12 +01001812 if (err)
1813 goto out;
1814
Ying Xuef2f98002015-01-09 15:27:05 +08001815 list_for_each_entry_rcu(node, &tn->node_list, list) {
Richard Alpe7be57fc2014-11-20 10:29:12 +01001816 tipc_node_lock(node);
Ying Xue34747532015-01-09 15:27:10 +08001817 err = __tipc_nl_add_node_links(net, &msg, node,
1818 &prev_link);
Richard Alpe7be57fc2014-11-20 10:29:12 +01001819 tipc_node_unlock(node);
1820 if (err)
1821 goto out;
1822
1823 prev_node = node->addr;
1824 }
1825 }
1826 done = 1;
1827out:
1828 rcu_read_unlock();
1829
1830 cb->args[0] = prev_node;
1831 cb->args[1] = prev_link;
1832 cb->args[2] = done;
1833
1834 return skb->len;
1835}
1836
1837int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info)
1838{
Ying Xuef2f98002015-01-09 15:27:05 +08001839 struct net *net = genl_info_net(info);
Richard Alpe7be57fc2014-11-20 10:29:12 +01001840 struct tipc_nl_msg msg;
Richard Alpe7be57fc2014-11-20 10:29:12 +01001841 char *name;
Richard Alpe7be57fc2014-11-20 10:29:12 +01001842 int err;
1843
Richard Alpe7be57fc2014-11-20 10:29:12 +01001844 msg.portid = info->snd_portid;
1845 msg.seq = info->snd_seq;
1846
Richard Alpe670f4f82015-05-06 13:58:55 +02001847 if (!info->attrs[TIPC_NLA_LINK_NAME])
1848 return -EINVAL;
1849 name = nla_data(info->attrs[TIPC_NLA_LINK_NAME]);
1850
1851 msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
1852 if (!msg.skb)
1853 return -ENOMEM;
1854
1855 if (strcmp(name, tipc_bclink_name) == 0) {
1856 err = tipc_nl_add_bc_link(net, &msg);
1857 if (err) {
1858 nlmsg_free(msg.skb);
1859 return err;
1860 }
1861 } else {
1862 int bearer_id;
1863 struct tipc_node *node;
1864 struct tipc_link *link;
1865
1866 node = tipc_link_find_owner(net, name, &bearer_id);
1867 if (!node)
1868 return -EINVAL;
1869
1870 tipc_node_lock(node);
Jon Paul Maloy9d13ec62015-07-16 16:54:19 -04001871 link = node->links[bearer_id].link;
Richard Alpe670f4f82015-05-06 13:58:55 +02001872 if (!link) {
1873 tipc_node_unlock(node);
1874 nlmsg_free(msg.skb);
1875 return -EINVAL;
1876 }
1877
1878 err = __tipc_nl_add_link(net, &msg, link, 0);
1879 tipc_node_unlock(node);
1880 if (err) {
1881 nlmsg_free(msg.skb);
1882 return err;
1883 }
Richard Alpe7be57fc2014-11-20 10:29:12 +01001884 }
1885
Richard Alpe670f4f82015-05-06 13:58:55 +02001886 return genlmsg_reply(msg.skb, info);
Richard Alpe7be57fc2014-11-20 10:29:12 +01001887}
Richard Alpeae363422014-11-20 10:29:14 +01001888
1889int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info)
1890{
1891 int err;
1892 char *link_name;
1893 unsigned int bearer_id;
1894 struct tipc_link *link;
1895 struct tipc_node *node;
1896 struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
Richard Alpe18178772015-02-09 09:50:09 +01001897 struct net *net = sock_net(skb->sk);
Richard Alpeae363422014-11-20 10:29:14 +01001898
1899 if (!info->attrs[TIPC_NLA_LINK])
1900 return -EINVAL;
1901
1902 err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
1903 info->attrs[TIPC_NLA_LINK],
1904 tipc_nl_link_policy);
1905 if (err)
1906 return err;
1907
1908 if (!attrs[TIPC_NLA_LINK_NAME])
1909 return -EINVAL;
1910
1911 link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
1912
1913 if (strcmp(link_name, tipc_bclink_name) == 0) {
Ying Xue1da46562015-01-09 15:27:07 +08001914 err = tipc_bclink_reset_stats(net);
Richard Alpeae363422014-11-20 10:29:14 +01001915 if (err)
1916 return err;
1917 return 0;
1918 }
1919
Ying Xuef2f98002015-01-09 15:27:05 +08001920 node = tipc_link_find_owner(net, link_name, &bearer_id);
Richard Alpeae363422014-11-20 10:29:14 +01001921 if (!node)
1922 return -EINVAL;
1923
1924 tipc_node_lock(node);
1925
Jon Paul Maloy9d13ec62015-07-16 16:54:19 -04001926 link = node->links[bearer_id].link;
Richard Alpeae363422014-11-20 10:29:14 +01001927 if (!link) {
1928 tipc_node_unlock(node);
1929 return -EINVAL;
1930 }
1931
1932 link_reset_statistics(link);
1933
1934 tipc_node_unlock(node);
1935
1936 return 0;
1937}