/*
 * net/tipc/link.c: TIPC link code
 *
 * Copyright (c) 1996-2007, 2012-2015, Ericsson AB
 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "subscr.h"
#include "link.h"
#include "bcast.h"
#include "socket.h"
#include "name_distr.h"
#include "discover.h"
#include "netlink.h"

#include <linux/pkt_sched.h>

/*
 * Error message prefixes
 */
static const char *link_co_err = "Link tunneling error, ";
static const char *link_rst_msg = "Resetting link ";
static const char tipc_bclink_name[] = "broadcast-link";

static const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
	[TIPC_NLA_LINK_UNSPEC]		= { .type = NLA_UNSPEC },
	[TIPC_NLA_LINK_NAME] = {
		.type = NLA_STRING,
		.len = TIPC_MAX_LINK_NAME
	},
	[TIPC_NLA_LINK_MTU]		= { .type = NLA_U32 },
	[TIPC_NLA_LINK_BROADCAST]	= { .type = NLA_FLAG },
	[TIPC_NLA_LINK_UP]		= { .type = NLA_FLAG },
	[TIPC_NLA_LINK_ACTIVE]		= { .type = NLA_FLAG },
	[TIPC_NLA_LINK_PROP]		= { .type = NLA_NESTED },
	[TIPC_NLA_LINK_STATS]		= { .type = NLA_NESTED },
	[TIPC_NLA_LINK_RX]		= { .type = NLA_U32 },
	[TIPC_NLA_LINK_TX]		= { .type = NLA_U32 }
};

/* Properties valid for media, bearer and link */
static const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
	[TIPC_NLA_PROP_UNSPEC]		= { .type = NLA_UNSPEC },
	[TIPC_NLA_PROP_PRIO]		= { .type = NLA_U32 },
	[TIPC_NLA_PROP_TOL]		= { .type = NLA_U32 },
	[TIPC_NLA_PROP_WIN]		= { .type = NLA_U32 }
};

/* Send states for broadcast NACKs
 */
enum {
	BC_NACK_SND_CONDITIONAL,
	BC_NACK_SND_UNCONDITIONAL,
	BC_NACK_SND_SUPPRESS,
};

/*
 * Interval between NACKs when packets arrive out of order
 */
#define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2)
/*
 * Out-of-range value for link session numbers
 */
#define WILDCARD_SESSION 0x10000

/* Link FSM states:
 */
enum {
	LINK_ESTABLISHED	= 0xe,
	LINK_ESTABLISHING	= 0xe << 4,
	LINK_RESET		= 0x1 << 8,
	LINK_RESETTING		= 0x2 << 12,
	LINK_PEER_RESET		= 0xd << 16,
	LINK_FAILINGOVER	= 0xf << 20,
	LINK_SYNCHING		= 0xc << 24
};
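
/* Note on the encoding above: each state value occupies its own 4-bit
 * field, so the states are mutually non-overlapping bit patterns. This
 * lets the predicates below test membership in a set of states with a
 * single bitwise AND (e.g. link_is_up(), tipc_link_is_blocked()) instead
 * of comparing against each state in turn.
 */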

/* Link FSM state checking routines
 */
static int link_is_up(struct tipc_link *l)
{
	return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
}

static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
			       struct sk_buff_head *xmitq);
static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
				      u16 rcvgap, int tolerance, int priority,
				      struct sk_buff_head *xmitq);
static void link_reset_statistics(struct tipc_link *l_ptr);
static void link_print(struct tipc_link *l_ptr, const char *str);
static void tipc_link_build_nack_msg(struct tipc_link *l,
				     struct sk_buff_head *xmitq);
static void tipc_link_build_bc_init_msg(struct tipc_link *l,
					struct sk_buff_head *xmitq);
static bool tipc_link_release_pkts(struct tipc_link *l, u16 to);

/*
 * Simple non-static link routines (i.e. referenced outside this file)
 */
bool tipc_link_is_up(struct tipc_link *l)
{
	return link_is_up(l);
}

bool tipc_link_peer_is_down(struct tipc_link *l)
{
	return l->state == LINK_PEER_RESET;
}

bool tipc_link_is_reset(struct tipc_link *l)
{
	return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING);
}

bool tipc_link_is_establishing(struct tipc_link *l)
{
	return l->state == LINK_ESTABLISHING;
}

bool tipc_link_is_synching(struct tipc_link *l)
{
	return l->state == LINK_SYNCHING;
}

bool tipc_link_is_failingover(struct tipc_link *l)
{
	return l->state == LINK_FAILINGOVER;
}

bool tipc_link_is_blocked(struct tipc_link *l)
{
	return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER);
}

static bool link_is_bc_sndlink(struct tipc_link *l)
{
	return !l->bc_sndlink;
}

static bool link_is_bc_rcvlink(struct tipc_link *l)
{
	return ((l->bc_rcvlink == l) && !link_is_bc_sndlink(l));
}
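
/* Reading the two helpers above: a link instance created without a
 * bc_sndlink pointer is the broadcast send link itself; a link that is
 * its own bc_rcvlink (and does have a send link) is a per-peer broadcast
 * receive link; everything else is an ordinary unicast link.
 */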

int tipc_link_is_active(struct tipc_link *l)
{
	return l->active;
}

void tipc_link_set_active(struct tipc_link *l, bool active)
{
	l->active = active;
}

void tipc_link_add_bc_peer(struct tipc_link *snd_l,
			   struct tipc_link *uc_l,
			   struct sk_buff_head *xmitq)
{
	struct tipc_link *rcv_l = uc_l->bc_rcvlink;

	snd_l->ackers++;
	rcv_l->acked = snd_l->snd_nxt - 1;
	snd_l->state = LINK_ESTABLISHED;
	tipc_link_build_bc_init_msg(uc_l, xmitq);
}

void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
			      struct tipc_link *rcv_l,
			      struct sk_buff_head *xmitq)
{
	u16 ack = snd_l->snd_nxt - 1;

	snd_l->ackers--;
	tipc_link_bc_ack_rcv(rcv_l, ack, xmitq);
	tipc_link_reset(rcv_l);
	rcv_l->state = LINK_RESET;
	if (!snd_l->ackers) {
		tipc_link_reset(snd_l);
		snd_l->state = LINK_RESET;
		__skb_queue_purge(xmitq);
	}
}
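
/* Note: snd_l->ackers counts how many broadcast receive links must ack
 * each packet on the broadcast send link. A newly added peer starts
 * acking from the current send position; when the last peer is removed
 * the send link is reset and the pending transmission queue is purged,
 * since nobody is left to acknowledge it.
 */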

int tipc_link_bc_peers(struct tipc_link *l)
{
	return l->ackers;
}

void tipc_link_set_mtu(struct tipc_link *l, int mtu)
{
	l->mtu = mtu;
}

int tipc_link_mtu(struct tipc_link *l)
{
	return l->mtu;
}

static u32 link_own_addr(struct tipc_link *l)
{
	return msg_prevnode(l->pmsg);
}

/**
 * tipc_link_create - create a new link
 * @net: pointer to associated network namespace
 * @if_name: associated interface name
 * @bearer_id: id (index) of associated bearer
 * @tolerance: link tolerance to be used by link
 * @net_plane: network plane (A,B,c..) this link belongs to
 * @mtu: mtu to be advertised by link
 * @priority: priority to be used by link
 * @window: send window to be used by link
 * @session: session to be used by link
 * @ownnode: identity of own node
 * @peer: node id of peer node
 * @peer_caps: bitmap describing peer node capabilities
 * @bc_sndlink: the namespace global link used for broadcast sending
 * @bc_rcvlink: the peer specific link used for broadcast reception
 * @inputq: queue to put messages ready for delivery
 * @namedq: queue to put binding table update messages ready for delivery
 * @link: return value, pointer to put the created link
 *
 * Returns true if link was created, otherwise false
 */
bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
		      int tolerance, char net_plane, u32 mtu, int priority,
		      int window, u32 session, u32 ownnode, u32 peer,
		      u16 peer_caps,
		      struct tipc_link *bc_sndlink,
		      struct tipc_link *bc_rcvlink,
		      struct sk_buff_head *inputq,
		      struct sk_buff_head *namedq,
		      struct tipc_link **link)
{
	struct tipc_link *l;
	struct tipc_msg *hdr;

	l = kzalloc(sizeof(*l), GFP_ATOMIC);
	if (!l)
		return false;
	*link = l;
	l->pmsg = (struct tipc_msg *)&l->proto_msg;
	hdr = l->pmsg;
	tipc_msg_init(ownnode, hdr, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, peer);
	msg_set_size(hdr, sizeof(l->proto_msg));
	msg_set_session(hdr, session);
	msg_set_bearer_id(hdr, l->bearer_id);

	/* Note: peer i/f name is completed by reset/activate message */
	sprintf(l->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
		tipc_zone(ownnode), tipc_cluster(ownnode), tipc_node(ownnode),
		if_name, tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
	strcpy((char *)msg_data(hdr), if_name);

	l->addr = peer;
	l->peer_caps = peer_caps;
	l->net = net;
	l->peer_session = WILDCARD_SESSION;
	l->bearer_id = bearer_id;
	l->tolerance = tolerance;
	l->net_plane = net_plane;
	l->advertised_mtu = mtu;
	l->mtu = mtu;
	l->priority = priority;
	tipc_link_set_queue_limits(l, window);
	l->ackers = 1;
	l->bc_sndlink = bc_sndlink;
	l->bc_rcvlink = bc_rcvlink;
	l->inputq = inputq;
	l->namedq = namedq;
	l->state = LINK_RESETTING;
	__skb_queue_head_init(&l->transmq);
	__skb_queue_head_init(&l->backlogq);
	__skb_queue_head_init(&l->deferdq);
	skb_queue_head_init(&l->wakeupq);
	skb_queue_head_init(l->inputq);
	return true;
}
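
/* Note: the link object is allocated with GFP_ATOMIC, so creation also
 * works from non-sleeping contexts (presumably under the node lock; that
 * is an assumption, not something visible in this file). On allocation
 * failure the function simply returns false and the caller must cope.
 */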

/**
 * tipc_link_bc_create - create new link to be used for broadcast
 * @net: pointer to associated network namespace
 * @mtu: mtu to be used
 * @window: send window to be used
 * @inputq: queue to put messages ready for delivery
 * @namedq: queue to put binding table update messages ready for delivery
 * @link: return value, pointer to put the created link
 *
 * Returns true if link was created, otherwise false
 */
bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
			 int mtu, int window, u16 peer_caps,
			 struct sk_buff_head *inputq,
			 struct sk_buff_head *namedq,
			 struct tipc_link *bc_sndlink,
			 struct tipc_link **link)
{
	struct tipc_link *l;

	if (!tipc_link_create(net, "", MAX_BEARERS, 0, 'Z', mtu, 0, window,
			      0, ownnode, peer, peer_caps, bc_sndlink,
			      NULL, inputq, namedq, link))
		return false;

	l = *link;
	strcpy(l->name, tipc_bclink_name);
	tipc_link_reset(l);
	l->state = LINK_RESET;
	l->ackers = 0;
	l->bc_rcvlink = l;

	/* Broadcast send link is always up */
	if (link_is_bc_sndlink(l))
		l->state = LINK_ESTABLISHED;

	return true;
}

/**
 * tipc_link_fsm_evt - link finite state machine
 * @l: pointer to link
 * @evt: state machine event to be processed
 */
int tipc_link_fsm_evt(struct tipc_link *l, int evt)
{
	int rc = 0;

	switch (l->state) {
	case LINK_RESETTING:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_FAILURE_EVT:
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILOVER_END_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_RESET:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_ESTABLISHING;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
			l->state = LINK_FAILINGOVER;
		case LINK_FAILURE_EVT:
		case LINK_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILOVER_END_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_PEER_RESET:
		switch (evt) {
		case LINK_RESET_EVT:
			l->state = LINK_ESTABLISHING;
			break;
		case LINK_PEER_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILURE_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_FAILINGOVER:
		switch (evt) {
		case LINK_FAILOVER_END_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_PEER_RESET_EVT:
		case LINK_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILURE_EVT:
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_ESTABLISHING:
		switch (evt) {
		case LINK_ESTABLISH_EVT:
			l->state = LINK_ESTABLISHED;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
			l->state = LINK_FAILINGOVER;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_FAILURE_EVT:
		case LINK_PEER_RESET_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
			break;
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_ESTABLISHED:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_FAILURE_EVT:
			l->state = LINK_RESETTING;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_ESTABLISH_EVT:
		case LINK_SYNCH_END_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
			l->state = LINK_SYNCHING;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_SYNCHING:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_FAILURE_EVT:
			l->state = LINK_RESETTING;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_ESTABLISH_EVT:
		case LINK_SYNCH_BEGIN_EVT:
			break;
		case LINK_SYNCH_END_EVT:
			l->state = LINK_ESTABLISHED;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	default:
		pr_err("Unknown FSM state %x in %s\n", l->state, l->name);
	}
	return rc;
illegal_evt:
	pr_err("Illegal FSM event %x in state %x on link %s\n",
	       evt, l->state, l->name);
	return rc;
}
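
/* Return convention for the FSM above: the state transition itself is a
 * side effect on l->state; the return value only accumulates events the
 * caller must act on (currently TIPC_LINK_DOWN_EVT). Illegal events are
 * logged but deliberately not treated as fatal, so rc is still 0 on that
 * path.
 */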

/* link_profile_stats - update statistical profiling of traffic
 */
static void link_profile_stats(struct tipc_link *l)
{
	struct sk_buff *skb;
	struct tipc_msg *msg;
	int length;

	/* Update counters used in statistical profiling of send traffic */
	l->stats.accu_queue_sz += skb_queue_len(&l->transmq);
	l->stats.queue_sz_counts++;

	skb = skb_peek(&l->transmq);
	if (!skb)
		return;
	msg = buf_msg(skb);
	length = msg_size(msg);

	if (msg_user(msg) == MSG_FRAGMENTER) {
		if (msg_type(msg) != FIRST_FRAGMENT)
			return;
		length = msg_size(msg_get_wrapped(msg));
	}
	l->stats.msg_lengths_total += length;
	l->stats.msg_length_counts++;
	if (length <= 64)
		l->stats.msg_length_profile[0]++;
	else if (length <= 256)
		l->stats.msg_length_profile[1]++;
	else if (length <= 1024)
		l->stats.msg_length_profile[2]++;
	else if (length <= 4096)
		l->stats.msg_length_profile[3]++;
	else if (length <= 16384)
		l->stats.msg_length_profile[4]++;
	else if (length <= 32768)
		l->stats.msg_length_profile[5]++;
	else
		l->stats.msg_length_profile[6]++;
}
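
/* The profile above buckets message lengths at 64, 256, 1024, 4096,
 * 16384 and 32768 bytes, with a seventh bucket for anything larger.
 * Fragmented messages are counted once, using the size of the original
 * wrapped message taken from the first fragment.
 */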

/* tipc_link_timeout - perform periodic task as instructed from node timeout
 */
int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	int rc = 0;
	int mtyp = STATE_MSG;
	bool xmit = false;
	bool prb = false;
	u16 bc_snt = l->bc_sndlink->snd_nxt - 1;
	u16 bc_acked = l->bc_rcvlink->acked;
	bool bc_up = link_is_up(l->bc_rcvlink);

	link_profile_stats(l);

	switch (l->state) {
	case LINK_ESTABLISHED:
	case LINK_SYNCHING:
		if (!l->silent_intv_cnt) {
			if (bc_up && (bc_acked != bc_snt))
				xmit = true;
		} else if (l->silent_intv_cnt <= l->abort_limit) {
			xmit = true;
			prb = true;
		} else {
			rc |= tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		}
		l->silent_intv_cnt++;
		break;
	case LINK_RESET:
		xmit = true;
		mtyp = RESET_MSG;
		break;
	case LINK_ESTABLISHING:
		xmit = true;
		mtyp = ACTIVATE_MSG;
		break;
	case LINK_PEER_RESET:
	case LINK_RESETTING:
	case LINK_FAILINGOVER:
		break;
	default:
		break;
	}

	if (xmit)
		tipc_link_build_proto_msg(l, mtyp, prb, 0, 0, 0, xmitq);

	return rc;
}
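
/* Probe policy above: l->silent_intv_cnt counts timer intervals without
 * traffic from the peer. While traffic flows, a plain STATE message is
 * sent only if broadcast acks are outstanding; after a silent interval a
 * probing STATE message is sent; and once more than abort_limit silent
 * intervals accumulate, the link is declared failed via LINK_FAILURE_EVT.
 */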

/**
 * link_schedule_user - schedule a message sender for wakeup after congestion
 * @link: congested link
 * @list: message that was attempted sent
 * Create pseudo msg to send back to user when congestion abates
 * Does not consume buffer list
 */
static int link_schedule_user(struct tipc_link *link, struct sk_buff_head *list)
{
	struct tipc_msg *msg = buf_msg(skb_peek(list));
	int imp = msg_importance(msg);
	u32 oport = msg_origport(msg);
	u32 addr = link_own_addr(link);
	struct sk_buff *skb;

	/* This really cannot happen...  */
	if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) {
		pr_warn("%s<%s>, send queue full", link_rst_msg, link->name);
		return -ENOBUFS;
	}
	/* Non-blocking sender: */
	if (TIPC_SKB_CB(skb_peek(list))->wakeup_pending)
		return -ELINKCONG;

	/* Create and schedule wakeup pseudo message */
	skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
			      addr, addr, oport, 0, 0);
	if (!skb)
		return -ENOBUFS;
	TIPC_SKB_CB(skb)->chain_sz = skb_queue_len(list);
	TIPC_SKB_CB(skb)->chain_imp = imp;
	skb_queue_tail(&link->wakeupq, skb);
	link->stats.link_congs++;
	return -ELINKCONG;
}
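
/* Congestion handling sketch: the blocked sender's buffer chain is left
 * untouched; instead a SOCK_WAKEUP pseudo message recording the chain
 * length and importance is parked on link->wakeupq. When acks later free
 * queue space, link_prepare_wakeup() below moves as many of these as the
 * backlog limits permit onto l->inputq, so the owning sockets can be
 * woken up.
 */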

/**
 * link_prepare_wakeup - prepare users for wakeup after congestion
 * @link: congested link
 * Move a number of waiting users, as permitted by available space in
 * the send queue, from link wait queue to node wait queue for wakeup
 */
void link_prepare_wakeup(struct tipc_link *l)
{
	int pnd[TIPC_SYSTEM_IMPORTANCE + 1] = {0,};
	int imp, lim;
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&l->wakeupq, skb, tmp) {
		imp = TIPC_SKB_CB(skb)->chain_imp;
		lim = l->window + l->backlog[imp].limit;
		pnd[imp] += TIPC_SKB_CB(skb)->chain_sz;
		if ((pnd[imp] + l->backlog[imp].len) >= lim)
			break;
		skb_unlink(skb, &l->wakeupq);
		skb_queue_tail(l->inputq, skb);
	}
}

void tipc_link_reset(struct tipc_link *l)
{
	/* Link is down, accept any session */
	l->peer_session = WILDCARD_SESSION;

	/* If peer is up, it only accepts an incremented session number */
	msg_set_session(l->pmsg, msg_session(l->pmsg) + 1);

	/* Prepare for renewed mtu size negotiation */
	l->mtu = l->advertised_mtu;

	/* Clean up all queues and counters: */
	__skb_queue_purge(&l->transmq);
	__skb_queue_purge(&l->deferdq);
	skb_queue_splice_init(&l->wakeupq, l->inputq);
	__skb_queue_purge(&l->backlogq);
	l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
	l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
	l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
	l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
	l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
	kfree_skb(l->reasm_buf);
	kfree_skb(l->failover_reasm_skb);
	l->reasm_buf = NULL;
	l->failover_reasm_skb = NULL;
	l->rcv_unacked = 0;
	l->snd_nxt = 1;
	l->rcv_nxt = 1;
	l->acked = 0;
	l->silent_intv_cnt = 0;
	l->stats.recv_info = 0;
	l->stale_count = 0;
	l->bc_peer_is_up = false;
	link_reset_statistics(l);
}

/**
 * tipc_link_xmit(): enqueue buffer list according to queue situation
 * @link: link to use
 * @list: chain of buffers containing message
 * @xmitq: returned list of packets to be sent by caller
 *
 * Consumes the buffer chain, except when returning -ELINKCONG,
 * since the caller then may want to make more send attempts.
 * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
 */
int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
		   struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb_peek(list));
	unsigned int maxwin = l->window;
	unsigned int i, imp = msg_importance(hdr);
	unsigned int mtu = l->mtu;
	u16 ack = l->rcv_nxt - 1;
	u16 seqno = l->snd_nxt;
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
	struct sk_buff_head *transmq = &l->transmq;
	struct sk_buff_head *backlogq = &l->backlogq;
	struct sk_buff *skb, *_skb, *bskb;

	/* Match msg importance against this and all higher backlog limits: */
	for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) {
		if (unlikely(l->backlog[i].len >= l->backlog[i].limit))
			return link_schedule_user(l, list);
	}
	if (unlikely(msg_size(hdr) > mtu))
		return -EMSGSIZE;

	/* Prepare each packet for sending, and add to relevant queue: */
	while (skb_queue_len(list)) {
		skb = skb_peek(list);
		hdr = buf_msg(skb);
		msg_set_seqno(hdr, seqno);
		msg_set_ack(hdr, ack);
		msg_set_bcast_ack(hdr, bc_ack);

		if (likely(skb_queue_len(transmq) < maxwin)) {
			_skb = skb_clone(skb, GFP_ATOMIC);
			if (!_skb)
				return -ENOBUFS;
			__skb_dequeue(list);
			__skb_queue_tail(transmq, skb);
			__skb_queue_tail(xmitq, _skb);
			TIPC_SKB_CB(skb)->ackers = l->ackers;
			l->rcv_unacked = 0;
			seqno++;
			continue;
		}
		if (tipc_msg_bundle(skb_peek_tail(backlogq), hdr, mtu)) {
			kfree_skb(__skb_dequeue(list));
			l->stats.sent_bundled++;
			continue;
		}
		if (tipc_msg_make_bundle(&bskb, hdr, mtu, l->addr)) {
			kfree_skb(__skb_dequeue(list));
			__skb_queue_tail(backlogq, bskb);
			l->backlog[msg_importance(buf_msg(bskb))].len++;
			l->stats.sent_bundled++;
			l->stats.sent_bundles++;
			continue;
		}
		l->backlog[imp].len += skb_queue_len(list);
		skb_queue_splice_tail_init(list, backlogq);
	}
	l->snd_nxt = seqno;
	return 0;
}
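
/* Disposition order above: while the send window has room, each packet
 * goes onto the transmit queue with a clone on xmitq for the bearer.
 * Once the window is full, a packet is first bundled into the last
 * backlog packet if it fits, else a fresh bundle is created for it, and
 * as a last resort the whole remaining chain is spliced onto backlogq.
 */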

void tipc_link_advance_backlog(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	struct sk_buff *skb, *_skb;
	struct tipc_msg *hdr;
	u16 seqno = l->snd_nxt;
	u16 ack = l->rcv_nxt - 1;
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;

	while (skb_queue_len(&l->transmq) < l->window) {
		skb = skb_peek(&l->backlogq);
		if (!skb)
			break;
		_skb = skb_clone(skb, GFP_ATOMIC);
		if (!_skb)
			break;
		__skb_dequeue(&l->backlogq);
		hdr = buf_msg(skb);
		l->backlog[msg_importance(hdr)].len--;
		__skb_queue_tail(&l->transmq, skb);
		__skb_queue_tail(xmitq, _skb);
		TIPC_SKB_CB(skb)->ackers = l->ackers;
		msg_set_seqno(hdr, seqno);
		msg_set_ack(hdr, ack);
		msg_set_bcast_ack(hdr, bc_ack);
		l->rcv_unacked = 0;
		seqno++;
	}
	l->snd_nxt = seqno;
}

static void link_retransmit_failure(struct tipc_link *l, struct sk_buff *skb)
{
	struct tipc_msg *hdr = buf_msg(skb);

	pr_warn("Retransmission failure on link <%s>\n", l->name);
	link_print(l, "Resetting link ");
	pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
		msg_user(hdr), msg_type(hdr), msg_size(hdr), msg_errcode(hdr));
	pr_info("sqno %u, prev: %x, src: %x\n",
		msg_seqno(hdr), msg_prevnode(hdr), msg_orignode(hdr));
}

int tipc_link_retrans(struct tipc_link *l, u16 from, u16 to,
		      struct sk_buff_head *xmitq)
{
	struct sk_buff *_skb, *skb = skb_peek(&l->transmq);
	struct tipc_msg *hdr;
	u16 ack = l->rcv_nxt - 1;
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;

	if (!skb)
		return 0;

	/* Detect repeated retransmit failures on same packet */
	if (likely(l->last_retransm != buf_seqno(skb))) {
		l->last_retransm = buf_seqno(skb);
		l->stale_count = 1;
	} else if (++l->stale_count > 100) {
		link_retransmit_failure(l, skb);
		return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
	}

	/* Move forward to where retransmission should start */
	skb_queue_walk(&l->transmq, skb) {
		if (!less(buf_seqno(skb), from))
			break;
	}

	skb_queue_walk_from(&l->transmq, skb) {
		if (more(buf_seqno(skb), to))
			break;
		hdr = buf_msg(skb);
		_skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC);
		if (!_skb)
			return 0;
		hdr = buf_msg(_skb);
		msg_set_ack(hdr, ack);
		msg_set_bcast_ack(hdr, bc_ack);
		_skb->priority = TC_PRIO_CONTROL;
		__skb_queue_tail(xmitq, _skb);
		l->stats.retransmitted++;
	}
	return 0;
}
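
/* The [from, to] range above is expressed in 16-bit circular sequence
 * numbers, hence the less()/more() helpers instead of plain comparisons.
 * Retransmitted copies are queued at TC_PRIO_CONTROL, presumably so they
 * are not starved behind ordinary traffic on the bearer.
 */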

/* tipc_data_input - deliver data and name distr msgs to upper layer
 *
 * Consumes buffer if message is of right type
 * Node lock must be held
 */
static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
			    struct sk_buff_head *inputq)
{
	switch (msg_user(buf_msg(skb))) {
	case TIPC_LOW_IMPORTANCE:
	case TIPC_MEDIUM_IMPORTANCE:
	case TIPC_HIGH_IMPORTANCE:
	case TIPC_CRITICAL_IMPORTANCE:
	case CONN_MANAGER:
		skb_queue_tail(inputq, skb);
		return true;
	case NAME_DISTRIBUTOR:
		l->bc_rcvlink->state = LINK_ESTABLISHED;
		skb_queue_tail(l->namedq, skb);
		return true;
	case MSG_BUNDLER:
	case TUNNEL_PROTOCOL:
	case MSG_FRAGMENTER:
	case BCAST_PROTOCOL:
		return false;
	default:
		pr_warn("Dropping received illegal msg type\n");
		kfree_skb(skb);
		return false;
	};
}

/* tipc_link_input - process packet that has passed link protocol check
 *
 * Consumes buffer
 */
static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
			   struct sk_buff_head *inputq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	struct sk_buff **reasm_skb = &l->reasm_buf;
	struct sk_buff *iskb;
	struct sk_buff_head tmpq;
	int usr = msg_user(hdr);
	int rc = 0;
	int pos = 0;
	int ipos = 0;

	if (unlikely(usr == TUNNEL_PROTOCOL)) {
		if (msg_type(hdr) == SYNCH_MSG) {
			__skb_queue_purge(&l->deferdq);
			goto drop;
		}
		if (!tipc_msg_extract(skb, &iskb, &ipos))
			return rc;
		kfree_skb(skb);
		skb = iskb;
		hdr = buf_msg(skb);
		if (less(msg_seqno(hdr), l->drop_point))
			goto drop;
		if (tipc_data_input(l, skb, inputq))
			return rc;
		usr = msg_user(hdr);
		reasm_skb = &l->failover_reasm_skb;
	}

	if (usr == MSG_BUNDLER) {
		skb_queue_head_init(&tmpq);
		l->stats.recv_bundles++;
		l->stats.recv_bundled += msg_msgcnt(hdr);
		while (tipc_msg_extract(skb, &iskb, &pos))
			tipc_data_input(l, iskb, &tmpq);
		tipc_skb_queue_splice_tail(&tmpq, inputq);
		return 0;
	} else if (usr == MSG_FRAGMENTER) {
		l->stats.recv_fragments++;
		if (tipc_buf_append(reasm_skb, &skb)) {
			l->stats.recv_fragmented++;
			tipc_data_input(l, skb, inputq);
		} else if (!*reasm_skb && !link_is_bc_rcvlink(l)) {
			pr_warn_ratelimited("Unable to build fragment list\n");
			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		}
		return 0;
	} else if (usr == BCAST_PROTOCOL) {
		tipc_bcast_lock(l->net);
		tipc_link_bc_init_rcv(l->bc_rcvlink, hdr);
		tipc_bcast_unlock(l->net);
	}
drop:
	kfree_skb(skb);
	return 0;
}

static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked)
{
	bool released = false;
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&l->transmq, skb, tmp) {
		if (more(buf_seqno(skb), acked))
			break;
		__skb_unlink(skb, &l->transmq);
		kfree_skb(skb);
		released = true;
	}
	return released;
}

/* tipc_link_build_ack_msg: prepare link acknowledge message for transmission
 *
 * Note that sending of broadcast ack is coordinated among nodes, to reduce
 * risk of ack storms towards the sender
 */
int tipc_link_build_ack_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	if (!l)
		return 0;

	/* Broadcast ACK must be sent via a unicast link => defer to caller */
	if (link_is_bc_rcvlink(l)) {
		if (((l->rcv_nxt ^ link_own_addr(l)) & 0xf) != 0xf)
			return 0;
		l->rcv_unacked = 0;
		return TIPC_LINK_SND_BC_ACK;
	}

	/* Unicast ACK */
	l->rcv_unacked = 0;
	l->stats.sent_acks++;
	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, xmitq);
	return 0;
}
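
/* The coordination test above, worked through: for a given rcv_nxt only
 * nodes whose low address nibble is the bitwise complement of the low
 * nibble of rcv_nxt satisfy ((rcv_nxt ^ addr) & 0xf) == 0xf. Each node
 * therefore acks only one in every sixteen window positions, so acks
 * from different nodes are staggered across the sequence space.
 */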

/* tipc_link_build_reset_msg: prepare link RESET or ACTIVATE message
 */
void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	int mtyp = RESET_MSG;

	if (l->state == LINK_ESTABLISHING)
		mtyp = ACTIVATE_MSG;

	tipc_link_build_proto_msg(l, mtyp, 0, 0, 0, 0, xmitq);
}

/* tipc_link_build_nack_msg: prepare link nack message for transmission
 */
static void tipc_link_build_nack_msg(struct tipc_link *l,
				     struct sk_buff_head *xmitq)
{
	u32 def_cnt = ++l->stats.deferred_recv;

	if (link_is_bc_rcvlink(l))
		return;

	if ((skb_queue_len(&l->deferdq) == 1) || !(def_cnt % TIPC_NACK_INTV))
		tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, xmitq);
}
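
/* NACK pacing: the first out-of-order packet (deferdq length == 1)
 * triggers an immediate NACK; after that only every TIPC_NACK_INTV:th
 * deferral does, bounding protocol traffic during long gaps. Broadcast
 * receive links never NACK from here; they follow the coordinated
 * scheme indicated by the BC_NACK_SND_* states further up.
 */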

/* tipc_link_rcv - process TIPC packets/messages arriving from off-node
 * @l: the link that should handle the message
 * @skb: TIPC packet
 * @xmitq: queue to place packets to be sent after this call
 */
int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
		  struct sk_buff_head *xmitq)
{
	struct sk_buff_head *defq = &l->deferdq;
	struct tipc_msg *hdr;
	u16 seqno, rcv_nxt, win_lim;
	int rc = 0;

	do {
		hdr = buf_msg(skb);
		seqno = msg_seqno(hdr);
		rcv_nxt = l->rcv_nxt;
		win_lim = rcv_nxt + TIPC_MAX_LINK_WIN;

		/* Verify and update link state */
		if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
			return tipc_link_proto_rcv(l, skb, xmitq);

		if (unlikely(!link_is_up(l))) {
			if (l->state == LINK_ESTABLISHING)
				rc = TIPC_LINK_UP_EVT;
			goto drop;
		}

		/* Don't send probe at next timeout expiration */
		l->silent_intv_cnt = 0;

		/* Drop if outside receive window */
		if (unlikely(less(seqno, rcv_nxt) || more(seqno, win_lim))) {
			l->stats.duplicates++;
			goto drop;
		}

		/* Forward queues and wake up waiting users */
		if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) {
			tipc_link_advance_backlog(l, xmitq);
			if (unlikely(!skb_queue_empty(&l->wakeupq)))
				link_prepare_wakeup(l);
		}

		/* Defer delivery if sequence gap */
		if (unlikely(seqno != rcv_nxt)) {
			__tipc_skb_queue_sorted(defq, seqno, skb);
			tipc_link_build_nack_msg(l, xmitq);
			break;
		}

		/* Deliver packet */
		l->rcv_nxt++;
		l->stats.recv_info++;
		if (!tipc_data_input(l, skb, l->inputq))
			rc |= tipc_link_input(l, skb, l->inputq);
		if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN))
			rc |= tipc_link_build_ack_msg(l, xmitq);
		if (unlikely(rc & ~TIPC_LINK_SND_BC_ACK))
			break;
	} while ((skb = __skb_dequeue(defq)));

	return rc;
drop:
	kfree_skb(skb);
	return rc;
}

/*
 * Send protocol message to the other endpoint.
 */
void tipc_link_proto_xmit(struct tipc_link *l, u32 msg_typ, int probe_msg,
			  u32 gap, u32 tolerance, u32 priority)
{
	struct sk_buff *skb = NULL;
	struct sk_buff_head xmitq;

	__skb_queue_head_init(&xmitq);
	tipc_link_build_proto_msg(l, msg_typ, probe_msg, gap,
				  tolerance, priority, &xmitq);
	skb = __skb_dequeue(&xmitq);
	if (!skb)
		return;
	tipc_bearer_xmit_skb(l->net, l->bearer_id, skb, l->media_addr);
	l->rcv_unacked = 0;
}

static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
				      u16 rcvgap, int tolerance, int priority,
				      struct sk_buff_head *xmitq)
{
	struct sk_buff *skb = NULL;
	struct tipc_msg *hdr = l->pmsg;
	bool node_up = link_is_up(l->bc_rcvlink);

	/* Don't send protocol message during reset or link failover */
	if (tipc_link_is_blocked(l))
		return;

	msg_set_type(hdr, mtyp);
	msg_set_net_plane(hdr, l->net_plane);
	msg_set_next_sent(hdr, l->snd_nxt);
	msg_set_ack(hdr, l->rcv_nxt - 1);
	msg_set_bcast_ack(hdr, l->bc_rcvlink->rcv_nxt - 1);
	msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
	msg_set_link_tolerance(hdr, tolerance);
	msg_set_linkprio(hdr, priority);
	msg_set_redundant_link(hdr, node_up);
	msg_set_seq_gap(hdr, 0);

	/* Compatibility: created msg must not be in sequence with pkt flow */
	msg_set_seqno(hdr, l->snd_nxt + U16_MAX / 2);

	if (mtyp == STATE_MSG) {
		if (!tipc_link_is_up(l))
			return;

		/* Override rcvgap if there are packets in deferred queue */
		if (!skb_queue_empty(&l->deferdq))
			rcvgap = buf_seqno(skb_peek(&l->deferdq)) - l->rcv_nxt;
		if (rcvgap) {
			msg_set_seq_gap(hdr, rcvgap);
			l->stats.sent_nacks++;
		}
		msg_set_probe(hdr, probe);
		if (probe)
			l->stats.sent_probes++;
		l->stats.sent_states++;
		l->rcv_unacked = 0;
	} else {
		/* RESET_MSG or ACTIVATE_MSG */
		msg_set_max_pkt(hdr, l->advertised_mtu);
		msg_set_ack(hdr, l->rcv_nxt - 1);
		msg_set_next_sent(hdr, 1);
	}
	skb = tipc_buf_acquire(msg_size(hdr));
	if (!skb)
		return;
	skb_copy_to_linear_data(skb, hdr, msg_size(hdr));
	skb->priority = TC_PRIO_CONTROL;
	__skb_queue_tail(xmitq, skb);
}

/* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
 * with contents of the link's transmit and backlog queues.
 */
void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
			   int mtyp, struct sk_buff_head *xmitq)
{
	struct sk_buff *skb, *tnlskb;
	struct tipc_msg *hdr, tnlhdr;
	struct sk_buff_head *queue = &l->transmq;
	struct sk_buff_head tmpxq, tnlq;
	u16 pktlen, pktcnt, seqno = l->snd_nxt;

	if (!tnl)
		return;

	skb_queue_head_init(&tnlq);
	skb_queue_head_init(&tmpxq);

	/* At least one packet required for safe algorithm => add dummy */
	skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
			      BASIC_H_SIZE, 0, l->addr, link_own_addr(l),
			      0, 0, TIPC_ERR_NO_PORT);
	if (!skb) {
		pr_warn("%sunable to create tunnel packet\n", link_co_err);
		return;
	}
	skb_queue_tail(&tnlq, skb);
	tipc_link_xmit(l, &tnlq, &tmpxq);
	__skb_queue_purge(&tmpxq);

	/* Initialize reusable tunnel packet header */
	tipc_msg_init(link_own_addr(l), &tnlhdr, TUNNEL_PROTOCOL,
		      mtyp, INT_H_SIZE, l->addr);
	pktcnt = skb_queue_len(&l->transmq) + skb_queue_len(&l->backlogq);
	msg_set_msgcnt(&tnlhdr, pktcnt);
	msg_set_bearer_id(&tnlhdr, l->peer_bearer_id);
tnl:
	/* Wrap each packet into a tunnel packet */
	skb_queue_walk(queue, skb) {
		hdr = buf_msg(skb);
		if (queue == &l->backlogq)
			msg_set_seqno(hdr, seqno++);
		pktlen = msg_size(hdr);
		msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
		tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE);
		if (!tnlskb) {
			pr_warn("%sunable to send packet\n", link_co_err);
			return;
		}
		skb_copy_to_linear_data(tnlskb, &tnlhdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(tnlskb, INT_H_SIZE, hdr, pktlen);
		__skb_queue_tail(&tnlq, tnlskb);
	}
	if (queue != &l->backlogq) {
		queue = &l->backlogq;
		goto tnl;
	}

	tipc_link_xmit(tnl, &tnlq, xmitq);

	if (mtyp == FAILOVER_MSG) {
		tnl->drop_point = l->rcv_nxt;
		tnl->failover_reasm_skb = l->reasm_buf;
		l->reasm_buf = NULL;
	}
}
1228
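/* Illustrative sketch, not part of this file: how a caller might drive
 * tipc_link_tnl_prepare() at failover time. The function and variable
 * names below (example_failover, failing_l, standby_l) are hypothetical;
 * in mainline TIPC the node layer performs the equivalent steps.
 */
static void example_failover(struct tipc_link *failing_l,
			     struct tipc_link *standby_l)
{
	struct sk_buff_head xmitq;

	__skb_queue_head_init(&xmitq);
	/* Wrap everything still sitting in the failing link's transmit
	 * and backlog queues into FAILOVER_MSG tunnel packets, queued
	 * for transmission on the standby link.
	 */
	tipc_link_tnl_prepare(failing_l, standby_l, FAILOVER_MSG, &xmitq);
	/* The caller would then hand xmitq to the bearer layer */
}
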
/* tipc_link_proto_rcv(): receive a link-level protocol message.
 * Note that the network plane id propagates through the network, and may
 * change at any time. The node with the lowest numerical id determines
 * the network plane.
 */
static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
			       struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	u16 rcvgap = 0;
	u16 ack = msg_ack(hdr);
	u16 gap = msg_seq_gap(hdr);
	u16 peers_snd_nxt = msg_next_sent(hdr);
	u16 peers_tol = msg_link_tolerance(hdr);
	u16 peers_prio = msg_linkprio(hdr);
	u16 rcv_nxt = l->rcv_nxt;
	int mtyp = msg_type(hdr);
	char *if_name;
	int rc = 0;

	if (tipc_link_is_blocked(l) || !xmitq)
		goto exit;

	if (link_own_addr(l) > msg_prevnode(hdr))
		l->net_plane = msg_net_plane(hdr);

	switch (mtyp) {
	case RESET_MSG:

		/* Ignore duplicate RESET with old session number */
		if ((less_eq(msg_session(hdr), l->peer_session)) &&
		    (l->peer_session != WILDCARD_SESSION))
			break;
		/* fall thru' */

	case ACTIVATE_MSG:

		/* Complete own link name with peer's interface name */
		if_name = strrchr(l->name, ':') + 1;
		if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
			break;
		if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
			break;
		strncpy(if_name, msg_data(hdr), TIPC_MAX_IF_NAME);

		/* Update own tolerance if peer indicates a non-zero value */
		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
			l->tolerance = peers_tol;

		/* Update own priority if peer's priority is higher */
		if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
			l->priority = peers_prio;

		/* ACTIVATE_MSG serves as PEER_RESET if link is already down */
		if ((mtyp == RESET_MSG) || !link_is_up(l))
			rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);

		/* ACTIVATE_MSG takes up link if it was already locally reset */
		if ((mtyp == ACTIVATE_MSG) && (l->state == LINK_ESTABLISHING))
			rc = TIPC_LINK_UP_EVT;

		l->peer_session = msg_session(hdr);
		l->peer_bearer_id = msg_bearer_id(hdr);
		if (l->mtu > msg_max_pkt(hdr))
			l->mtu = msg_max_pkt(hdr);
		break;

	case STATE_MSG:

		/* Update own tolerance if peer indicates a non-zero value */
		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
			l->tolerance = peers_tol;

		l->silent_intv_cnt = 0;
		l->stats.recv_states++;
		if (msg_probe(hdr))
			l->stats.recv_probes++;

		if (!link_is_up(l)) {
			if (l->state == LINK_ESTABLISHING)
				rc = TIPC_LINK_UP_EVT;
			break;
		}

		/* Send NACK if peer has sent pkts we haven't received yet */
		if (more(peers_snd_nxt, rcv_nxt) && !tipc_link_is_synching(l))
			rcvgap = peers_snd_nxt - l->rcv_nxt;
		if (rcvgap || (msg_probe(hdr)))
			tipc_link_build_proto_msg(l, STATE_MSG, 0, rcvgap,
						  0, 0, xmitq);
		tipc_link_release_pkts(l, ack);

		/* If NACK, retransmit will now start at right position */
		if (gap) {
			rc = tipc_link_retrans(l, ack + 1, ack + gap, xmitq);
			l->stats.recv_nacks++;
		}

		tipc_link_advance_backlog(l, xmitq);
		if (unlikely(!skb_queue_empty(&l->wakeupq)))
			link_prepare_wakeup(l);
	}
exit:
	kfree_skb(skb);
	return rc;
}

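/* Illustrative sketch, not part of this file: the wraparound-safe u16
 * sequence-number comparisons (less_eq/more) that the handler above
 * relies on. TIPC defines the real helpers elsewhere; the names below
 * are prefixed "example_" to make clear they are assumptions.
 */
static inline int example_less_eq(u16 left, u16 right)
{
	/* The subtraction is modulo 2^16, so the test holds across the
	 * 0xffff -> 0 wrap: "left <= right" as long as right is no more
	 * than half the sequence space ahead of left.
	 */
	return (u16)(right - left) < 32768;
}

static inline int example_more(u16 left, u16 right)
{
	return !example_less_eq(left, right);
}
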
/* tipc_link_build_bc_proto_msg() - create broadcast protocol message
 */
static bool tipc_link_build_bc_proto_msg(struct tipc_link *l, bool bcast,
					 u16 peers_snd_nxt,
					 struct sk_buff_head *xmitq)
{
	struct sk_buff *skb;
	struct tipc_msg *hdr;
	struct sk_buff *dfrd_skb = skb_peek(&l->deferdq);
	u16 ack = l->rcv_nxt - 1;
	u16 gap_to = peers_snd_nxt - 1;

	skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
			      0, l->addr, link_own_addr(l), 0, 0, 0);
	if (!skb)
		return false;
	hdr = buf_msg(skb);
	msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
	msg_set_bcast_ack(hdr, ack);
	msg_set_bcgap_after(hdr, ack);
	if (dfrd_skb)
		gap_to = buf_seqno(dfrd_skb) - 1;
	msg_set_bcgap_to(hdr, gap_to);
	msg_set_non_seq(hdr, bcast);
	__skb_queue_tail(xmitq, skb);
	return true;
}

/* tipc_link_build_bc_init_msg() - synchronize broadcast link endpoints.
 *
 * Give a newly added peer node the sequence number where it should
 * start receiving and acking broadcast packets.
 */
static void tipc_link_build_bc_init_msg(struct tipc_link *l,
					struct sk_buff_head *xmitq)
{
	struct sk_buff_head list;

	__skb_queue_head_init(&list);
	if (!tipc_link_build_bc_proto_msg(l->bc_rcvlink, false, 0, &list))
		return;
	tipc_link_xmit(l, &list, xmitq);
}

/* tipc_link_bc_init_rcv - receive initial broadcast synch data from peer
 */
void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr)
{
	int mtyp = msg_type(hdr);
	u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);

	if (link_is_up(l))
		return;

	if (msg_user(hdr) == BCAST_PROTOCOL) {
		l->rcv_nxt = peers_snd_nxt;
		l->state = LINK_ESTABLISHED;
		return;
	}

	if (l->peer_caps & TIPC_BCAST_SYNCH)
		return;

	if (msg_peer_node_is_up(hdr))
		return;

	/* Compatibility: accept older, less safe initial synch data */
	if ((mtyp == RESET_MSG) || (mtyp == ACTIVATE_MSG))
		l->rcv_nxt = peers_snd_nxt;
}

/* tipc_link_bc_sync_rcv - update rcv link according to peer's send state
 */
void tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
			   struct sk_buff_head *xmitq)
{
	u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);

	if (!link_is_up(l))
		return;

	if (!msg_peer_node_is_up(hdr))
		return;

	l->bc_peer_is_up = true;

	/* Ignore if peers_snd_nxt goes beyond receive window */
	if (more(peers_snd_nxt, l->rcv_nxt + l->window))
		return;

	if (!more(peers_snd_nxt, l->rcv_nxt)) {
		l->nack_state = BC_NACK_SND_CONDITIONAL;
		return;
	}

	/* Don't NACK if one was recently sent or peeked */
	if (l->nack_state == BC_NACK_SND_SUPPRESS) {
		l->nack_state = BC_NACK_SND_UNCONDITIONAL;
		return;
	}

	/* Conditionally delay NACK sending until next synch rcv */
	if (l->nack_state == BC_NACK_SND_CONDITIONAL) {
		l->nack_state = BC_NACK_SND_UNCONDITIONAL;
		if ((peers_snd_nxt - l->rcv_nxt) < TIPC_MIN_LINK_WIN)
			return;
	}

	/* Send NACK now but suppress next one */
	tipc_link_build_bc_proto_msg(l, true, peers_snd_nxt, xmitq);
	l->nack_state = BC_NACK_SND_SUPPRESS;
}

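/* Summary of the broadcast NACK suppression state machine implemented
 * above (derived from the code; added here as a reading aid):
 *
 *   peers_snd_nxt <= rcv_nxt   -> SND_CONDITIONAL (nothing is missing)
 *   state == SND_SUPPRESS      -> SND_UNCONDITIONAL, no NACK sent
 *   state == SND_CONDITIONAL   -> SND_UNCONDITIONAL, NACK sent only if
 *                                 the gap is >= TIPC_MIN_LINK_WIN
 *   otherwise                  -> send NACK, enter SND_SUPPRESS
 *
 * The SUPPRESS step keeps the many receivers of a broadcast from
 * flooding the sender with identical NACKs for the same gap.
 */
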
void tipc_link_bc_ack_rcv(struct tipc_link *l, u16 acked,
			  struct sk_buff_head *xmitq)
{
	struct sk_buff *skb, *tmp;
	struct tipc_link *snd_l = l->bc_sndlink;

	if (!link_is_up(l) || !l->bc_peer_is_up)
		return;

	if (!more(acked, l->acked))
		return;

	/* Skip over packets peer has already acked */
	skb_queue_walk(&snd_l->transmq, skb) {
		if (more(buf_seqno(skb), l->acked))
			break;
	}

	/* Update/release the packets peer is acking now */
	skb_queue_walk_from_safe(&snd_l->transmq, skb, tmp) {
		if (more(buf_seqno(skb), acked))
			break;
		if (!--TIPC_SKB_CB(skb)->ackers) {
			__skb_unlink(skb, &snd_l->transmq);
			kfree_skb(skb);
		}
	}
	l->acked = acked;
	tipc_link_advance_backlog(snd_l, xmitq);
	if (unlikely(!skb_queue_empty(&snd_l->wakeupq)))
		link_prepare_wakeup(snd_l);
}

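/* Worked example for the 'ackers' release logic above (illustrative):
 * with three peer nodes, each broadcast skb starts with ackers == 3.
 * As acks arrive, tipc_link_bc_ack_rcv() decrements the count once per
 * newly acking peer; the peer whose ack drives the count to zero
 * unlinks the skb from snd_l->transmq and frees it.
 */
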
/* tipc_link_bc_nack_rcv(): receive broadcast nack message
 */
int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
			  struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	u32 dnode = msg_destnode(hdr);
	int mtyp = msg_type(hdr);
	u16 acked = msg_bcast_ack(hdr);
	u16 from = acked + 1;
	u16 to = msg_bcgap_to(hdr);
	u16 peers_snd_nxt = to + 1;
	int rc = 0;

	kfree_skb(skb);

	if (!tipc_link_is_up(l) || !l->bc_peer_is_up)
		return 0;

	if (mtyp != STATE_MSG)
		return 0;

	if (dnode == link_own_addr(l)) {
		tipc_link_bc_ack_rcv(l, acked, xmitq);
		rc = tipc_link_retrans(l->bc_sndlink, from, to, xmitq);
		l->stats.recv_nacks++;
		return rc;
	}

	/* Msg for other node => suppress own NACK at next sync if applicable */
	if (more(peers_snd_nxt, l->rcv_nxt) && !less(l->rcv_nxt, from))
		l->nack_state = BC_NACK_SND_SUPPRESS;

	return 0;
}

void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
{
	int max_bulk = TIPC_MAX_PUBLICATIONS / (l->mtu / ITEM_SIZE);

	l->window = win;
	l->backlog[TIPC_LOW_IMPORTANCE].limit = win / 2;
	l->backlog[TIPC_MEDIUM_IMPORTANCE].limit = win;
	l->backlog[TIPC_HIGH_IMPORTANCE].limit = win / 2 * 3;
	l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = win * 2;
	l->backlog[TIPC_SYSTEM_IMPORTANCE].limit = max_bulk;
}

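/* Worked example for the limits above (illustrative): with win == 50,
 * the per-importance backlog limits become
 *
 *   TIPC_LOW_IMPORTANCE:      50 / 2     = 25
 *   TIPC_MEDIUM_IMPORTANCE:   50         = 50
 *   TIPC_HIGH_IMPORTANCE:     50 / 2 * 3 = 75
 *   TIPC_CRITICAL_IMPORTANCE: 50 * 2     = 100
 *   TIPC_SYSTEM_IMPORTANCE:   max_bulk, sized so that one bulk burst of
 *                             TIPC_MAX_PUBLICATIONS name publications
 *                             fits given the link MTU
 */
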
/* tipc_link_find_owner - locate owner node of link by link's name
 * @net: the applicable net namespace
 * @link_name: pointer to link name string
 * @bearer_id: pointer to index in 'node->links' array where the link was found.
 *
 * Returns pointer to node owning the link, or NULL if no matching link is found.
 */
static struct tipc_node *tipc_link_find_owner(struct net *net,
					      const char *link_name,
					      unsigned int *bearer_id)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *l_ptr;
	struct tipc_node *n_ptr;
	struct tipc_node *found_node = NULL;
	int i;

	*bearer_id = 0;
	rcu_read_lock();
	list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
		tipc_node_lock(n_ptr);
		for (i = 0; i < MAX_BEARERS; i++) {
			l_ptr = n_ptr->links[i].link;
			if (l_ptr && !strcmp(l_ptr->name, link_name)) {
				*bearer_id = i;
				found_node = n_ptr;
				break;
			}
		}
		tipc_node_unlock(n_ptr);
		if (found_node)
			break;
	}
	rcu_read_unlock();

	return found_node;
}

/**
 * link_reset_statistics - reset link statistics
 * @l_ptr: pointer to link
 */
static void link_reset_statistics(struct tipc_link *l_ptr)
{
	memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
	l_ptr->stats.sent_info = l_ptr->snd_nxt;
	l_ptr->stats.recv_info = l_ptr->rcv_nxt;
}

static void link_print(struct tipc_link *l, const char *str)
{
	struct sk_buff *hskb = skb_peek(&l->transmq);
	u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt - 1;
	u16 tail = l->snd_nxt - 1;

	pr_info("%s Link <%s> state %x\n", str, l->name, l->state);
	pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n",
		skb_queue_len(&l->transmq), head, tail,
		skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt);
}

/* Parse and validate nested (link) properties valid for media, bearer and link
 */
int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
{
	int err;

	err = nla_parse_nested(props, TIPC_NLA_PROP_MAX, prop,
			       tipc_nl_prop_policy);
	if (err)
		return err;

	if (props[TIPC_NLA_PROP_PRIO]) {
		u32 prio;

		prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
		if (prio > TIPC_MAX_LINK_PRI)
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_TOL]) {
		u32 tol;

		tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
		if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_WIN]) {
		u32 win;

		win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
		if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN))
			return -EINVAL;
	}

	return 0;
}

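/* Layout of the nested netlink attributes consumed by the set/get
 * handlers below (derived from the parsing code; added as a reading aid):
 *
 *   TIPC_NLA_LINK
 *     TIPC_NLA_LINK_NAME   (string, mandatory)
 *     TIPC_NLA_LINK_PROP   (nested, optional)
 *       TIPC_NLA_PROP_PRIO (u32, 0..TIPC_MAX_LINK_PRI)
 *       TIPC_NLA_PROP_TOL  (u32, TIPC_MIN_LINK_TOL..TIPC_MAX_LINK_TOL)
 *       TIPC_NLA_PROP_WIN  (u32, TIPC_MIN_LINK_WIN..TIPC_MAX_LINK_WIN)
 */
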
int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	int res = 0;
	int bearer_id;
	char *name;
	struct tipc_link *link;
	struct tipc_node *node;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct net *net = sock_net(skb->sk);

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
			       info->attrs[TIPC_NLA_LINK],
			       tipc_nl_link_policy);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	if (strcmp(name, tipc_bclink_name) == 0)
		return tipc_nl_bc_link_set(net, attrs);

	node = tipc_link_find_owner(net, name, &bearer_id);
	if (!node)
		return -EINVAL;

	tipc_node_lock(node);

	link = node->links[bearer_id].link;
	if (!link) {
		res = -EINVAL;
		goto out;
	}

	if (attrs[TIPC_NLA_LINK_PROP]) {
		struct nlattr *props[TIPC_NLA_PROP_MAX + 1];

		err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP],
					      props);
		if (err) {
			res = err;
			goto out;
		}

		if (props[TIPC_NLA_PROP_TOL]) {
			u32 tol;

			tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
			link->tolerance = tol;
			tipc_link_proto_xmit(link, STATE_MSG, 0, 0, tol, 0);
		}
		if (props[TIPC_NLA_PROP_PRIO]) {
			u32 prio;

			prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
			link->priority = prio;
			tipc_link_proto_xmit(link, STATE_MSG, 0, 0, 0, prio);
		}
		if (props[TIPC_NLA_PROP_WIN]) {
			u32 win;

			win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
			tipc_link_set_queue_limits(link, win);
		}
	}

out:
	tipc_node_unlock(node);

	return res;
}

static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
{
	int i;
	struct nlattr *stats;

	struct nla_map {
		u32 key;
		u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, s->recv_info},
		{TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, s->sent_info},
		{TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
		{TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
			s->msg_length_counts : 1},
		{TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
		{TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
		{TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
		{TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
		{TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
		{TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
		{TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
		{TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
		{TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
		{TIPC_NLA_STATS_RX_STATES, s->recv_states},
		{TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
		{TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
		{TIPC_NLA_STATS_TX_STATES, s->sent_states},
		{TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
		{TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, s->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
			(s->accu_queue_sz / s->queue_sz_counts) : 0}
	};

	stats = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
	if (!stats)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, stats);

	return 0;
msg_full:
	nla_nest_cancel(skb, stats);

	return -EMSGSIZE;
}

/* Caller should hold appropriate locks to protect the link */
static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
			      struct tipc_link *link, int nlflags)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *prop;
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  nlflags, TIPC_NL_LINK_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST,
			tipc_cluster_mask(tn->own_addr)))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->rcv_nxt))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->snd_nxt))
		goto attr_msg_full;

	if (tipc_link_is_up(link))
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
			goto attr_msg_full;
	if (link->active)
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
			goto attr_msg_full;

	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, link->window))
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_stats(msg->skb, &link->stats);
	if (err)
		goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

/* Caller should hold node lock */
static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
				    struct tipc_node *node, u32 *prev_link)
{
	u32 i;
	int err;

	for (i = *prev_link; i < MAX_BEARERS; i++) {
		*prev_link = i;

		if (!node->links[i].link)
			continue;

		err = __tipc_nl_add_link(net, msg,
					 node->links[i].link, NLM_F_MULTI);
		if (err)
			return err;
	}
	*prev_link = 0;

	return 0;
}

int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *node;
	struct tipc_nl_msg msg;
	u32 prev_node = cb->args[0];
	u32 prev_link = cb->args[1];
	int done = cb->args[2];
	int err;

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rcu_read_lock();
	if (prev_node) {
		node = tipc_node_find(net, prev_node);
		if (!node) {
			/* We never set seq or call nl_dump_check_consistent(),
			 * which means that setting prev_seq here will cause the
			 * consistency check to fail in the netlink callback
			 * handler, resulting in the last NLMSG_DONE message
			 * having the NLM_F_DUMP_INTR flag set.
			 */
			cb->prev_seq = 1;
			goto out;
		}
		tipc_node_put(node);

		list_for_each_entry_continue_rcu(node, &tn->node_list,
						 list) {
			tipc_node_lock(node);
			err = __tipc_nl_add_node_links(net, &msg, node,
						       &prev_link);
			tipc_node_unlock(node);
			if (err)
				goto out;

			prev_node = node->addr;
		}
	} else {
		err = tipc_nl_add_bc_link(net, &msg);
		if (err)
			goto out;

		list_for_each_entry_rcu(node, &tn->node_list, list) {
			tipc_node_lock(node);
			err = __tipc_nl_add_node_links(net, &msg, node,
						       &prev_link);
			tipc_node_unlock(node);
			if (err)
				goto out;

			prev_node = node->addr;
		}
	}
	done = 1;
out:
	rcu_read_unlock();

	cb->args[0] = prev_node;
	cb->args[1] = prev_link;
	cb->args[2] = done;

	return skb->len;
}

int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct tipc_nl_msg msg;
	char *name;
	int err;

	msg.portid = info->snd_portid;
	msg.seq = info->snd_seq;

	if (!info->attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;
	name = nla_data(info->attrs[TIPC_NLA_LINK_NAME]);

	msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!msg.skb)
		return -ENOMEM;

	if (strcmp(name, tipc_bclink_name) == 0) {
		err = tipc_nl_add_bc_link(net, &msg);
		if (err) {
			nlmsg_free(msg.skb);
			return err;
		}
	} else {
		int bearer_id;
		struct tipc_node *node;
		struct tipc_link *link;

		node = tipc_link_find_owner(net, name, &bearer_id);
		if (!node) {
			/* Free the reply skb to avoid leaking it here */
			nlmsg_free(msg.skb);
			return -EINVAL;
		}

		tipc_node_lock(node);
		link = node->links[bearer_id].link;
		if (!link) {
			tipc_node_unlock(node);
			nlmsg_free(msg.skb);
			return -EINVAL;
		}

		err = __tipc_nl_add_link(net, &msg, link, 0);
		tipc_node_unlock(node);
		if (err) {
			nlmsg_free(msg.skb);
			return err;
		}
	}

	return genlmsg_reply(msg.skb, info);
}

int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	char *link_name;
	unsigned int bearer_id;
	struct tipc_link *link;
	struct tipc_node *node;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct net *net = sock_net(skb->sk);

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
			       info->attrs[TIPC_NLA_LINK],
			       tipc_nl_link_policy);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	if (strcmp(link_name, tipc_bclink_name) == 0) {
		err = tipc_bclink_reset_stats(net);
		if (err)
			return err;
		return 0;
	}

	node = tipc_link_find_owner(net, link_name, &bearer_id);
	if (!node)
		return -EINVAL;

	tipc_node_lock(node);

	link = node->links[bearer_id].link;
	if (!link) {
		tipc_node_unlock(node);
		return -EINVAL;
	}

	link_reset_statistics(link);

	tipc_node_unlock(node);

	return 0;
}