blob: 35a2da688db12d7cf649c65243c1744f36e034f5 [file] [log] [blame]
Per Lidenb97bf3f2006-01-02 19:04:38 +01001/*
2 * net/tipc/link.c: TIPC link code
YOSHIFUJI Hideakic4307282007-02-09 23:25:21 +09003 *
Jon Paul Maloyc1336ee2015-03-13 16:08:08 -04004 * Copyright (c) 1996-2007, 2012-2015, Ericsson AB
Ying Xue198d73b2013-06-17 10:54:42 -04005 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
Per Lidenb97bf3f2006-01-02 19:04:38 +01006 * All rights reserved.
7 *
Per Liden9ea1fd32006-01-11 13:30:43 +01008 * Redistribution and use in source and binary forms, with or without
Per Lidenb97bf3f2006-01-02 19:04:38 +01009 * modification, are permitted provided that the following conditions are met:
10 *
Per Liden9ea1fd32006-01-11 13:30:43 +010011 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
Per Lidenb97bf3f2006-01-02 19:04:38 +010019 *
Per Liden9ea1fd32006-01-11 13:30:43 +010020 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
Per Lidenb97bf3f2006-01-02 19:04:38 +010034 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include "core.h"
Jon Paul Maloye3eea1e2015-03-13 16:08:11 -040038#include "subscr.h"
Per Lidenb97bf3f2006-01-02 19:04:38 +010039#include "link.h"
Richard Alpe7be57fc2014-11-20 10:29:12 +010040#include "bcast.h"
Jon Paul Maloy9816f062014-05-14 05:39:15 -040041#include "socket.h"
Per Lidenb97bf3f2006-01-02 19:04:38 +010042#include "name_distr.h"
Per Lidenb97bf3f2006-01-02 19:04:38 +010043#include "discover.h"
Richard Alpe0655f6a2014-11-20 10:29:07 +010044#include "netlink.h"
Per Lidenb97bf3f2006-01-02 19:04:38 +010045
Ying Xue796c75d2013-06-17 10:54:48 -040046#include <linux/pkt_sched.h>
47
Erik Hugne2cf8aa12012-06-29 00:16:37 -040048/*
49 * Error message prefixes
50 */
51static const char *link_co_err = "Link changeover error, ";
52static const char *link_rst_msg = "Resetting link ";
53static const char *link_unk_evt = "Unknown link event ";
Per Lidenb97bf3f2006-01-02 19:04:38 +010054
/* Netlink attribute validation policy for TIPC_NLA_LINK_* attributes */
static const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
	[TIPC_NLA_LINK_UNSPEC]		= { .type = NLA_UNSPEC },
	[TIPC_NLA_LINK_NAME] = {
		.type = NLA_STRING,
		.len = TIPC_MAX_LINK_NAME	/* bounded to avoid overlong names */
	},
	[TIPC_NLA_LINK_MTU]		= { .type = NLA_U32 },
	[TIPC_NLA_LINK_BROADCAST]	= { .type = NLA_FLAG },
	[TIPC_NLA_LINK_UP]		= { .type = NLA_FLAG },
	[TIPC_NLA_LINK_ACTIVE]		= { .type = NLA_FLAG },
	[TIPC_NLA_LINK_PROP]		= { .type = NLA_NESTED },
	[TIPC_NLA_LINK_STATS]		= { .type = NLA_NESTED },
	[TIPC_NLA_LINK_RX]		= { .type = NLA_U32 },
	[TIPC_NLA_LINK_TX]		= { .type = NLA_U32 }
};
70
Richard Alpe0655f6a2014-11-20 10:29:07 +010071/* Properties valid for media, bearer and link */
/* Netlink validation policy for the nested TIPC_NLA_PROP_* attributes */
static const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
	[TIPC_NLA_PROP_UNSPEC]		= { .type = NLA_UNSPEC },
	[TIPC_NLA_PROP_PRIO]		= { .type = NLA_U32 },
	[TIPC_NLA_PROP_TOL]		= { .type = NLA_U32 },
	[TIPC_NLA_PROP_WIN]		= { .type = NLA_U32 }
};
78
YOSHIFUJI Hideakic4307282007-02-09 23:25:21 +090079/*
Allan Stephensa686e682008-06-04 17:29:39 -070080 * Out-of-range value for link session numbers
81 */
Jon Paul Maloyd3504c32015-07-16 16:54:25 -040082#define WILDCARD_SESSION 0x10000
Allan Stephensa686e682008-06-04 17:29:39 -070083
Jon Paul Maloyd3504c32015-07-16 16:54:25 -040084/* State value stored in 'failover_pkts'
Per Lidenb97bf3f2006-01-02 19:04:38 +010085 */
Jon Paul Maloydff29b12015-04-02 09:33:01 -040086#define FIRST_FAILOVER 0xffffu
Per Lidenb97bf3f2006-01-02 19:04:38 +010087
/* Link FSM states and events:
 */
enum {
	WORKING_WORKING,	/* link up, recent traffic from peer */
	WORKING_UNKNOWN,	/* link up, probing a silent peer */
	RESET_RESET,		/* link down, reset sent, awaiting activate */
	RESET_UNKNOWN		/* link down, peer state unknown */
};

/* FSM events; the first two reuse the corresponding protocol msg values */
enum {
	PEER_RESET_EVT = RESET_MSG,
	ACTIVATE_EVT = ACTIVATE_MSG,
	TRAFFIC_EVT,	/* Any other valid msg from peer */
	SILENCE_EVT	/* Peer was silent during last timer interval */
};
103
104/* Link FSM state checking routines
105 */
/* Return non-zero if link FSM is in state WORKING_WORKING */
static int link_working_working(struct tipc_link *l)
{
	return l->state == WORKING_WORKING;
}
110
/* Return non-zero if link FSM is in state WORKING_UNKNOWN */
static int link_working_unknown(struct tipc_link *l)
{
	return l->state == WORKING_UNKNOWN;
}
115
/* Return non-zero if link FSM is in state RESET_UNKNOWN */
static int link_reset_unknown(struct tipc_link *l)
{
	return l->state == RESET_UNKNOWN;
}
120
/* Return non-zero if link FSM is in state RESET_RESET */
static int link_reset_reset(struct tipc_link *l)
{
	return l->state == RESET_RESET;
}
125
Jon Paul Maloyc5898632015-02-05 08:36:36 -0500126static void link_handle_out_of_seq_msg(struct tipc_link *link,
127 struct sk_buff *skb);
128static void tipc_link_proto_rcv(struct tipc_link *link,
129 struct sk_buff *skb);
Ying Xue2f55c432015-01-09 15:27:00 +0800130static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol);
Paul Gortmakera18c4bc2011-12-29 20:58:42 -0500131static void link_state_event(struct tipc_link *l_ptr, u32 event);
132static void link_reset_statistics(struct tipc_link *l_ptr);
133static void link_print(struct tipc_link *l_ptr, const char *str);
Ying Xue247f0f32014-02-18 16:06:46 +0800134static void tipc_link_sync_xmit(struct tipc_link *l);
135static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf);
Jon Paul Maloyc637c102015-02-05 08:36:41 -0500136static void tipc_link_input(struct tipc_link *l, struct sk_buff *skb);
137static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb);
Jon Paul Maloydff29b12015-04-02 09:33:01 -0400138static bool tipc_link_failover_rcv(struct tipc_link *l, struct sk_buff **skb);
Jon Paul Maloycd4eee32015-05-14 10:46:16 -0400139static void link_set_timer(struct tipc_link *link, unsigned long time);
Per Lidenb97bf3f2006-01-02 19:04:38 +0100140/*
Sam Ravnborg05790c62006-03-20 22:37:04 -0800141 * Simple link routines
Per Lidenb97bf3f2006-01-02 19:04:38 +0100142 */
/* Round @i up to the next multiple of 4 */
static unsigned int align(unsigned int i)
{
	unsigned int bumped = i + 3u;

	return bumped & ~3u;
}
147
/* kref release callback: frees the link when the last reference is dropped */
static void tipc_link_release(struct kref *kref)
{
	kfree(container_of(kref, struct tipc_link, ref));
}
152
/* Take a reference on the link */
static void tipc_link_get(struct tipc_link *l_ptr)
{
	kref_get(&l_ptr->ref);
}
157
/* Drop a reference on the link; frees it via tipc_link_release() at zero */
static void tipc_link_put(struct tipc_link *l_ptr)
{
	kref_put(&l_ptr->ref, tipc_link_release);
}
162
Jon Paul Maloy8b4ed862015-03-25 12:07:26 -0400163static struct tipc_link *tipc_parallel_link(struct tipc_link *l)
164{
Jon Paul Maloy9d13ec62015-07-16 16:54:19 -0400165 struct tipc_node *n = l->owner;
166
167 if (node_active_link(n, 0) != l)
168 return node_active_link(n, 0);
169 return node_active_link(n, 1);
Jon Paul Maloy8b4ed862015-03-25 12:07:26 -0400170}
171
Per Lidenb97bf3f2006-01-02 19:04:38 +0100172/*
Sam Ravnborg05790c62006-03-20 22:37:04 -0800173 * Simple non-static link routines (i.e. referenced outside this file)
Per Lidenb97bf3f2006-01-02 19:04:38 +0100174 */
/* Return non-zero if the link is in one of the two WORKING states;
 * a NULL link counts as down
 */
int tipc_link_is_up(struct tipc_link *l_ptr)
{
	if (!l_ptr)
		return 0;
	if (link_working_working(l_ptr))
		return 1;
	return link_working_unknown(l_ptr);
}
181
Jon Paul Maloy9d13ec62015-07-16 16:54:19 -0400182int tipc_link_is_active(struct tipc_link *l)
Per Lidenb97bf3f2006-01-02 19:04:38 +0100183{
Jon Paul Maloy9d13ec62015-07-16 16:54:19 -0400184 struct tipc_node *n = l->owner;
185
186 return (node_active_link(n, 0) == l) || (node_active_link(n, 1) == l);
Per Lidenb97bf3f2006-01-02 19:04:38 +0100187}
188
189/**
Per Lidenb97bf3f2006-01-02 19:04:38 +0100190 * link_timeout - handle expiration of link timer
191 * @l_ptr: pointer to link
Per Lidenb97bf3f2006-01-02 19:04:38 +0100192 */
/**
 * link_timeout - handle expiration of link timer
 * @data: link pointer, cast from the timer's opaque argument
 *
 * Runs under the owner node's lock: updates send-traffic statistics,
 * drives the FSM on peer silence, pushes backlogged packets, and
 * rearms the timer.  Drops the timer's link reference on exit.
 */
static void link_timeout(unsigned long data)
{
	struct tipc_link *l_ptr = (struct tipc_link *)data;
	struct sk_buff *skb;

	tipc_node_lock(l_ptr->owner);

	/* update counters used in statistical profiling of send traffic */
	l_ptr->stats.accu_queue_sz += skb_queue_len(&l_ptr->transmq);
	l_ptr->stats.queue_sz_counts++;

	skb = skb_peek(&l_ptr->transmq);
	if (skb) {
		struct tipc_msg *msg = buf_msg(skb);
		u32 length = msg_size(msg);

		/* For a first fragment, profile the original message size,
		 * not the fragment's own size
		 */
		if ((msg_user(msg) == MSG_FRAGMENTER) &&
		    (msg_type(msg) == FIRST_FRAGMENT)) {
			length = msg_size(msg_get_wrapped(msg));
		}
		if (length) {
			l_ptr->stats.msg_lengths_total += length;
			l_ptr->stats.msg_length_counts++;
			/* bucket by power-of-two-ish length ranges */
			if (length <= 64)
				l_ptr->stats.msg_length_profile[0]++;
			else if (length <= 256)
				l_ptr->stats.msg_length_profile[1]++;
			else if (length <= 1024)
				l_ptr->stats.msg_length_profile[2]++;
			else if (length <= 4096)
				l_ptr->stats.msg_length_profile[3]++;
			else if (length <= 16384)
				l_ptr->stats.msg_length_profile[4]++;
			else if (length <= 32768)
				l_ptr->stats.msg_length_profile[5]++;
			else
				l_ptr->stats.msg_length_profile[6]++;
		}
	}

	/* do all other link processing performed on a periodic basis */
	if (l_ptr->silent_intv_cnt || tipc_bclink_acks_missing(l_ptr->owner))
		link_state_event(l_ptr, SILENCE_EVT);
	l_ptr->silent_intv_cnt++;
	if (skb_queue_len(&l_ptr->backlogq))
		tipc_link_push_packets(l_ptr);
	link_set_timer(l_ptr, l_ptr->keepalive_intv);
	tipc_node_unlock(l_ptr->owner);
	tipc_link_put(l_ptr);	/* balances the get taken by link_set_timer() */
}
243
/* (Re)arm the link timer @time jiffies from now.  mod_timer() returns 0
 * when the timer was inactive, in which case the timer now holds a new
 * link reference (released by link_timeout()).
 */
static void link_set_timer(struct tipc_link *link, unsigned long time)
{
	if (!mod_timer(&link->timer, jiffies + time))
		tipc_link_get(link);
}
249
250/**
Per Liden4323add2006-01-18 00:38:21 +0100251 * tipc_link_create - create a new link
Allan Stephens37b9c082011-02-28 11:32:27 -0500252 * @n_ptr: pointer to associated node
Per Lidenb97bf3f2006-01-02 19:04:38 +0100253 * @b_ptr: pointer to associated bearer
Per Lidenb97bf3f2006-01-02 19:04:38 +0100254 * @media_addr: media address to use when sending messages over link
YOSHIFUJI Hideakic4307282007-02-09 23:25:21 +0900255 *
Per Lidenb97bf3f2006-01-02 19:04:38 +0100256 * Returns pointer to link.
257 */
/**
 * tipc_link_create - create a new link
 * @n_ptr: pointer to associated node
 * @b_ptr: pointer to associated bearer
 * @media_addr: media address to use when sending messages over link
 * @inputq: queue for messages to be delivered upwards
 * @namedq: queue for name-table messages
 *
 * Returns pointer to link, or NULL on error (link limit reached,
 * duplicate link on bearer, or allocation failure).
 */
struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
				   struct tipc_bearer *b_ptr,
				   const struct tipc_media_addr *media_addr,
				   struct sk_buff_head *inputq,
				   struct sk_buff_head *namedq)
{
	struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id);
	struct tipc_link *l_ptr;
	struct tipc_msg *msg;
	char *if_name;
	char addr_string[16];
	u32 peer = n_ptr->addr;

	/* Refuse to exceed one link per bearer */
	if (n_ptr->link_cnt >= MAX_BEARERS) {
		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_err("Cannot establish %uth link to %s. Max %u allowed.\n",
		       n_ptr->link_cnt, addr_string, MAX_BEARERS);
		return NULL;
	}

	/* Refuse a second link on the same bearer to the same node */
	if (n_ptr->links[b_ptr->identity].link) {
		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_err("Attempt to establish second link on <%s> to %s\n",
		       b_ptr->name, addr_string);
		return NULL;
	}

	l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC);
	if (!l_ptr) {
		pr_warn("Link creation failed, no memory\n");
		return NULL;
	}
	kref_init(&l_ptr->ref);
	l_ptr->addr = peer;
	if_name = strchr(b_ptr->name, ':') + 1;
	sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
		tipc_zone(tn->own_addr), tipc_cluster(tn->own_addr),
		tipc_node(tn->own_addr),
		if_name,
		tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
		/* note: peer i/f name is updated by reset/activate message */
	memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
	l_ptr->owner = n_ptr;
	l_ptr->peer_session = WILDCARD_SESSION;	/* accept any session while down */
	l_ptr->bearer_id = b_ptr->identity;
	link_set_supervision_props(l_ptr, b_ptr->tolerance);
	l_ptr->state = RESET_UNKNOWN;

	/* Pre-build the link protocol message template */
	l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
	msg = l_ptr->pmsg;
	tipc_msg_init(tn->own_addr, msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE,
		      l_ptr->addr);
	msg_set_size(msg, sizeof(l_ptr->proto_msg));
	msg_set_session(msg, (tn->random & 0xffff));
	msg_set_bearer_id(msg, b_ptr->identity);
	strcpy((char *)msg_data(msg), if_name);
	l_ptr->net_plane = b_ptr->net_plane;
	l_ptr->advertised_mtu = b_ptr->mtu;
	l_ptr->mtu = l_ptr->advertised_mtu;
	l_ptr->priority = b_ptr->priority;
	tipc_link_set_queue_limits(l_ptr, b_ptr->window);
	l_ptr->snd_nxt = 1;
	__skb_queue_head_init(&l_ptr->transmq);
	__skb_queue_head_init(&l_ptr->backlogq);
	__skb_queue_head_init(&l_ptr->deferdq);
	skb_queue_head_init(&l_ptr->wakeupq);
	l_ptr->inputq = inputq;
	l_ptr->namedq = namedq;
	skb_queue_head_init(l_ptr->inputq);
	link_reset_statistics(l_ptr);
	tipc_node_attach_link(n_ptr, l_ptr);
	setup_timer(&l_ptr->timer, link_timeout, (unsigned long)l_ptr);
	link_set_timer(l_ptr, l_ptr->keepalive_intv);
	return l_ptr;
}
333
/**
 * tipc_link_delete - Delete a link
 * @l: link to be deleted
 *
 * Resets the link, cancels its timer (dropping the timer's reference if
 * it was pending), detaches it from its node, and drops the final
 * reference; the struct is freed once the refcount hits zero.
 */
void tipc_link_delete(struct tipc_link *l)
{
	tipc_link_reset(l);
	if (del_timer(&l->timer))
		tipc_link_put(l);
	/* Delete link now, or when timer is finished: */
	tipc_link_reset_fragments(l);
	tipc_node_detach_link(l->owner, l);
	tipc_link_put(l);
}
348
/* Delete the link on bearer @bearer_id from every node in @net.
 * Walks the node list under RCU, taking each node's lock around the
 * per-node deletion.
 */
void tipc_link_delete_list(struct net *net, unsigned int bearer_id)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *link;
	struct tipc_node *node;

	rcu_read_lock();
	list_for_each_entry_rcu(node, &tn->node_list, list) {
		tipc_node_lock(node);
		link = node->links[bearer_id].link;
		if (link)
			tipc_link_delete(link);
		tipc_node_unlock(node);
	}
	rcu_read_unlock();
}
Per Lidenb97bf3f2006-01-02 19:04:38 +0100365
366/**
Jon Paul Maloy3127a022015-03-25 12:07:25 -0400367 * link_schedule_user - schedule a message sender for wakeup after congestion
Jon Paul Maloy50100a52014-08-22 18:09:07 -0400368 * @link: congested link
Jon Paul Maloy3127a022015-03-25 12:07:25 -0400369 * @list: message that was attempted sent
Jon Paul Maloy50100a52014-08-22 18:09:07 -0400370 * Create pseudo msg to send back to user when congestion abates
Jon Paul Maloy22d85c72015-07-16 16:54:23 -0400371 * Does not consume buffer list
Per Lidenb97bf3f2006-01-02 19:04:38 +0100372 */
Jon Paul Maloy3127a022015-03-25 12:07:25 -0400373static int link_schedule_user(struct tipc_link *link, struct sk_buff_head *list)
Per Lidenb97bf3f2006-01-02 19:04:38 +0100374{
Jon Paul Maloy3127a022015-03-25 12:07:25 -0400375 struct tipc_msg *msg = buf_msg(skb_peek(list));
376 int imp = msg_importance(msg);
377 u32 oport = msg_origport(msg);
378 u32 addr = link_own_addr(link);
379 struct sk_buff *skb;
Per Lidenb97bf3f2006-01-02 19:04:38 +0100380
Jon Paul Maloy3127a022015-03-25 12:07:25 -0400381 /* This really cannot happen... */
382 if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) {
383 pr_warn("%s<%s>, send queue full", link_rst_msg, link->name);
Jon Paul Maloy22d85c72015-07-16 16:54:23 -0400384 return -ENOBUFS;
Jon Paul Maloy3127a022015-03-25 12:07:25 -0400385 }
386 /* Non-blocking sender: */
387 if (TIPC_SKB_CB(skb_peek(list))->wakeup_pending)
388 return -ELINKCONG;
389
390 /* Create and schedule wakeup pseudo message */
391 skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
392 addr, addr, oport, 0, 0);
393 if (!skb)
Jon Paul Maloy22d85c72015-07-16 16:54:23 -0400394 return -ENOBUFS;
Jon Paul Maloy3127a022015-03-25 12:07:25 -0400395 TIPC_SKB_CB(skb)->chain_sz = skb_queue_len(list);
396 TIPC_SKB_CB(skb)->chain_imp = imp;
397 skb_queue_tail(&link->wakeupq, skb);
Jon Paul Maloy50100a52014-08-22 18:09:07 -0400398 link->stats.link_congs++;
Jon Paul Maloy3127a022015-03-25 12:07:25 -0400399 return -ELINKCONG;
Per Lidenb97bf3f2006-01-02 19:04:38 +0100400}
401
/**
 * link_prepare_wakeup - prepare users for wakeup after congestion
 * @l: congested link
 * Move a number of waiting users, as permitted by available space in
 * the send queue, from link wait queue to node wait queue for wakeup
 */
void link_prepare_wakeup(struct tipc_link *l)
{
	/* pending message counts, accumulated per importance level */
	int pnd[TIPC_SYSTEM_IMPORTANCE + 1] = {0,};
	int imp, lim;
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&l->wakeupq, skb, tmp) {
		imp = TIPC_SKB_CB(skb)->chain_imp;
		lim = l->window + l->backlog[imp].limit;
		pnd[imp] += TIPC_SKB_CB(skb)->chain_sz;
		/* Stop as soon as this importance level would overrun its
		 * backlog limit; remaining users stay queued
		 */
		if ((pnd[imp] + l->backlog[imp].len) >= lim)
			break;
		skb_unlink(skb, &l->wakeupq);
		skb_queue_tail(l->inputq, skb);
		l->owner->inputq = l->inputq;
		l->owner->action_flags |= TIPC_MSG_EVT;
	}
}
426
/**
 * tipc_link_reset_fragments - purge link's inbound message fragments queue
 * @l_ptr: pointer to link
 *
 * Frees any partially reassembled message and clears the pointer.
 */
void tipc_link_reset_fragments(struct tipc_link *l_ptr)
{
	kfree_skb(l_ptr->reasm_buf);
	l_ptr->reasm_buf = NULL;
}
436
Jon Paul Maloy7d967b62015-06-28 09:44:44 -0400437void tipc_link_purge_backlog(struct tipc_link *l)
Jon Paul Maloy1f66d162015-03-25 12:07:24 -0400438{
439 __skb_queue_purge(&l->backlogq);
440 l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
441 l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
442 l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
443 l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
444 l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
445}
446
/**
 * tipc_link_purge_queues - purge all pkt queues associated with link
 * @l_ptr: pointer to link
 *
 * Empties the deferred-receive, transmit and backlog queues, and drops
 * any partial reassembly buffer.
 */
void tipc_link_purge_queues(struct tipc_link *l_ptr)
{
	__skb_queue_purge(&l_ptr->deferdq);
	__skb_queue_purge(&l_ptr->transmq);
	tipc_link_purge_backlog(l_ptr);
	tipc_link_reset_fragments(l_ptr);
}
458
/* Reset the link to RESET_UNKNOWN and clean up its state.
 * If the link was active and the node stays up via a parallel link,
 * hand failover state over to that parallel link; otherwise discard
 * any partial reassembly.  Purges all queues except inputq.
 */
void tipc_link_reset(struct tipc_link *l_ptr)
{
	u32 prev_state = l_ptr->state;
	int was_active_link = tipc_link_is_active(l_ptr);
	struct tipc_node *owner = l_ptr->owner;
	struct tipc_link *pl = tipc_parallel_link(l_ptr);

	/* Bump session number for the next link instance */
	msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff));

	/* Link is down, accept any session */
	l_ptr->peer_session = WILDCARD_SESSION;

	/* Prepare for renewed mtu size negotiation */
	l_ptr->mtu = l_ptr->advertised_mtu;

	l_ptr->state = RESET_UNKNOWN;

	/* Already reset: nothing further to tear down */
	if ((prev_state == RESET_UNKNOWN) || (prev_state == RESET_RESET))
		return;

	tipc_node_link_down(l_ptr->owner, l_ptr->bearer_id);
	tipc_bearer_remove_dest(owner->net, l_ptr->bearer_id, l_ptr->addr);

	if (was_active_link && tipc_node_is_up(l_ptr->owner) && (pl != l_ptr)) {
		/* Failover: block this link and let the parallel link take
		 * over traffic, including the partial reassembly buffer
		 */
		l_ptr->exec_mode = TIPC_LINK_BLOCKED;
		l_ptr->failover_checkpt = l_ptr->rcv_nxt;
		pl->failover_pkts = FIRST_FAILOVER;
		pl->failover_checkpt = l_ptr->rcv_nxt;
		pl->failover_skb = l_ptr->reasm_buf;
	} else {
		kfree_skb(l_ptr->reasm_buf);
	}
	/* Clean up all queues, except inputq: */
	__skb_queue_purge(&l_ptr->transmq);
	__skb_queue_purge(&l_ptr->deferdq);
	if (!owner->inputq)
		owner->inputq = l_ptr->inputq;
	/* Move pending wakeup messages to the owner for delivery */
	skb_queue_splice_init(&l_ptr->wakeupq, owner->inputq);
	if (!skb_queue_empty(owner->inputq))
		owner->action_flags |= TIPC_MSG_EVT;
	tipc_link_purge_backlog(l_ptr);
	l_ptr->reasm_buf = NULL;
	l_ptr->rcv_unacked = 0;
	l_ptr->snd_nxt = 1;
	l_ptr->silent_intv_cnt = 0;
	l_ptr->stale_count = 0;
	link_reset_statistics(l_ptr);
}
507
/* Bring the link into working state: reset receive sequencing and
 * silence counter, then notify the owner node and the bearer layer.
 */
static void link_activate(struct tipc_link *link)
{
	struct tipc_node *node = link->owner;

	link->rcv_nxt = 1;
	link->stats.recv_info = 1;
	link->silent_intv_cnt = 0;
	tipc_node_link_up(node, link->bearer_id);
	tipc_bearer_add_dest(node->net, link->bearer_id, link->addr);
}
518
519/**
520 * link_state_event - link finite state machine
521 * @l_ptr: pointer to link
522 * @event: state machine event to process
523 */
Eric Dumazet95c96172012-04-15 05:58:06 +0000524static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
Per Lidenb97bf3f2006-01-02 19:04:38 +0100525{
Paul Gortmakera18c4bc2011-12-29 20:58:42 -0500526 struct tipc_link *other;
Per Lidenb97bf3f2006-01-02 19:04:38 +0100527
Jon Paul Maloyd3504c32015-07-16 16:54:25 -0400528 if (l_ptr->exec_mode == TIPC_LINK_BLOCKED)
Ying Xue77a7e072013-12-10 20:45:44 -0800529 return;
Per Lidenb97bf3f2006-01-02 19:04:38 +0100530
531 switch (l_ptr->state) {
532 case WORKING_WORKING:
Per Lidenb97bf3f2006-01-02 19:04:38 +0100533 switch (event) {
Jon Paul Maloyd3504c32015-07-16 16:54:25 -0400534 case TRAFFIC_EVT:
Per Lidenb97bf3f2006-01-02 19:04:38 +0100535 case ACTIVATE_MSG:
Jon Paul Maloycd4eee32015-05-14 10:46:16 -0400536 l_ptr->silent_intv_cnt = 0;
Per Lidenb97bf3f2006-01-02 19:04:38 +0100537 break;
Jon Paul Maloycd4eee32015-05-14 10:46:16 -0400538 case SILENCE_EVT:
539 if (!l_ptr->silent_intv_cnt) {
540 if (tipc_bclink_acks_missing(l_ptr->owner))
Ying Xue247f0f32014-02-18 16:06:46 +0800541 tipc_link_proto_xmit(l_ptr, STATE_MSG,
Jon Paul Maloyed193ec2015-04-02 09:33:02 -0400542 0, 0, 0, 0);
Per Lidenb97bf3f2006-01-02 19:04:38 +0100543 break;
544 }
Per Lidenb97bf3f2006-01-02 19:04:38 +0100545 l_ptr->state = WORKING_UNKNOWN;
Jon Paul Maloyed193ec2015-04-02 09:33:02 -0400546 tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0);
Per Lidenb97bf3f2006-01-02 19:04:38 +0100547 break;
548 case RESET_MSG:
Erik Hugne3fa9cac2015-01-22 17:10:31 +0100549 pr_debug("%s<%s>, requested by peer\n",
550 link_rst_msg, l_ptr->name);
Per Liden4323add2006-01-18 00:38:21 +0100551 tipc_link_reset(l_ptr);
Per Lidenb97bf3f2006-01-02 19:04:38 +0100552 l_ptr->state = RESET_RESET;
Ying Xue247f0f32014-02-18 16:06:46 +0800553 tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
Jon Paul Maloyed193ec2015-04-02 09:33:02 -0400554 0, 0, 0, 0);
Per Lidenb97bf3f2006-01-02 19:04:38 +0100555 break;
556 default:
Erik Hugne3fa9cac2015-01-22 17:10:31 +0100557 pr_debug("%s%u in WW state\n", link_unk_evt, event);
Per Lidenb97bf3f2006-01-02 19:04:38 +0100558 }
559 break;
560 case WORKING_UNKNOWN:
Per Lidenb97bf3f2006-01-02 19:04:38 +0100561 switch (event) {
Jon Paul Maloyd3504c32015-07-16 16:54:25 -0400562 case TRAFFIC_EVT:
Per Lidenb97bf3f2006-01-02 19:04:38 +0100563 case ACTIVATE_MSG:
Per Lidenb97bf3f2006-01-02 19:04:38 +0100564 l_ptr->state = WORKING_WORKING;
Jon Paul Maloya97b9d32015-05-14 10:46:15 -0400565 l_ptr->silent_intv_cnt = 0;
Per Lidenb97bf3f2006-01-02 19:04:38 +0100566 break;
567 case RESET_MSG:
Erik Hugne3fa9cac2015-01-22 17:10:31 +0100568 pr_debug("%s<%s>, requested by peer while probing\n",
569 link_rst_msg, l_ptr->name);
Per Liden4323add2006-01-18 00:38:21 +0100570 tipc_link_reset(l_ptr);
Per Lidenb97bf3f2006-01-02 19:04:38 +0100571 l_ptr->state = RESET_RESET;
Ying Xue247f0f32014-02-18 16:06:46 +0800572 tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
Jon Paul Maloyed193ec2015-04-02 09:33:02 -0400573 0, 0, 0, 0);
Per Lidenb97bf3f2006-01-02 19:04:38 +0100574 break;
Jon Paul Maloycd4eee32015-05-14 10:46:16 -0400575 case SILENCE_EVT:
576 if (!l_ptr->silent_intv_cnt) {
Per Lidenb97bf3f2006-01-02 19:04:38 +0100577 l_ptr->state = WORKING_WORKING;
Jon Paul Maloycd4eee32015-05-14 10:46:16 -0400578 if (tipc_bclink_acks_missing(l_ptr->owner))
Ying Xue247f0f32014-02-18 16:06:46 +0800579 tipc_link_proto_xmit(l_ptr, STATE_MSG,
Jon Paul Maloyed193ec2015-04-02 09:33:02 -0400580 0, 0, 0, 0);
Jon Paul Maloya97b9d32015-05-14 10:46:15 -0400581 } else if (l_ptr->silent_intv_cnt <
582 l_ptr->abort_limit) {
Ying Xue247f0f32014-02-18 16:06:46 +0800583 tipc_link_proto_xmit(l_ptr, STATE_MSG,
Jon Paul Maloyed193ec2015-04-02 09:33:02 -0400584 1, 0, 0, 0);
Per Lidenb97bf3f2006-01-02 19:04:38 +0100585 } else { /* Link has failed */
Erik Hugne3fa9cac2015-01-22 17:10:31 +0100586 pr_debug("%s<%s>, peer not responding\n",
587 link_rst_msg, l_ptr->name);
Per Liden4323add2006-01-18 00:38:21 +0100588 tipc_link_reset(l_ptr);
Per Lidenb97bf3f2006-01-02 19:04:38 +0100589 l_ptr->state = RESET_UNKNOWN;
Ying Xue247f0f32014-02-18 16:06:46 +0800590 tipc_link_proto_xmit(l_ptr, RESET_MSG,
Jon Paul Maloyed193ec2015-04-02 09:33:02 -0400591 0, 0, 0, 0);
Per Lidenb97bf3f2006-01-02 19:04:38 +0100592 }
593 break;
594 default:
Erik Hugne2cf8aa12012-06-29 00:16:37 -0400595 pr_err("%s%u in WU state\n", link_unk_evt, event);
Per Lidenb97bf3f2006-01-02 19:04:38 +0100596 }
597 break;
598 case RESET_UNKNOWN:
Per Lidenb97bf3f2006-01-02 19:04:38 +0100599 switch (event) {
Jon Paul Maloyd3504c32015-07-16 16:54:25 -0400600 case TRAFFIC_EVT:
Per Lidenb97bf3f2006-01-02 19:04:38 +0100601 break;
602 case ACTIVATE_MSG:
Jon Paul Maloy9d13ec62015-07-16 16:54:19 -0400603 other = node_active_link(l_ptr->owner, 0);
Allan Stephens8d64a5b2010-12-31 18:59:27 +0000604 if (other && link_working_unknown(other))
Per Lidenb97bf3f2006-01-02 19:04:38 +0100605 break;
Per Lidenb97bf3f2006-01-02 19:04:38 +0100606 l_ptr->state = WORKING_WORKING;
Per Lidenb97bf3f2006-01-02 19:04:38 +0100607 link_activate(l_ptr);
Jon Paul Maloyed193ec2015-04-02 09:33:02 -0400608 tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0);
Jon Maloyc64f7a62012-11-16 13:51:31 +0800609 if (l_ptr->owner->working_links == 1)
Ying Xue247f0f32014-02-18 16:06:46 +0800610 tipc_link_sync_xmit(l_ptr);
Per Lidenb97bf3f2006-01-02 19:04:38 +0100611 break;
612 case RESET_MSG:
Per Lidenb97bf3f2006-01-02 19:04:38 +0100613 l_ptr->state = RESET_RESET;
Ying Xue247f0f32014-02-18 16:06:46 +0800614 tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
Jon Paul Maloyed193ec2015-04-02 09:33:02 -0400615 1, 0, 0, 0);
Per Lidenb97bf3f2006-01-02 19:04:38 +0100616 break;
Jon Paul Maloycd4eee32015-05-14 10:46:16 -0400617 case SILENCE_EVT:
Jon Paul Maloyed193ec2015-04-02 09:33:02 -0400618 tipc_link_proto_xmit(l_ptr, RESET_MSG, 0, 0, 0, 0);
Per Lidenb97bf3f2006-01-02 19:04:38 +0100619 break;
620 default:
Erik Hugne2cf8aa12012-06-29 00:16:37 -0400621 pr_err("%s%u in RU state\n", link_unk_evt, event);
Per Lidenb97bf3f2006-01-02 19:04:38 +0100622 }
623 break;
624 case RESET_RESET:
Per Lidenb97bf3f2006-01-02 19:04:38 +0100625 switch (event) {
Jon Paul Maloyd3504c32015-07-16 16:54:25 -0400626 case TRAFFIC_EVT:
Per Lidenb97bf3f2006-01-02 19:04:38 +0100627 case ACTIVATE_MSG:
Jon Paul Maloy9d13ec62015-07-16 16:54:19 -0400628 other = node_active_link(l_ptr->owner, 0);
Allan Stephens8d64a5b2010-12-31 18:59:27 +0000629 if (other && link_working_unknown(other))
Per Lidenb97bf3f2006-01-02 19:04:38 +0100630 break;
Per Lidenb97bf3f2006-01-02 19:04:38 +0100631 l_ptr->state = WORKING_WORKING;
Per Lidenb97bf3f2006-01-02 19:04:38 +0100632 link_activate(l_ptr);
Jon Paul Maloyed193ec2015-04-02 09:33:02 -0400633 tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0);
Jon Maloyc64f7a62012-11-16 13:51:31 +0800634 if (l_ptr->owner->working_links == 1)
Ying Xue247f0f32014-02-18 16:06:46 +0800635 tipc_link_sync_xmit(l_ptr);
Per Lidenb97bf3f2006-01-02 19:04:38 +0100636 break;
637 case RESET_MSG:
Per Lidenb97bf3f2006-01-02 19:04:38 +0100638 break;
Jon Paul Maloycd4eee32015-05-14 10:46:16 -0400639 case SILENCE_EVT:
Ying Xue247f0f32014-02-18 16:06:46 +0800640 tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
Jon Paul Maloyed193ec2015-04-02 09:33:02 -0400641 0, 0, 0, 0);
Per Lidenb97bf3f2006-01-02 19:04:38 +0100642 break;
643 default:
Erik Hugne2cf8aa12012-06-29 00:16:37 -0400644 pr_err("%s%u in RR state\n", link_unk_evt, event);
Per Lidenb97bf3f2006-01-02 19:04:38 +0100645 }
646 break;
647 default:
Erik Hugne2cf8aa12012-06-29 00:16:37 -0400648 pr_err("Unknown link state %u/%u\n", l_ptr->state, event);
Per Lidenb97bf3f2006-01-02 19:04:38 +0100649 }
650}
651
/**
 * __tipc_link_xmit(): same as tipc_link_xmit, but destlink is known & locked
 * @net: the applicable net namespace
 * @link: link to use
 * @list: chain of buffers containing message
 *
 * Consumes the buffer chain, except when returning an error code,
 * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
 */
int __tipc_link_xmit(struct net *net, struct tipc_link *link,
		     struct sk_buff_head *list)
{
	struct tipc_msg *msg = buf_msg(skb_peek(list));
	unsigned int maxwin = link->window;
	unsigned int i, imp = msg_importance(msg);
	uint mtu = link->mtu;
	u16 ack = mod(link->rcv_nxt - 1);
	u16 seqno = link->snd_nxt;
	u16 bc_last_in = link->owner->bclink.last_in;
	struct tipc_media_addr *addr = &link->media_addr;
	struct sk_buff_head *transmq = &link->transmq;
	struct sk_buff_head *backlogq = &link->backlogq;
	struct sk_buff *skb, *bskb;

	/* Match msg importance against this and all higher backlog limits: */
	for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) {
		if (unlikely(link->backlog[i].len >= link->backlog[i].limit))
			return link_schedule_user(link, list);
	}
	if (unlikely(msg_size(msg) > mtu))
		return -EMSGSIZE;

	/* Prepare each packet for sending, and add to relevant queue: */
	while (skb_queue_len(list)) {
		skb = skb_peek(list);
		msg = buf_msg(skb);
		/* Stamp sequence number and piggybacked acks into header */
		msg_set_seqno(msg, seqno);
		msg_set_ack(msg, ack);
		msg_set_bcast_ack(msg, bc_last_in);

		/* Window open: send the packet on the bearer right away */
		if (likely(skb_queue_len(transmq) < maxwin)) {
			__skb_dequeue(list);
			__skb_queue_tail(transmq, skb);
			tipc_bearer_send(net, link->bearer_id, skb, addr);
			link->rcv_unacked = 0;
			seqno++;
			continue;
		}
		/* Window full: try to append msg to last backlog bundle.
		 * On success the original buffer is consumed here.
		 */
		if (tipc_msg_bundle(skb_peek_tail(backlogq), msg, mtu)) {
			kfree_skb(__skb_dequeue(list));
			link->stats.sent_bundled++;
			continue;
		}
		/* Otherwise try to start a new bundle holding this msg */
		if (tipc_msg_make_bundle(&bskb, msg, mtu, link->addr)) {
			kfree_skb(__skb_dequeue(list));
			__skb_queue_tail(backlogq, bskb);
			link->backlog[msg_importance(buf_msg(bskb))].len++;
			link->stats.sent_bundled++;
			link->stats.sent_bundles++;
			continue;
		}
		/* Not bundlable: move whole remaining chain to the backlog.
		 * These packets already carry their final seqno/acks.
		 */
		link->backlog[imp].len += skb_queue_len(list);
		skb_queue_splice_tail_init(list, backlogq);
	}
	/* Remember next sequence number to assign */
	link->snd_nxt = seqno;
	return 0;
}
719
Jon Paul Maloyaf9b0282015-07-16 16:54:24 -0400720/**
721 * tipc_link_xmit(): enqueue buffer list according to queue situation
722 * @link: link to use
723 * @list: chain of buffers containing message
724 * @xmitq: returned list of packets to be sent by caller
725 *
726 * Consumes the buffer chain, except when returning -ELINKCONG,
727 * since the caller then may want to make more send attempts.
728 * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
729 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
730 */
731int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
732 struct sk_buff_head *xmitq)
733{
734 struct tipc_msg *hdr = buf_msg(skb_peek(list));
735 unsigned int maxwin = l->window;
736 unsigned int i, imp = msg_importance(hdr);
737 unsigned int mtu = l->mtu;
738 u16 ack = l->rcv_nxt - 1;
739 u16 seqno = l->snd_nxt;
740 u16 bc_last_in = l->owner->bclink.last_in;
741 struct sk_buff_head *transmq = &l->transmq;
742 struct sk_buff_head *backlogq = &l->backlogq;
743 struct sk_buff *skb, *_skb, *bskb;
744
745 /* Match msg importance against this and all higher backlog limits: */
746 for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) {
747 if (unlikely(l->backlog[i].len >= l->backlog[i].limit))
748 return link_schedule_user(l, list);
749 }
750 if (unlikely(msg_size(hdr) > mtu))
751 return -EMSGSIZE;
752
753 /* Prepare each packet for sending, and add to relevant queue: */
754 while (skb_queue_len(list)) {
755 skb = skb_peek(list);
756 hdr = buf_msg(skb);
757 msg_set_seqno(hdr, seqno);
758 msg_set_ack(hdr, ack);
759 msg_set_bcast_ack(hdr, bc_last_in);
760
761 if (likely(skb_queue_len(transmq) < maxwin)) {
762 _skb = skb_clone(skb, GFP_ATOMIC);
763 if (!_skb)
764 return -ENOBUFS;
765 __skb_dequeue(list);
766 __skb_queue_tail(transmq, skb);
767 __skb_queue_tail(xmitq, _skb);
768 l->rcv_unacked = 0;
769 seqno++;
770 continue;
771 }
772 if (tipc_msg_bundle(skb_peek_tail(backlogq), hdr, mtu)) {
773 kfree_skb(__skb_dequeue(list));
774 l->stats.sent_bundled++;
775 continue;
776 }
777 if (tipc_msg_make_bundle(&bskb, hdr, mtu, l->addr)) {
778 kfree_skb(__skb_dequeue(list));
779 __skb_queue_tail(backlogq, bskb);
780 l->backlog[msg_importance(buf_msg(bskb))].len++;
781 l->stats.sent_bundled++;
782 l->stats.sent_bundles++;
783 continue;
784 }
785 l->backlog[imp].len += skb_queue_len(list);
786 skb_queue_splice_tail_init(list, backlogq);
787 }
788 l->snd_nxt = seqno;
789 return 0;
790}
791
/* skb2list(): wrap a single buffer in a freshly initialized list head,
 * so it can be handed to functions expecting a buffer chain
 */
static void skb2list(struct sk_buff *skb, struct sk_buff_head *list)
{
	skb_queue_head_init(list);
	__skb_queue_tail(list, skb);
}
797
798static int __tipc_link_xmit_skb(struct tipc_link *link, struct sk_buff *skb)
799{
800 struct sk_buff_head head;
801
802 skb2list(skb, &head);
Ying Xue7f9f95d2015-01-09 15:27:06 +0800803 return __tipc_link_xmit(link->owner->net, link, &head);
Ying Xuea6ca1092014-11-26 11:41:55 +0800804}
805
Jon Maloyc64f7a62012-11-16 13:51:31 +0800806/*
Ying Xue247f0f32014-02-18 16:06:46 +0800807 * tipc_link_sync_xmit - synchronize broadcast link endpoints.
Jon Maloyc64f7a62012-11-16 13:51:31 +0800808 *
809 * Give a newly added peer node the sequence number where it should
810 * start receiving and acking broadcast packets.
811 *
812 * Called with node locked
813 */
Jon Paul Maloy25b660c2014-07-16 20:40:59 -0400814static void tipc_link_sync_xmit(struct tipc_link *link)
Jon Maloyc64f7a62012-11-16 13:51:31 +0800815{
Ying Xuea6ca1092014-11-26 11:41:55 +0800816 struct sk_buff *skb;
Jon Maloyc64f7a62012-11-16 13:51:31 +0800817 struct tipc_msg *msg;
818
Ying Xuea6ca1092014-11-26 11:41:55 +0800819 skb = tipc_buf_acquire(INT_H_SIZE);
820 if (!skb)
Jon Maloyc64f7a62012-11-16 13:51:31 +0800821 return;
822
Ying Xuea6ca1092014-11-26 11:41:55 +0800823 msg = buf_msg(skb);
Jon Paul Maloyc5898632015-02-05 08:36:36 -0500824 tipc_msg_init(link_own_addr(link), msg, BCAST_PROTOCOL, STATE_MSG,
Ying Xue34747532015-01-09 15:27:10 +0800825 INT_H_SIZE, link->addr);
Jon Paul Maloy25b660c2014-07-16 20:40:59 -0400826 msg_set_last_bcast(msg, link->owner->bclink.acked);
Ying Xuea6ca1092014-11-26 11:41:55 +0800827 __tipc_link_xmit_skb(link, skb);
Jon Maloyc64f7a62012-11-16 13:51:31 +0800828}
829
830/*
Ying Xue247f0f32014-02-18 16:06:46 +0800831 * tipc_link_sync_rcv - synchronize broadcast link endpoints.
Jon Maloyc64f7a62012-11-16 13:51:31 +0800832 * Receive the sequence number where we should start receiving and
833 * acking broadcast packets from a newly added peer node, and open
834 * up for reception of such packets.
835 *
836 * Called with node locked
837 */
Ying Xue247f0f32014-02-18 16:06:46 +0800838static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf)
Jon Maloyc64f7a62012-11-16 13:51:31 +0800839{
840 struct tipc_msg *msg = buf_msg(buf);
841
842 n->bclink.last_sent = n->bclink.last_in = msg_last_bcast(msg);
843 n->bclink.recv_permitted = true;
844 kfree_skb(buf);
845}
846
847/*
Ying Xue47b4c9a2014-11-26 11:41:48 +0800848 * tipc_link_push_packets - push unsent packets to bearer
849 *
850 * Push out the unsent messages of a link where congestion
851 * has abated. Node is locked.
852 *
853 * Called with node locked
Per Lidenb97bf3f2006-01-02 19:04:38 +0100854 */
Jon Paul Maloy05dcc5a2015-03-13 16:08:10 -0400855void tipc_link_push_packets(struct tipc_link *link)
Per Lidenb97bf3f2006-01-02 19:04:38 +0100856{
Jon Paul Maloy05dcc5a2015-03-13 16:08:10 -0400857 struct sk_buff *skb;
Ying Xue47b4c9a2014-11-26 11:41:48 +0800858 struct tipc_msg *msg;
Jon Paul Maloydd3f9e72015-05-14 10:46:18 -0400859 u16 seqno = link->snd_nxt;
Jon Paul Maloya97b9d32015-05-14 10:46:15 -0400860 u16 ack = mod(link->rcv_nxt - 1);
Per Lidenb97bf3f2006-01-02 19:04:38 +0100861
Jon Paul Maloy05dcc5a2015-03-13 16:08:10 -0400862 while (skb_queue_len(&link->transmq) < link->window) {
863 skb = __skb_dequeue(&link->backlogq);
864 if (!skb)
Ying Xue47b4c9a2014-11-26 11:41:48 +0800865 break;
Jon Paul Maloy05dcc5a2015-03-13 16:08:10 -0400866 msg = buf_msg(skb);
Jon Paul Maloy1f66d162015-03-25 12:07:24 -0400867 link->backlog[msg_importance(msg)].len--;
Jon Paul Maloy05dcc5a2015-03-13 16:08:10 -0400868 msg_set_ack(msg, ack);
Jon Paul Maloydd3f9e72015-05-14 10:46:18 -0400869 msg_set_seqno(msg, seqno);
870 seqno = mod(seqno + 1);
Jon Paul Maloy05dcc5a2015-03-13 16:08:10 -0400871 msg_set_bcast_ack(msg, link->owner->bclink.last_in);
872 link->rcv_unacked = 0;
873 __skb_queue_tail(&link->transmq, skb);
874 tipc_bearer_send(link->owner->net, link->bearer_id,
875 skb, &link->media_addr);
Per Lidenb97bf3f2006-01-02 19:04:38 +0100876 }
Jon Paul Maloydd3f9e72015-05-14 10:46:18 -0400877 link->snd_nxt = seqno;
Per Lidenb97bf3f2006-01-02 19:04:38 +0100878}
879
Ying Xue3f5a12b2014-05-05 08:56:17 +0800880void tipc_link_reset_all(struct tipc_node *node)
Allan Stephensd356eeb2006-06-25 23:40:01 -0700881{
Allan Stephensd356eeb2006-06-25 23:40:01 -0700882 char addr_string[16];
883 u32 i;
884
Ying Xue3f5a12b2014-05-05 08:56:17 +0800885 tipc_node_lock(node);
Allan Stephensd356eeb2006-06-25 23:40:01 -0700886
Erik Hugne2cf8aa12012-06-29 00:16:37 -0400887 pr_warn("Resetting all links to %s\n",
Ying Xue3f5a12b2014-05-05 08:56:17 +0800888 tipc_addr_string_fill(addr_string, node->addr));
Allan Stephensd356eeb2006-06-25 23:40:01 -0700889
890 for (i = 0; i < MAX_BEARERS; i++) {
Jon Paul Maloy9d13ec62015-07-16 16:54:19 -0400891 if (node->links[i].link) {
892 link_print(node->links[i].link, "Resetting link\n");
893 tipc_link_reset(node->links[i].link);
Allan Stephensd356eeb2006-06-25 23:40:01 -0700894 }
895 }
896
Ying Xue3f5a12b2014-05-05 08:56:17 +0800897 tipc_node_unlock(node);
Allan Stephensd356eeb2006-06-25 23:40:01 -0700898}
899
Paul Gortmakera18c4bc2011-12-29 20:58:42 -0500900static void link_retransmit_failure(struct tipc_link *l_ptr,
Paul Gortmakerae8509c2013-06-17 10:54:47 -0400901 struct sk_buff *buf)
Allan Stephensd356eeb2006-06-25 23:40:01 -0700902{
903 struct tipc_msg *msg = buf_msg(buf);
Ying Xue1da46562015-01-09 15:27:07 +0800904 struct net *net = l_ptr->owner->net;
Allan Stephensd356eeb2006-06-25 23:40:01 -0700905
Erik Hugne2cf8aa12012-06-29 00:16:37 -0400906 pr_warn("Retransmission failure on link <%s>\n", l_ptr->name);
Allan Stephensd356eeb2006-06-25 23:40:01 -0700907
908 if (l_ptr->addr) {
Allan Stephensd356eeb2006-06-25 23:40:01 -0700909 /* Handle failure on standard link */
Allan Stephens8d64a5b2010-12-31 18:59:27 +0000910 link_print(l_ptr, "Resetting link\n");
Allan Stephensd356eeb2006-06-25 23:40:01 -0700911 tipc_link_reset(l_ptr);
912
913 } else {
Allan Stephensd356eeb2006-06-25 23:40:01 -0700914 /* Handle failure on broadcast link */
David S. Miller6c000552008-09-02 23:38:32 -0700915 struct tipc_node *n_ptr;
Allan Stephensd356eeb2006-06-25 23:40:01 -0700916 char addr_string[16];
917
Erik Hugne2cf8aa12012-06-29 00:16:37 -0400918 pr_info("Msg seq number: %u, ", msg_seqno(msg));
919 pr_cont("Outstanding acks: %lu\n",
920 (unsigned long) TIPC_SKB_CB(buf)->handle);
Jeff Garzik617dbea2006-10-03 16:25:34 -0700921
Ying Xue1da46562015-01-09 15:27:07 +0800922 n_ptr = tipc_bclink_retransmit_to(net);
Allan Stephensd356eeb2006-06-25 23:40:01 -0700923
Allan Stephensc68ca7b2010-05-11 14:30:12 +0000924 tipc_addr_string_fill(addr_string, n_ptr->addr);
Erik Hugne2cf8aa12012-06-29 00:16:37 -0400925 pr_info("Broadcast link info for %s\n", addr_string);
Ying Xue389dd9b2012-11-16 13:51:30 +0800926 pr_info("Reception permitted: %d, Acked: %u\n",
927 n_ptr->bclink.recv_permitted,
Erik Hugne2cf8aa12012-06-29 00:16:37 -0400928 n_ptr->bclink.acked);
929 pr_info("Last in: %u, Oos state: %u, Last sent: %u\n",
930 n_ptr->bclink.last_in,
931 n_ptr->bclink.oos_state,
932 n_ptr->bclink.last_sent);
Allan Stephensd356eeb2006-06-25 23:40:01 -0700933
Ying Xueb952b2b2015-03-26 18:10:23 +0800934 n_ptr->action_flags |= TIPC_BCAST_RESET;
Allan Stephensd356eeb2006-06-25 23:40:01 -0700935 l_ptr->stale_count = 0;
936 }
937}
938
/* tipc_link_retransmit - retransmit up to @retransmits packets from the
 * transmit queue, starting at buffer @skb.
 *
 * Tracks how often the same (oldest) sequence number is retransmitted;
 * after 100 consecutive attempts the link is declared failed.
 * Node lock must be held.
 */
void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *skb,
			  u32 retransmits)
{
	struct tipc_msg *msg;

	if (!skb)
		return;

	msg = buf_msg(skb);

	/* Detect repeated retransmit failures */
	if (l_ptr->last_retransm == msg_seqno(msg)) {
		if (++l_ptr->stale_count > 100) {
			link_retransmit_failure(l_ptr, skb);
			return;
		}
	} else {
		/* New head-of-queue seqno: restart the stale counter */
		l_ptr->last_retransm = msg_seqno(msg);
		l_ptr->stale_count = 1;
	}

	/* Walk transmq from @skb onward; @skb must be on that queue */
	skb_queue_walk_from(&l_ptr->transmq, skb) {
		if (!retransmits)
			break;
		msg = buf_msg(skb);
		/* Refresh piggybacked acks before resending */
		msg_set_ack(msg, mod(l_ptr->rcv_nxt - 1));
		msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
		tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, skb,
				 &l_ptr->media_addr);
		retransmits--;
		l_ptr->stats.retransmitted++;
	}
}
972
Jon Paul Maloy8b4ed862015-03-25 12:07:26 -0400973/* link_synch(): check if all packets arrived before the synch
974 * point have been consumed
975 * Returns true if the parallel links are synched, otherwise false
976 */
977static bool link_synch(struct tipc_link *l)
978{
979 unsigned int post_synch;
980 struct tipc_link *pl;
981
982 pl = tipc_parallel_link(l);
983 if (pl == l)
984 goto synched;
985
986 /* Was last pre-synch packet added to input queue ? */
Jon Paul Maloya97b9d32015-05-14 10:46:15 -0400987 if (less_eq(pl->rcv_nxt, l->synch_point))
Jon Paul Maloy8b4ed862015-03-25 12:07:26 -0400988 return false;
989
990 /* Is it still in the input queue ? */
Jon Paul Maloya97b9d32015-05-14 10:46:15 -0400991 post_synch = mod(pl->rcv_nxt - l->synch_point) - 1;
Jon Paul Maloyd39bbd42015-07-16 16:54:21 -0400992 if (skb_queue_len(pl->inputq) > post_synch)
Jon Paul Maloy8b4ed862015-03-25 12:07:26 -0400993 return false;
994synched:
Jon Paul Maloyd3504c32015-07-16 16:54:25 -0400995 l->exec_mode = TIPC_LINK_OPEN;
Jon Paul Maloy8b4ed862015-03-25 12:07:26 -0400996 return true;
997}
998
Ying Xuef03273f2014-11-26 11:41:54 +0800999static void link_retrieve_defq(struct tipc_link *link,
1000 struct sk_buff_head *list)
Per Lidenb97bf3f2006-01-02 19:04:38 +01001001{
Jon Paul Maloye4bf4f72015-05-14 10:46:14 -04001002 u16 seq_no;
Per Lidenb97bf3f2006-01-02 19:04:38 +01001003
Jon Paul Maloy05dcc5a2015-03-13 16:08:10 -04001004 if (skb_queue_empty(&link->deferdq))
Ying Xuef03273f2014-11-26 11:41:54 +08001005 return;
Per Lidenb97bf3f2006-01-02 19:04:38 +01001006
Jon Paul Maloy05dcc5a2015-03-13 16:08:10 -04001007 seq_no = buf_seqno(skb_peek(&link->deferdq));
Jon Paul Maloya97b9d32015-05-14 10:46:15 -04001008 if (seq_no == link->rcv_nxt)
Jon Paul Maloy05dcc5a2015-03-13 16:08:10 -04001009 skb_queue_splice_tail_init(&link->deferdq, list);
Per Lidenb97bf3f2006-01-02 19:04:38 +01001010}
1011
/**
 * tipc_rcv - process TIPC packets/messages arriving from off-node
 * @net: the applicable net namespace
 * @skb: TIPC packet
 * @b_ptr: pointer to bearer message arrived on
 *
 * Invoked with no locks held. Bearer pointer must point to a valid bearer
 * structure (i.e. cannot be NULL), but bearer can be inactive.
 */
void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct sk_buff_head head;
	struct tipc_node *n_ptr;
	struct tipc_link *l_ptr;
	struct sk_buff *skb1, *tmp;
	struct tipc_msg *msg;
	u16 seq_no;
	u16 ackd;
	u32 released;

	/* Work queue seeded with the arriving buffer; in-sequence deferred
	 * packets may be appended to it as processing proceeds
	 */
	skb2list(skb, &head);

	while ((skb = __skb_dequeue(&head))) {
		/* Ensure message is well-formed */
		if (unlikely(!tipc_msg_validate(skb)))
			goto discard;

		/* Handle arrival of a non-unicast link message */
		msg = buf_msg(skb);
		if (unlikely(msg_non_seq(msg))) {
			if (msg_user(msg) ==  LINK_CONFIG)
				tipc_disc_rcv(net, skb, b_ptr);
			else
				tipc_bclink_rcv(net, skb);
			continue;
		}

		/* Discard unicast link messages destined for another node */
		if (unlikely(!msg_short(msg) &&
			     (msg_destnode(msg) != tn->own_addr)))
			goto discard;

		/* Locate neighboring node that sent message
		 * (takes a node reference, dropped at "unlock" below)
		 */
		n_ptr = tipc_node_find(net, msg_prevnode(msg));
		if (unlikely(!n_ptr))
			goto discard;

		tipc_node_lock(n_ptr);
		/* Locate unicast link endpoint that should handle message */
		l_ptr = n_ptr->links[b_ptr->identity].link;
		if (unlikely(!l_ptr))
			goto unlock;

		/* Verify that communication with node is currently allowed */
		if ((n_ptr->action_flags & TIPC_WAIT_PEER_LINKS_DOWN) &&
		    msg_user(msg) == LINK_PROTOCOL &&
		    (msg_type(msg) == RESET_MSG ||
		    msg_type(msg) == ACTIVATE_MSG) &&
		    !msg_redundant_link(msg))
			n_ptr->action_flags &= ~TIPC_WAIT_PEER_LINKS_DOWN;

		if (tipc_node_blocked(n_ptr))
			goto unlock;

		/* Validate message sequence number info */
		seq_no = msg_seqno(msg);
		ackd = msg_ack(msg);

		/* Release acked messages */
		if (unlikely(n_ptr->bclink.acked != msg_bcast_ack(msg)))
			tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));

		/* Drop everything on transmq up to and including ackd */
		released = 0;
		skb_queue_walk_safe(&l_ptr->transmq, skb1, tmp) {
			if (more(buf_seqno(skb1), ackd))
				break;
			__skb_unlink(skb1, &l_ptr->transmq);
			kfree_skb(skb1);
			released = 1;
		}

		/* Try sending any messages link endpoint has pending */
		if (unlikely(skb_queue_len(&l_ptr->backlogq)))
			tipc_link_push_packets(l_ptr);

		/* Acks freed transmq slots: wake up blocked senders */
		if (released && !skb_queue_empty(&l_ptr->wakeupq))
			link_prepare_wakeup(l_ptr);

		/* Process the incoming packet */
		if (unlikely(!link_working_working(l_ptr))) {
			if (msg_user(msg) == LINK_PROTOCOL) {
				tipc_link_proto_rcv(l_ptr, skb);
				link_retrieve_defq(l_ptr, &head);
				/* Buffer ownership passed on; avoid the
				 * kfree_skb() at "discard"
				 */
				skb = NULL;
				goto unlock;
			}

			/* Traffic message. Conditionally activate link */
			link_state_event(l_ptr, TRAFFIC_EVT);

			if (link_working_working(l_ptr)) {
				/* Re-insert buffer in front of queue */
				__skb_queue_head(&head, skb);
				skb = NULL;
				goto unlock;
			}
			goto unlock;
		}

		/* Link is now in state WORKING_WORKING */
		if (unlikely(seq_no != l_ptr->rcv_nxt)) {
			link_handle_out_of_seq_msg(l_ptr, skb);
			link_retrieve_defq(l_ptr, &head);
			skb = NULL;
			goto unlock;
		}
		/* In-sequence traffic: peer is clearly alive */
		l_ptr->silent_intv_cnt = 0;

		/* Synchronize with parallel link if applicable */
		if (unlikely((l_ptr->exec_mode == TIPC_LINK_TUNNEL) &&
			     !msg_dup(msg))) {
			if (!link_synch(l_ptr))
				goto unlock;
		}
		l_ptr->rcv_nxt++;
		if (unlikely(!skb_queue_empty(&l_ptr->deferdq)))
			link_retrieve_defq(l_ptr, &head);
		/* Ack periodically instead of per packet */
		if (unlikely(++l_ptr->rcv_unacked >= TIPC_MIN_LINK_WIN)) {
			l_ptr->stats.sent_acks++;
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0);
		}
		tipc_link_input(l_ptr, skb);
		skb = NULL;
unlock:
		tipc_node_unlock(n_ptr);
		tipc_node_put(n_ptr);
discard:
		if (unlikely(skb))
			kfree_skb(skb);
	}
}
1154
Jon Paul Maloyc637c102015-02-05 08:36:41 -05001155/* tipc_data_input - deliver data and name distr msgs to upper layer
Erik Hugne7ae934b2014-07-01 10:22:40 +02001156 *
Jon Paul Maloyc637c102015-02-05 08:36:41 -05001157 * Consumes buffer if message is of right type
Erik Hugne7ae934b2014-07-01 10:22:40 +02001158 * Node lock must be held
1159 */
Jon Paul Maloyc637c102015-02-05 08:36:41 -05001160static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb)
Erik Hugne7ae934b2014-07-01 10:22:40 +02001161{
Jon Paul Maloyc637c102015-02-05 08:36:41 -05001162 struct tipc_node *node = link->owner;
1163 struct tipc_msg *msg = buf_msg(skb);
1164 u32 dport = msg_destport(msg);
Erik Hugne7ae934b2014-07-01 10:22:40 +02001165
1166 switch (msg_user(msg)) {
1167 case TIPC_LOW_IMPORTANCE:
1168 case TIPC_MEDIUM_IMPORTANCE:
1169 case TIPC_HIGH_IMPORTANCE:
1170 case TIPC_CRITICAL_IMPORTANCE:
1171 case CONN_MANAGER:
Jon Paul Maloyd39bbd42015-07-16 16:54:21 -04001172 if (tipc_skb_queue_tail(link->inputq, skb, dport)) {
1173 node->inputq = link->inputq;
Jon Paul Maloyc637c102015-02-05 08:36:41 -05001174 node->action_flags |= TIPC_MSG_EVT;
1175 }
1176 return true;
Erik Hugne7ae934b2014-07-01 10:22:40 +02001177 case NAME_DISTRIBUTOR:
Jon Paul Maloyc637c102015-02-05 08:36:41 -05001178 node->bclink.recv_permitted = true;
Jon Paul Maloyd39bbd42015-07-16 16:54:21 -04001179 node->namedq = link->namedq;
1180 skb_queue_tail(link->namedq, skb);
1181 if (skb_queue_len(link->namedq) == 1)
Jon Paul Maloyc637c102015-02-05 08:36:41 -05001182 node->action_flags |= TIPC_NAMED_MSG_EVT;
1183 return true;
Erik Hugne7ae934b2014-07-01 10:22:40 +02001184 case MSG_BUNDLER:
Jon Paul Maloydff29b12015-04-02 09:33:01 -04001185 case TUNNEL_PROTOCOL:
Jon Paul Maloyc637c102015-02-05 08:36:41 -05001186 case MSG_FRAGMENTER:
1187 case BCAST_PROTOCOL:
1188 return false;
1189 default:
1190 pr_warn("Dropping received illegal msg type\n");
1191 kfree_skb(skb);
1192 return false;
1193 };
1194}
1195
1196/* tipc_link_input - process packet that has passed link protocol check
1197 *
1198 * Consumes buffer
1199 * Node lock must be held
1200 */
1201static void tipc_link_input(struct tipc_link *link, struct sk_buff *skb)
1202{
1203 struct tipc_node *node = link->owner;
1204 struct tipc_msg *msg = buf_msg(skb);
1205 struct sk_buff *iskb;
1206 int pos = 0;
1207
1208 if (likely(tipc_data_input(link, skb)))
1209 return;
1210
1211 switch (msg_user(msg)) {
Jon Paul Maloydff29b12015-04-02 09:33:01 -04001212 case TUNNEL_PROTOCOL:
Jon Paul Maloy8b4ed862015-03-25 12:07:26 -04001213 if (msg_dup(msg)) {
Jon Paul Maloyd3504c32015-07-16 16:54:25 -04001214 link->exec_mode = TIPC_LINK_TUNNEL;
Jon Paul Maloy8b4ed862015-03-25 12:07:26 -04001215 link->synch_point = msg_seqno(msg_get_wrapped(msg));
Jon Paul Maloy2da71422015-04-02 09:33:00 -04001216 kfree_skb(skb);
1217 break;
Jon Paul Maloy8b4ed862015-03-25 12:07:26 -04001218 }
Jon Paul Maloydff29b12015-04-02 09:33:01 -04001219 if (!tipc_link_failover_rcv(link, &skb))
Jon Paul Maloyc637c102015-02-05 08:36:41 -05001220 break;
1221 if (msg_user(buf_msg(skb)) != MSG_BUNDLER) {
1222 tipc_data_input(link, skb);
1223 break;
1224 }
1225 case MSG_BUNDLER:
1226 link->stats.recv_bundles++;
1227 link->stats.recv_bundled += msg_msgcnt(msg);
1228
1229 while (tipc_msg_extract(skb, &iskb, &pos))
1230 tipc_data_input(link, iskb);
1231 break;
1232 case MSG_FRAGMENTER:
1233 link->stats.recv_fragments++;
1234 if (tipc_buf_append(&link->reasm_buf, &skb)) {
1235 link->stats.recv_fragmented++;
1236 tipc_data_input(link, skb);
1237 } else if (!link->reasm_buf) {
1238 tipc_link_reset(link);
1239 }
1240 break;
1241 case BCAST_PROTOCOL:
1242 tipc_link_sync_rcv(node, skb);
Erik Hugne7ae934b2014-07-01 10:22:40 +02001243 break;
1244 default:
Jon Paul Maloyc637c102015-02-05 08:36:41 -05001245 break;
1246 };
Erik Hugne7ae934b2014-07-01 10:22:40 +02001247}
1248
1249/**
Allan Stephens8809b252011-10-25 10:44:35 -04001250 * tipc_link_defer_pkt - Add out-of-sequence message to deferred reception queue
1251 *
1252 * Returns increase in queue length (i.e. 0 or 1)
Per Lidenb97bf3f2006-01-02 19:04:38 +01001253 */
Ying Xuebc6fecd2014-11-26 11:41:53 +08001254u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *skb)
Per Lidenb97bf3f2006-01-02 19:04:38 +01001255{
Ying Xuebc6fecd2014-11-26 11:41:53 +08001256 struct sk_buff *skb1;
Jon Paul Maloye4bf4f72015-05-14 10:46:14 -04001257 u16 seq_no = buf_seqno(skb);
Per Lidenb97bf3f2006-01-02 19:04:38 +01001258
1259 /* Empty queue ? */
Ying Xuebc6fecd2014-11-26 11:41:53 +08001260 if (skb_queue_empty(list)) {
1261 __skb_queue_tail(list, skb);
Per Lidenb97bf3f2006-01-02 19:04:38 +01001262 return 1;
1263 }
1264
1265 /* Last ? */
Ying Xuebc6fecd2014-11-26 11:41:53 +08001266 if (less(buf_seqno(skb_peek_tail(list)), seq_no)) {
1267 __skb_queue_tail(list, skb);
Per Lidenb97bf3f2006-01-02 19:04:38 +01001268 return 1;
1269 }
1270
Allan Stephens8809b252011-10-25 10:44:35 -04001271 /* Locate insertion point in queue, then insert; discard if duplicate */
Ying Xuebc6fecd2014-11-26 11:41:53 +08001272 skb_queue_walk(list, skb1) {
Jon Paul Maloye4bf4f72015-05-14 10:46:14 -04001273 u16 curr_seqno = buf_seqno(skb1);
Per Lidenb97bf3f2006-01-02 19:04:38 +01001274
Allan Stephens8809b252011-10-25 10:44:35 -04001275 if (seq_no == curr_seqno) {
Ying Xuebc6fecd2014-11-26 11:41:53 +08001276 kfree_skb(skb);
Allan Stephens8809b252011-10-25 10:44:35 -04001277 return 0;
Per Lidenb97bf3f2006-01-02 19:04:38 +01001278 }
Allan Stephens8809b252011-10-25 10:44:35 -04001279
1280 if (less(seq_no, curr_seqno))
Per Lidenb97bf3f2006-01-02 19:04:38 +01001281 break;
Allan Stephens8809b252011-10-25 10:44:35 -04001282 }
Per Lidenb97bf3f2006-01-02 19:04:38 +01001283
Ying Xuebc6fecd2014-11-26 11:41:53 +08001284 __skb_queue_before(list, skb1, skb);
Allan Stephens8809b252011-10-25 10:44:35 -04001285 return 1;
Per Lidenb97bf3f2006-01-02 19:04:38 +01001286}
1287
Allan Stephens8809b252011-10-25 10:44:35 -04001288/*
Per Lidenb97bf3f2006-01-02 19:04:38 +01001289 * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet
1290 */
Jon Paul Maloyc5898632015-02-05 08:36:36 -05001291static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
Per Lidenb97bf3f2006-01-02 19:04:38 +01001292 struct sk_buff *buf)
1293{
Allan Stephensf9057302011-10-24 16:03:12 -04001294 u32 seq_no = buf_seqno(buf);
Per Lidenb97bf3f2006-01-02 19:04:38 +01001295
1296 if (likely(msg_user(buf_msg(buf)) == LINK_PROTOCOL)) {
Jon Paul Maloyc5898632015-02-05 08:36:36 -05001297 tipc_link_proto_rcv(l_ptr, buf);
Per Lidenb97bf3f2006-01-02 19:04:38 +01001298 return;
1299 }
1300
Jon Paul Maloycd4eee32015-05-14 10:46:16 -04001301 /* Record OOS packet arrival */
1302 l_ptr->silent_intv_cnt = 0;
Per Lidenb97bf3f2006-01-02 19:04:38 +01001303
YOSHIFUJI Hideakic4307282007-02-09 23:25:21 +09001304 /*
Per Lidenb97bf3f2006-01-02 19:04:38 +01001305 * Discard packet if a duplicate; otherwise add it to deferred queue
1306 * and notify peer of gap as per protocol specification
1307 */
Jon Paul Maloya97b9d32015-05-14 10:46:15 -04001308 if (less(seq_no, l_ptr->rcv_nxt)) {
Per Lidenb97bf3f2006-01-02 19:04:38 +01001309 l_ptr->stats.duplicates++;
Allan Stephens5f6d9122011-11-04 13:24:29 -04001310 kfree_skb(buf);
Per Lidenb97bf3f2006-01-02 19:04:38 +01001311 return;
1312 }
1313
Jon Paul Maloy05dcc5a2015-03-13 16:08:10 -04001314 if (tipc_link_defer_pkt(&l_ptr->deferdq, buf)) {
Per Lidenb97bf3f2006-01-02 19:04:38 +01001315 l_ptr->stats.deferred_recv++;
Jon Paul Maloy05dcc5a2015-03-13 16:08:10 -04001316 if ((skb_queue_len(&l_ptr->deferdq) % TIPC_MIN_LINK_WIN) == 1)
Jon Paul Maloyed193ec2015-04-02 09:33:02 -04001317 tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0);
Ying Xuebc6fecd2014-11-26 11:41:53 +08001318 } else {
Per Lidenb97bf3f2006-01-02 19:04:38 +01001319 l_ptr->stats.duplicates++;
Ying Xuebc6fecd2014-11-26 11:41:53 +08001320 }
Per Lidenb97bf3f2006-01-02 19:04:38 +01001321}
1322
/*
 * Send protocol message (RESET_MSG/ACTIVATE_MSG/STATE_MSG) to the peer
 * endpoint. The message is built in the link's pre-allocated pmsg buffer,
 * then copied into a fresh skb and handed to the bearer.
 */
void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
			  u32 gap, u32 tolerance, u32 priority)
{
	struct sk_buff *buf = NULL;
	struct tipc_msg *msg = l_ptr->pmsg;
	u32 msg_size = sizeof(l_ptr->proto_msg);
	int r_flag;
	u16 last_rcv;

	/* Don't send protocol message during link failover */
	if (l_ptr->exec_mode == TIPC_LINK_BLOCKED)
		return;

	/* Abort non-RESET send if communication with node is prohibited */
	if ((tipc_node_blocked(l_ptr->owner)) && (msg_typ != RESET_MSG))
		return;

	/* Create protocol message with "out-of-sequence" sequence number */
	msg_set_type(msg, msg_typ);
	msg_set_net_plane(msg, l_ptr->net_plane);
	msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
	msg_set_last_bcast(msg, tipc_bclink_get_last_sent(l_ptr->owner->net));

	if (msg_typ == STATE_MSG) {
		u16 next_sent = l_ptr->snd_nxt;

		/* STATE messages only make sense on an established link */
		if (!tipc_link_is_up(l_ptr))
			return;
		msg_set_next_sent(msg, next_sent);
		/* Deferred packets present: report the reception gap so the
		 * peer can retransmit the missing range
		 */
		if (!skb_queue_empty(&l_ptr->deferdq)) {
			last_rcv = buf_seqno(skb_peek(&l_ptr->deferdq));
			gap = mod(last_rcv - l_ptr->rcv_nxt);
		}
		msg_set_seq_gap(msg, gap);
		if (gap)
			l_ptr->stats.sent_nacks++;
		msg_set_link_tolerance(msg, tolerance);
		msg_set_linkprio(msg, priority);
		msg_set_max_pkt(msg, l_ptr->mtu);
		msg_set_ack(msg, mod(l_ptr->rcv_nxt - 1));
		msg_set_probe(msg, probe_msg != 0);
		if (probe_msg)
			l_ptr->stats.sent_probes++;
		l_ptr->stats.sent_states++;
	} else {		/* RESET_MSG or ACTIVATE_MSG */
		/* Ack relative to the failover checkpoint, not rcv_nxt */
		msg_set_ack(msg, mod(l_ptr->failover_checkpt - 1));
		msg_set_seq_gap(msg, 0);
		msg_set_next_sent(msg, 1);
		msg_set_probe(msg, 0);
		msg_set_link_tolerance(msg, l_ptr->tolerance);
		msg_set_linkprio(msg, l_ptr->priority);
		/* Advertise configured MTU; negotiated value not yet known */
		msg_set_max_pkt(msg, l_ptr->advertised_mtu);
	}

	/* Tell peer whether another working link to this node exists */
	r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr));
	msg_set_redundant_link(msg, r_flag);
	msg_set_linkprio(msg, l_ptr->priority);
	msg_set_size(msg, msg_size);

	/* Sequence number deliberately far outside the data window */
	msg_set_seqno(msg, mod(l_ptr->snd_nxt + (0xffff / 2)));

	buf = tipc_buf_acquire(msg_size);
	if (!buf)
		return;

	skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
	buf->priority = TC_PRIO_CONTROL;
	tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, buf,
			 &l_ptr->media_addr);
	/* Sending any protocol message implicitly acks received traffic */
	l_ptr->rcv_unacked = 0;
	kfree_skb(buf);
}
1398
/*
 * Receive protocol message :
 * Note that network plane id propagates through the network, and may
 * change at any time. The node with lowest address rules
 * Consumes buffer in all cases (freed at exit).
 */
static void tipc_link_proto_rcv(struct tipc_link *l_ptr,
				struct sk_buff *buf)
{
	u32 rec_gap = 0;
	u32 msg_tol;
	struct tipc_msg *msg = buf_msg(buf);

	/* Ignore protocol traffic while link is blocked (failover) */
	if (l_ptr->exec_mode == TIPC_LINK_BLOCKED)
		goto exit;

	/* Adopt peer's net plane if it has the lower (ruling) address */
	if (l_ptr->net_plane != msg_net_plane(msg))
		if (link_own_addr(l_ptr) > msg_prevnode(msg))
			l_ptr->net_plane = msg_net_plane(msg);

	switch (msg_type(msg)) {

	case RESET_MSG:
		if (!link_working_unknown(l_ptr) &&
		    (l_ptr->peer_session != WILDCARD_SESSION)) {
			if (less_eq(msg_session(msg), l_ptr->peer_session))
				break; /* duplicate or old reset: ignore */
		}

		if (!msg_redundant_link(msg) && (link_working_working(l_ptr) ||
				link_working_unknown(l_ptr))) {
			/*
			 * peer has lost contact -- don't allow peer's links
			 * to reactivate before we recognize loss & clean up
			 */
			l_ptr->owner->action_flags |= TIPC_WAIT_OWN_LINKS_DOWN;
		}

		link_state_event(l_ptr, RESET_MSG);

		/* fall thru' */
	case ACTIVATE_MSG:
		/* Update link settings according other endpoint's values */
		strcpy((strrchr(l_ptr->name, ':') + 1), (char *)msg_data(msg));

		/* Adopt the larger tolerance of the two endpoints */
		msg_tol = msg_link_tolerance(msg);
		if (msg_tol > l_ptr->tolerance)
			link_set_supervision_props(l_ptr, msg_tol);

		/* Adopt the higher priority of the two endpoints */
		if (msg_linkprio(msg) > l_ptr->priority)
			l_ptr->priority = msg_linkprio(msg);

		/* Negotiate MTU down to the smaller of the two */
		if (l_ptr->mtu > msg_max_pkt(msg))
			l_ptr->mtu = msg_max_pkt(msg);

		/* Synchronize broadcast link info, if not done previously */
		if (!tipc_node_is_up(l_ptr->owner)) {
			l_ptr->owner->bclink.last_sent =
				l_ptr->owner->bclink.last_in =
				msg_last_bcast(msg);
			l_ptr->owner->bclink.oos_state = 0;
		}

		l_ptr->peer_session = msg_session(msg);
		l_ptr->peer_bearer_id = msg_bearer_id(msg);

		if (msg_type(msg) == ACTIVATE_MSG)
			link_state_event(l_ptr, ACTIVATE_MSG);
		break;
	case STATE_MSG:

		msg_tol = msg_link_tolerance(msg);
		if (msg_tol)
			link_set_supervision_props(l_ptr, msg_tol);

		/* Peer-initiated priority change forces a link reset so the
		 * new value takes effect on both ends
		 */
		if (msg_linkprio(msg) &&
		    (msg_linkprio(msg) != l_ptr->priority)) {
			pr_debug("%s<%s>, priority change %u->%u\n",
				 link_rst_msg, l_ptr->name,
				 l_ptr->priority, msg_linkprio(msg));
			l_ptr->priority = msg_linkprio(msg);
			tipc_link_reset(l_ptr); /* Enforce change to take effect */
			break;
		}

		/* Record reception; force mismatch at next timeout: */
		l_ptr->silent_intv_cnt = 0;

		link_state_event(l_ptr, TRAFFIC_EVT);
		l_ptr->stats.recv_states++;
		if (link_reset_unknown(l_ptr))
			break;

		/* Compute gap between what we have and what peer has sent */
		if (less_eq(l_ptr->rcv_nxt, msg_next_sent(msg)))
			rec_gap = mod(msg_next_sent(msg) - l_ptr->rcv_nxt);

		if (msg_probe(msg))
			l_ptr->stats.recv_probes++;

		/* Protocol message before retransmits, reduce loss risk */
		if (l_ptr->owner->bclink.recv_permitted)
			tipc_bclink_update_link_state(l_ptr->owner,
						      msg_last_bcast(msg));

		/* Answer probes and report any reception gap to the peer */
		if (rec_gap || (msg_probe(msg))) {
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 0,
					     rec_gap, 0, 0);
		}
		/* Peer reported a gap: retransmit from head of transmq */
		if (msg_seq_gap(msg)) {
			l_ptr->stats.recv_nacks++;
			tipc_link_retransmit(l_ptr, skb_peek(&l_ptr->transmq),
					     msg_seq_gap(msg));
		}
		break;
	}
exit:
	kfree_skb(buf);
}
1516
1517
Jon Paul Maloy170b3922014-01-07 17:02:41 -05001518/* tipc_link_tunnel_xmit(): Tunnel one packet via a link belonging to
1519 * a different bearer. Owner node is locked.
Per Lidenb97bf3f2006-01-02 19:04:38 +01001520 */
Jon Paul Maloy170b3922014-01-07 17:02:41 -05001521static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr,
1522 struct tipc_msg *tunnel_hdr,
1523 struct tipc_msg *msg,
1524 u32 selector)
Per Lidenb97bf3f2006-01-02 19:04:38 +01001525{
Paul Gortmakera18c4bc2011-12-29 20:58:42 -05001526 struct tipc_link *tunnel;
Ying Xuea6ca1092014-11-26 11:41:55 +08001527 struct sk_buff *skb;
Per Lidenb97bf3f2006-01-02 19:04:38 +01001528 u32 length = msg_size(msg);
1529
Jon Paul Maloy9d13ec62015-07-16 16:54:19 -04001530 tunnel = node_active_link(l_ptr->owner, selector & 1);
Allan Stephens5392d642006-06-25 23:52:50 -07001531 if (!tipc_link_is_up(tunnel)) {
Erik Hugne2cf8aa12012-06-29 00:16:37 -04001532 pr_warn("%stunnel link no longer available\n", link_co_err);
Per Lidenb97bf3f2006-01-02 19:04:38 +01001533 return;
Allan Stephens5392d642006-06-25 23:52:50 -07001534 }
Per Lidenb97bf3f2006-01-02 19:04:38 +01001535 msg_set_size(tunnel_hdr, length + INT_H_SIZE);
Ying Xuea6ca1092014-11-26 11:41:55 +08001536 skb = tipc_buf_acquire(length + INT_H_SIZE);
1537 if (!skb) {
Erik Hugne2cf8aa12012-06-29 00:16:37 -04001538 pr_warn("%sunable to send tunnel msg\n", link_co_err);
Per Lidenb97bf3f2006-01-02 19:04:38 +01001539 return;
Allan Stephens5392d642006-06-25 23:52:50 -07001540 }
Ying Xuea6ca1092014-11-26 11:41:55 +08001541 skb_copy_to_linear_data(skb, tunnel_hdr, INT_H_SIZE);
1542 skb_copy_to_linear_data_offset(skb, INT_H_SIZE, msg, length);
1543 __tipc_link_xmit_skb(tunnel, skb);
Per Lidenb97bf3f2006-01-02 19:04:38 +01001544}
1545
1546
Jon Paul Maloy170b3922014-01-07 17:02:41 -05001547/* tipc_link_failover_send_queue(): A link has gone down, but a second
1548 * link is still active. We can do failover. Tunnel the failing link's
1549 * whole send queue via the remaining link. This way, we don't lose
1550 * any packets, and sequence order is preserved for subsequent traffic
1551 * sent over the remaining link. Owner node is locked.
Per Lidenb97bf3f2006-01-02 19:04:38 +01001552 */
Jon Paul Maloy170b3922014-01-07 17:02:41 -05001553void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
Per Lidenb97bf3f2006-01-02 19:04:38 +01001554{
Jon Paul Maloy05dcc5a2015-03-13 16:08:10 -04001555 int msgcount;
Jon Paul Maloy9d13ec62015-07-16 16:54:19 -04001556 struct tipc_link *tunnel = node_active_link(l_ptr->owner, 0);
Per Lidenb97bf3f2006-01-02 19:04:38 +01001557 struct tipc_msg tunnel_hdr;
Ying Xue58dc55f2014-11-26 11:41:52 +08001558 struct sk_buff *skb;
Allan Stephens5392d642006-06-25 23:52:50 -07001559 int split_bundles;
Per Lidenb97bf3f2006-01-02 19:04:38 +01001560
1561 if (!tunnel)
1562 return;
1563
Jon Paul Maloydff29b12015-04-02 09:33:01 -04001564 tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, TUNNEL_PROTOCOL,
1565 FAILOVER_MSG, INT_H_SIZE, l_ptr->addr);
Jon Paul Maloydd3f9e72015-05-14 10:46:18 -04001566
1567 skb_queue_walk(&l_ptr->backlogq, skb) {
1568 msg_set_seqno(buf_msg(skb), l_ptr->snd_nxt);
1569 l_ptr->snd_nxt = mod(l_ptr->snd_nxt + 1);
1570 }
Jon Paul Maloy05dcc5a2015-03-13 16:08:10 -04001571 skb_queue_splice_tail_init(&l_ptr->backlogq, &l_ptr->transmq);
Jon Paul Maloy1f66d162015-03-25 12:07:24 -04001572 tipc_link_purge_backlog(l_ptr);
Jon Paul Maloy05dcc5a2015-03-13 16:08:10 -04001573 msgcount = skb_queue_len(&l_ptr->transmq);
Per Lidenb97bf3f2006-01-02 19:04:38 +01001574 msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
1575 msg_set_msgcnt(&tunnel_hdr, msgcount);
Allan Stephensf1310722006-06-25 23:51:37 -07001576
Jon Paul Maloy05dcc5a2015-03-13 16:08:10 -04001577 if (skb_queue_empty(&l_ptr->transmq)) {
Ying Xue58dc55f2014-11-26 11:41:52 +08001578 skb = tipc_buf_acquire(INT_H_SIZE);
1579 if (skb) {
1580 skb_copy_to_linear_data(skb, &tunnel_hdr, INT_H_SIZE);
Per Lidenb97bf3f2006-01-02 19:04:38 +01001581 msg_set_size(&tunnel_hdr, INT_H_SIZE);
Ying Xuea6ca1092014-11-26 11:41:55 +08001582 __tipc_link_xmit_skb(tunnel, skb);
Per Lidenb97bf3f2006-01-02 19:04:38 +01001583 } else {
Erik Hugne2cf8aa12012-06-29 00:16:37 -04001584 pr_warn("%sunable to send changeover msg\n",
1585 link_co_err);
Per Lidenb97bf3f2006-01-02 19:04:38 +01001586 }
1587 return;
1588 }
Allan Stephensf1310722006-06-25 23:51:37 -07001589
Jon Paul Maloy9d13ec62015-07-16 16:54:19 -04001590 split_bundles = (node_active_link(l_ptr->owner, 0) !=
1591 node_active_link(l_ptr->owner, 0));
Allan Stephens5392d642006-06-25 23:52:50 -07001592
Jon Paul Maloy05dcc5a2015-03-13 16:08:10 -04001593 skb_queue_walk(&l_ptr->transmq, skb) {
Ying Xue58dc55f2014-11-26 11:41:52 +08001594 struct tipc_msg *msg = buf_msg(skb);
Per Lidenb97bf3f2006-01-02 19:04:38 +01001595
1596 if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
Per Lidenb97bf3f2006-01-02 19:04:38 +01001597 struct tipc_msg *m = msg_get_wrapped(msg);
Allan Stephens0e659672010-12-31 18:59:32 +00001598 unchar *pos = (unchar *)m;
Per Lidenb97bf3f2006-01-02 19:04:38 +01001599
Florian Westphald788d802007-08-02 19:28:06 -07001600 msgcount = msg_msgcnt(msg);
Per Lidenb97bf3f2006-01-02 19:04:38 +01001601 while (msgcount--) {
Allan Stephens0e659672010-12-31 18:59:32 +00001602 msg_set_seqno(m, msg_seqno(msg));
Jon Paul Maloy170b3922014-01-07 17:02:41 -05001603 tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, m,
1604 msg_link_selector(m));
Per Lidenb97bf3f2006-01-02 19:04:38 +01001605 pos += align(msg_size(m));
1606 m = (struct tipc_msg *)pos;
1607 }
1608 } else {
Jon Paul Maloy170b3922014-01-07 17:02:41 -05001609 tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, msg,
1610 msg_link_selector(msg));
Per Lidenb97bf3f2006-01-02 19:04:38 +01001611 }
Per Lidenb97bf3f2006-01-02 19:04:38 +01001612 }
1613}
1614
/* tipc_link_dup_queue_xmit(): A second link has become active. Tunnel a
 * duplicate of the first link's send queue via the new link. This way, we
 * are guaranteed that currently queued packets from a socket are delivered
 * before future traffic from the same socket, even if this is using the
 * new link. The last arriving copy of each duplicate packet is dropped at
 * the receiving end by the regular protocol check, so packet cardinality
 * and sequence order is preserved per sender/receiver socket pair.
 * Owner node is locked.
 */
void tipc_link_dup_queue_xmit(struct tipc_link *link,
			      struct tipc_link *tnl)
{
	struct sk_buff *skb;
	struct tipc_msg tnl_hdr;
	struct sk_buff_head *queue = &link->transmq;	/* first pass */
	int mcnt;
	u16 seqno;

	tipc_msg_init(link_own_addr(link), &tnl_hdr, TUNNEL_PROTOCOL,
		      SYNCH_MSG, INT_H_SIZE, link->addr);
	/* Total packet count covers both transmit queue and backlog */
	mcnt = skb_queue_len(&link->transmq) + skb_queue_len(&link->backlogq);
	msg_set_msgcnt(&tnl_hdr, mcnt);
	msg_set_bearer_id(&tnl_hdr, link->peer_bearer_id);

tunnel_queue:
	/* Wrap and send every packet of the current queue; the loop body
	 * runs twice via the goto below: transmq first, then backlogq
	 */
	skb_queue_walk(queue, skb) {
		struct sk_buff *outskb;
		struct tipc_msg *msg = buf_msg(skb);
		u32 len = msg_size(msg);

		msg_set_ack(msg, mod(link->rcv_nxt - 1));
		msg_set_bcast_ack(msg, link->owner->bclink.last_in);
		msg_set_size(&tnl_hdr, len + INT_H_SIZE);
		outskb = tipc_buf_acquire(len + INT_H_SIZE);
		if (outskb == NULL) {
			pr_warn("%sunable to send duplicate msg\n",
				link_co_err);
			return;
		}
		skb_copy_to_linear_data(outskb, &tnl_hdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(outskb, INT_H_SIZE,
					       skb->data, len);
		__tipc_link_xmit_skb(tnl, outskb);
		/* Abort if the original link went down meanwhile */
		if (!tipc_link_is_up(link))
			return;
	}
	/* Second pass done: nothing left to tunnel */
	if (queue == &link->backlogq)
		return;
	/* Assign sequence numbers to backlog packets before second pass */
	seqno = link->snd_nxt;
	skb_queue_walk(&link->backlogq, skb) {
		msg_set_seqno(buf_msg(skb), seqno);
		seqno = mod(seqno + 1);
	}
	queue = &link->backlogq;
	goto tunnel_queue;
}
1671
/* tipc_link_failover_rcv(): Receive a tunnelled FAILOVER_MSG packet
 * Owner node is locked.
 *
 * On return, *skb holds the extracted inner packet (and the function
 * returns true), or NULL when there is nothing for the caller to process.
 * The incoming tunnel buffer is always consumed.
 */
static bool tipc_link_failover_rcv(struct tipc_link *link,
				   struct sk_buff **skb)
{
	struct tipc_msg *msg = buf_msg(*skb);
	struct sk_buff *iskb = NULL;
	struct tipc_link *pl = NULL;
	int bearer_id = msg_bearer_id(msg);
	int pos = 0;

	if (msg_type(msg) != FAILOVER_MSG) {
		pr_warn("%sunknown tunnel pkt received\n", link_co_err);
		goto exit;
	}
	/* Sanity-check the bearer id carried in the message */
	if (bearer_id >= MAX_BEARERS)
		goto exit;

	/* Failover must come from a different bearer than our own */
	if (bearer_id == link->bearer_id)
		goto exit;

	/* Reset the failed parallel link, if still up */
	pl = link->owner->links[bearer_id].link;
	if (pl && tipc_link_is_up(pl))
		tipc_link_reset(pl);

	/* First failover packet: learn how many tunnelled packets follow */
	if (link->failover_pkts == FIRST_FAILOVER)
		link->failover_pkts = msg_msgcnt(msg);

	/* Should we expect an inner packet? */
	if (!link->failover_pkts)
		goto exit;

	if (!tipc_msg_extract(*skb, &iskb, &pos)) {
		pr_warn("%sno inner failover pkt\n", link_co_err);
		*skb = NULL;
		goto exit;
	}
	link->failover_pkts--;
	*skb = NULL;	/* tunnel buffer consumed by tipc_msg_extract() */

	/* Was this packet already delivered? */
	if (less(buf_seqno(iskb), link->failover_checkpt)) {
		kfree_skb(iskb);
		iskb = NULL;
		goto exit;
	}
	/* Fragments are fed to reassembly; iskb becomes the full message
	 * (or NULL while reassembly is still incomplete)
	 */
	if (msg_user(buf_msg(iskb)) == MSG_FRAGMENTER) {
		link->stats.recv_fragments++;
		tipc_buf_append(&link->failover_skb, &iskb);
	}
exit:
	/* All failover packets received: unblock the parallel link */
	if (!link->failover_pkts && pl)
		pl->exec_mode = TIPC_LINK_OPEN;
	kfree_skb(*skb);
	*skb = iskb;
	return *skb;
}
1730
Ying Xue2f55c432015-01-09 15:27:00 +08001731static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol)
Per Lidenb97bf3f2006-01-02 19:04:38 +01001732{
Ying Xue2f55c432015-01-09 15:27:00 +08001733 unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;
1734
1735 if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
Allan Stephens5413b4c2011-01-18 13:24:55 -05001736 return;
1737
Ying Xue2f55c432015-01-09 15:27:00 +08001738 l_ptr->tolerance = tol;
Jon Paul Maloya97b9d32015-05-14 10:46:15 -04001739 l_ptr->keepalive_intv = msecs_to_jiffies(intv);
1740 l_ptr->abort_limit = tol / (jiffies_to_msecs(l_ptr->keepalive_intv));
Per Lidenb97bf3f2006-01-02 19:04:38 +01001741}
1742
Jon Paul Maloye3eea1e2015-03-13 16:08:11 -04001743void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
Per Lidenb97bf3f2006-01-02 19:04:38 +01001744{
Jon Paul Maloyed193ec2015-04-02 09:33:02 -04001745 int max_bulk = TIPC_MAX_PUBLICATIONS / (l->mtu / ITEM_SIZE);
Jon Paul Maloy05dcc5a2015-03-13 16:08:10 -04001746
Jon Paul Maloye3eea1e2015-03-13 16:08:11 -04001747 l->window = win;
Jon Paul Maloy1f66d162015-03-25 12:07:24 -04001748 l->backlog[TIPC_LOW_IMPORTANCE].limit = win / 2;
1749 l->backlog[TIPC_MEDIUM_IMPORTANCE].limit = win;
1750 l->backlog[TIPC_HIGH_IMPORTANCE].limit = win / 2 * 3;
1751 l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = win * 2;
1752 l->backlog[TIPC_SYSTEM_IMPORTANCE].limit = max_bulk;
Per Lidenb97bf3f2006-01-02 19:04:38 +01001753}
1754
/* tipc_link_find_owner - locate owner node of link by link's name
 * @net: the applicable net namespace
 * @link_name: pointer to link name string
 * @bearer_id: pointer to index in 'node->links' array where the link was found.
 *
 * Returns pointer to node owning the link, or NULL if no matching link is
 * found. On success *bearer_id holds the slot index; it is reset to 0 first,
 * so it is 0 both for "not found" and for a link on bearer 0.
 */
static struct tipc_node *tipc_link_find_owner(struct net *net,
					      const char *link_name,
					      unsigned int *bearer_id)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *l_ptr;
	struct tipc_node *n_ptr;
	struct tipc_node *found_node = NULL;
	int i;

	*bearer_id = 0;
	rcu_read_lock();
	list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
		/* Node lock stabilizes the links[] array while we scan it */
		tipc_node_lock(n_ptr);
		for (i = 0; i < MAX_BEARERS; i++) {
			l_ptr = n_ptr->links[i].link;
			if (l_ptr && !strcmp(l_ptr->name, link_name)) {
				*bearer_id = i;
				found_node = n_ptr;
				break;
			}
		}
		tipc_node_unlock(n_ptr);
		if (found_node)
			break;
	}
	rcu_read_unlock();

	return found_node;
}
1792
Allan Stephens5c216e12011-10-18 11:34:29 -04001793/**
Per Lidenb97bf3f2006-01-02 19:04:38 +01001794 * link_reset_statistics - reset link statistics
1795 * @l_ptr: pointer to link
1796 */
Paul Gortmakera18c4bc2011-12-29 20:58:42 -05001797static void link_reset_statistics(struct tipc_link *l_ptr)
Per Lidenb97bf3f2006-01-02 19:04:38 +01001798{
1799 memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
Jon Paul Maloya97b9d32015-05-14 10:46:15 -04001800 l_ptr->stats.sent_info = l_ptr->snd_nxt;
1801 l_ptr->stats.recv_info = l_ptr->rcv_nxt;
Per Lidenb97bf3f2006-01-02 19:04:38 +01001802}
1803
Paul Gortmakera18c4bc2011-12-29 20:58:42 -05001804static void link_print(struct tipc_link *l_ptr, const char *str)
Per Lidenb97bf3f2006-01-02 19:04:38 +01001805{
Ying Xue7f9f95d2015-01-09 15:27:06 +08001806 struct tipc_net *tn = net_generic(l_ptr->owner->net, tipc_net_id);
Ying Xue7a2f7d12014-04-21 10:55:46 +08001807 struct tipc_bearer *b_ptr;
1808
1809 rcu_read_lock();
Ying Xue7f9f95d2015-01-09 15:27:06 +08001810 b_ptr = rcu_dereference_rtnl(tn->bearer_list[l_ptr->bearer_id]);
Ying Xue7a2f7d12014-04-21 10:55:46 +08001811 if (b_ptr)
1812 pr_info("%s Link %x<%s>:", str, l_ptr->addr, b_ptr->name);
1813 rcu_read_unlock();
Allan Stephens8d64a5b2010-12-31 18:59:27 +00001814
Per Lidenb97bf3f2006-01-02 19:04:38 +01001815 if (link_working_unknown(l_ptr))
Paul Gortmaker5deedde2012-07-11 19:27:56 -04001816 pr_cont(":WU\n");
Allan Stephens8d64a5b2010-12-31 18:59:27 +00001817 else if (link_reset_reset(l_ptr))
Paul Gortmaker5deedde2012-07-11 19:27:56 -04001818 pr_cont(":RR\n");
Allan Stephens8d64a5b2010-12-31 18:59:27 +00001819 else if (link_reset_unknown(l_ptr))
Paul Gortmaker5deedde2012-07-11 19:27:56 -04001820 pr_cont(":RU\n");
Allan Stephens8d64a5b2010-12-31 18:59:27 +00001821 else if (link_working_working(l_ptr))
Paul Gortmaker5deedde2012-07-11 19:27:56 -04001822 pr_cont(":WW\n");
1823 else
1824 pr_cont("\n");
Per Lidenb97bf3f2006-01-02 19:04:38 +01001825}
Richard Alpe0655f6a2014-11-20 10:29:07 +01001826
1827/* Parse and validate nested (link) properties valid for media, bearer and link
1828 */
1829int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
1830{
1831 int err;
1832
1833 err = nla_parse_nested(props, TIPC_NLA_PROP_MAX, prop,
1834 tipc_nl_prop_policy);
1835 if (err)
1836 return err;
1837
1838 if (props[TIPC_NLA_PROP_PRIO]) {
1839 u32 prio;
1840
1841 prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
1842 if (prio > TIPC_MAX_LINK_PRI)
1843 return -EINVAL;
1844 }
1845
1846 if (props[TIPC_NLA_PROP_TOL]) {
1847 u32 tol;
1848
1849 tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
1850 if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
1851 return -EINVAL;
1852 }
1853
1854 if (props[TIPC_NLA_PROP_WIN]) {
1855 u32 win;
1856
1857 win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
1858 if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN))
1859 return -EINVAL;
1860 }
1861
1862 return 0;
1863}
Richard Alpe7be57fc2014-11-20 10:29:12 +01001864
/* tipc_nl_link_set - netlink handler updating run-time link properties
 *
 * Parses the nested TIPC_NLA_LINK attributes, locates the link by name and
 * applies any of tolerance, priority and window that were supplied.
 * Tolerance and priority changes are advertised to the peer via STATE_MSG.
 *
 * Returns 0 on success or a negative errno.
 */
int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	int res = 0;
	int bearer_id;
	char *name;
	struct tipc_link *link;
	struct tipc_node *node;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct net *net = sock_net(skb->sk);

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
			       info->attrs[TIPC_NLA_LINK],
			       tipc_nl_link_policy);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	/* The broadcast link is configured through its own helper */
	if (strcmp(name, tipc_bclink_name) == 0)
		return tipc_nl_bc_link_set(net, attrs);

	node = tipc_link_find_owner(net, name, &bearer_id);
	if (!node)
		return -EINVAL;

	/* Node lock protects the link pointer and the fields updated below */
	tipc_node_lock(node);

	link = node->links[bearer_id].link;
	if (!link) {
		res = -EINVAL;
		goto out;
	}

	if (attrs[TIPC_NLA_LINK_PROP]) {
		struct nlattr *props[TIPC_NLA_PROP_MAX + 1];

		err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP],
					      props);
		if (err) {
			res = err;
			goto out;
		}

		if (props[TIPC_NLA_PROP_TOL]) {
			u32 tol;

			tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
			link_set_supervision_props(link, tol);
			/* Advertise new tolerance to the peer */
			tipc_link_proto_xmit(link, STATE_MSG, 0, 0, tol, 0);
		}
		if (props[TIPC_NLA_PROP_PRIO]) {
			u32 prio;

			prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
			link->priority = prio;
			/* Advertise new priority to the peer */
			tipc_link_proto_xmit(link, STATE_MSG, 0, 0, 0, prio);
		}
		if (props[TIPC_NLA_PROP_WIN]) {
			u32 win;

			win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
			tipc_link_set_queue_limits(link, win);
		}
	}

out:
	tipc_node_unlock(node);

	return res;
}
Richard Alped8182802014-11-24 11:10:29 +01001942
/* __tipc_nl_add_stats - append a TIPC_NLA_LINK_STATS nest with all counters
 * @skb: message under construction
 * @s: statistics block to serialize
 *
 * Each counter is emitted as one u32 attribute via a key/value table.
 * Returns 0 on success or -EMSGSIZE if the skb ran out of room (the
 * partially written nest is cancelled in that case).
 */
static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
{
	int i;
	struct nlattr *stats;

	struct nla_map {
		u32 key;
		u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, s->recv_info},
		{TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, s->sent_info},
		{TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
		/* Guard against division by zero when no lengths recorded */
		{TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
			s->msg_length_counts : 1},
		{TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
		{TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
		{TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
		{TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
		{TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
		{TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
		{TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
		{TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
		{TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
		{TIPC_NLA_STATS_RX_STATES, s->recv_states},
		{TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
		{TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
		{TIPC_NLA_STATS_TX_STATES, s->sent_states},
		{TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
		{TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, s->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
			(s->accu_queue_sz / s->queue_sz_counts) : 0}
	};

	stats = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
	if (!stats)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, stats);

	return 0;
msg_full:
	nla_nest_cancel(skb, stats);

	return -EMSGSIZE;
}
2007
2008/* Caller should hold appropriate locks to protect the link */
Ying Xue34747532015-01-09 15:27:10 +08002009static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
Nicolas Dichtelf2f67392015-04-28 18:33:50 +02002010 struct tipc_link *link, int nlflags)
Richard Alpe7be57fc2014-11-20 10:29:12 +01002011{
2012 int err;
2013 void *hdr;
2014 struct nlattr *attrs;
2015 struct nlattr *prop;
Ying Xue34747532015-01-09 15:27:10 +08002016 struct tipc_net *tn = net_generic(net, tipc_net_id);
Richard Alpe7be57fc2014-11-20 10:29:12 +01002017
Richard Alpebfb3e5d2015-02-09 09:50:03 +01002018 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
Nicolas Dichtelf2f67392015-04-28 18:33:50 +02002019 nlflags, TIPC_NL_LINK_GET);
Richard Alpe7be57fc2014-11-20 10:29:12 +01002020 if (!hdr)
2021 return -EMSGSIZE;
2022
2023 attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
2024 if (!attrs)
2025 goto msg_full;
2026
2027 if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
2028 goto attr_msg_full;
2029 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST,
Ying Xue34747532015-01-09 15:27:10 +08002030 tipc_cluster_mask(tn->own_addr)))
Richard Alpe7be57fc2014-11-20 10:29:12 +01002031 goto attr_msg_full;
Jon Paul Maloyed193ec2015-04-02 09:33:02 -04002032 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
Richard Alpe7be57fc2014-11-20 10:29:12 +01002033 goto attr_msg_full;
Jon Paul Maloya97b9d32015-05-14 10:46:15 -04002034 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->rcv_nxt))
Richard Alpe7be57fc2014-11-20 10:29:12 +01002035 goto attr_msg_full;
Jon Paul Maloya97b9d32015-05-14 10:46:15 -04002036 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->snd_nxt))
Richard Alpe7be57fc2014-11-20 10:29:12 +01002037 goto attr_msg_full;
2038
2039 if (tipc_link_is_up(link))
2040 if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
2041 goto attr_msg_full;
2042 if (tipc_link_is_active(link))
2043 if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
2044 goto attr_msg_full;
2045
2046 prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
2047 if (!prop)
2048 goto attr_msg_full;
2049 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
2050 goto prop_msg_full;
2051 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
2052 goto prop_msg_full;
2053 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN,
Jon Paul Maloy1f66d162015-03-25 12:07:24 -04002054 link->window))
Richard Alpe7be57fc2014-11-20 10:29:12 +01002055 goto prop_msg_full;
2056 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
2057 goto prop_msg_full;
2058 nla_nest_end(msg->skb, prop);
2059
2060 err = __tipc_nl_add_stats(msg->skb, &link->stats);
2061 if (err)
2062 goto attr_msg_full;
2063
2064 nla_nest_end(msg->skb, attrs);
2065 genlmsg_end(msg->skb, hdr);
2066
2067 return 0;
2068
2069prop_msg_full:
2070 nla_nest_cancel(msg->skb, prop);
2071attr_msg_full:
2072 nla_nest_cancel(msg->skb, attrs);
2073msg_full:
2074 genlmsg_cancel(msg->skb, hdr);
2075
2076 return -EMSGSIZE;
2077}
2078
2079/* Caller should hold node lock */
Ying Xue34747532015-01-09 15:27:10 +08002080static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
2081 struct tipc_node *node, u32 *prev_link)
Richard Alpe7be57fc2014-11-20 10:29:12 +01002082{
2083 u32 i;
2084 int err;
2085
2086 for (i = *prev_link; i < MAX_BEARERS; i++) {
2087 *prev_link = i;
2088
Jon Paul Maloy9d13ec62015-07-16 16:54:19 -04002089 if (!node->links[i].link)
Richard Alpe7be57fc2014-11-20 10:29:12 +01002090 continue;
2091
Jon Paul Maloy9d13ec62015-07-16 16:54:19 -04002092 err = __tipc_nl_add_link(net, msg,
2093 node->links[i].link, NLM_F_MULTI);
Richard Alpe7be57fc2014-11-20 10:29:12 +01002094 if (err)
2095 return err;
2096 }
2097 *prev_link = 0;
2098
2099 return 0;
2100}
2101
/* tipc_nl_link_dump - netlink dump handler for all links in the namespace
 *
 * Resumable: cb->args[0..2] carry the previously dumped node address,
 * link index within that node, and a "done" flag across invocations.
 * The broadcast link is emitted first, at the start of a fresh dump.
 */
int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *node;
	struct tipc_nl_msg msg;
	u32 prev_node = cb->args[0];
	u32 prev_link = cb->args[1];
	int done = cb->args[2];
	int err;

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rcu_read_lock();
	if (prev_node) {
		/* Resuming: re-locate the node we stopped at last time */
		node = tipc_node_find(net, prev_node);
		if (!node) {
			/* We never set seq or call nl_dump_check_consistent()
			 * this means that setting prev_seq here will cause the
			 * consistence check to fail in the netlink callback
			 * handler. Resulting in the last NLMSG_DONE message
			 * having the NLM_F_DUMP_INTR flag set.
			 */
			cb->prev_seq = 1;
			goto out;
		}
		/* Drop the reference taken by tipc_node_find(); the RCU
		 * read lock keeps the list iteration below safe.
		 */
		tipc_node_put(node);

		list_for_each_entry_continue_rcu(node, &tn->node_list,
						 list) {
			tipc_node_lock(node);
			err = __tipc_nl_add_node_links(net, &msg, node,
						       &prev_link);
			tipc_node_unlock(node);
			if (err)
				goto out;

			prev_node = node->addr;
		}
	} else {
		/* Fresh dump: broadcast link first, then every node */
		err = tipc_nl_add_bc_link(net, &msg);
		if (err)
			goto out;

		list_for_each_entry_rcu(node, &tn->node_list, list) {
			tipc_node_lock(node);
			err = __tipc_nl_add_node_links(net, &msg, node,
						       &prev_link);
			tipc_node_unlock(node);
			if (err)
				goto out;

			prev_node = node->addr;
		}
	}
	done = 1;
out:
	rcu_read_unlock();

	/* Persist resume state for the next invocation */
	cb->args[0] = prev_node;
	cb->args[1] = prev_link;
	cb->args[2] = done;

	return skb->len;
}
2172
2173int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info)
2174{
Ying Xuef2f98002015-01-09 15:27:05 +08002175 struct net *net = genl_info_net(info);
Richard Alpe7be57fc2014-11-20 10:29:12 +01002176 struct tipc_nl_msg msg;
Richard Alpe7be57fc2014-11-20 10:29:12 +01002177 char *name;
Richard Alpe7be57fc2014-11-20 10:29:12 +01002178 int err;
2179
Richard Alpe7be57fc2014-11-20 10:29:12 +01002180 msg.portid = info->snd_portid;
2181 msg.seq = info->snd_seq;
2182
Richard Alpe670f4f82015-05-06 13:58:55 +02002183 if (!info->attrs[TIPC_NLA_LINK_NAME])
2184 return -EINVAL;
2185 name = nla_data(info->attrs[TIPC_NLA_LINK_NAME]);
2186
2187 msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
2188 if (!msg.skb)
2189 return -ENOMEM;
2190
2191 if (strcmp(name, tipc_bclink_name) == 0) {
2192 err = tipc_nl_add_bc_link(net, &msg);
2193 if (err) {
2194 nlmsg_free(msg.skb);
2195 return err;
2196 }
2197 } else {
2198 int bearer_id;
2199 struct tipc_node *node;
2200 struct tipc_link *link;
2201
2202 node = tipc_link_find_owner(net, name, &bearer_id);
2203 if (!node)
2204 return -EINVAL;
2205
2206 tipc_node_lock(node);
Jon Paul Maloy9d13ec62015-07-16 16:54:19 -04002207 link = node->links[bearer_id].link;
Richard Alpe670f4f82015-05-06 13:58:55 +02002208 if (!link) {
2209 tipc_node_unlock(node);
2210 nlmsg_free(msg.skb);
2211 return -EINVAL;
2212 }
2213
2214 err = __tipc_nl_add_link(net, &msg, link, 0);
2215 tipc_node_unlock(node);
2216 if (err) {
2217 nlmsg_free(msg.skb);
2218 return err;
2219 }
Richard Alpe7be57fc2014-11-20 10:29:12 +01002220 }
2221
Richard Alpe670f4f82015-05-06 13:58:55 +02002222 return genlmsg_reply(msg.skb, info);
Richard Alpe7be57fc2014-11-20 10:29:12 +01002223}
Richard Alpeae363422014-11-20 10:29:14 +01002224
2225int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info)
2226{
2227 int err;
2228 char *link_name;
2229 unsigned int bearer_id;
2230 struct tipc_link *link;
2231 struct tipc_node *node;
2232 struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
Richard Alpe18178772015-02-09 09:50:09 +01002233 struct net *net = sock_net(skb->sk);
Richard Alpeae363422014-11-20 10:29:14 +01002234
2235 if (!info->attrs[TIPC_NLA_LINK])
2236 return -EINVAL;
2237
2238 err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
2239 info->attrs[TIPC_NLA_LINK],
2240 tipc_nl_link_policy);
2241 if (err)
2242 return err;
2243
2244 if (!attrs[TIPC_NLA_LINK_NAME])
2245 return -EINVAL;
2246
2247 link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
2248
2249 if (strcmp(link_name, tipc_bclink_name) == 0) {
Ying Xue1da46562015-01-09 15:27:07 +08002250 err = tipc_bclink_reset_stats(net);
Richard Alpeae363422014-11-20 10:29:14 +01002251 if (err)
2252 return err;
2253 return 0;
2254 }
2255
Ying Xuef2f98002015-01-09 15:27:05 +08002256 node = tipc_link_find_owner(net, link_name, &bearer_id);
Richard Alpeae363422014-11-20 10:29:14 +01002257 if (!node)
2258 return -EINVAL;
2259
2260 tipc_node_lock(node);
2261
Jon Paul Maloy9d13ec62015-07-16 16:54:19 -04002262 link = node->links[bearer_id].link;
Richard Alpeae363422014-11-20 10:29:14 +01002263 if (!link) {
2264 tipc_node_unlock(node);
2265 return -EINVAL;
2266 }
2267
2268 link_reset_statistics(link);
2269
2270 tipc_node_unlock(node);
2271
2272 return 0;
2273}