/*
 * net/tipc/link.c: TIPC link code
 *
 * Copyright (c) 1996-2007, 2012-2016, Ericsson AB
 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "subscr.h"
#include "link.h"
#include "bcast.h"
#include "socket.h"
#include "name_distr.h"
#include "discover.h"
#include "netlink.h"

#include <linux/pkt_sched.h>

struct tipc_stats {
	u32 sent_info;		/* used in counting # sent packets */
	u32 recv_info;		/* used in counting # recv'd packets */
	u32 sent_states;
	u32 recv_states;
	u32 sent_probes;
	u32 recv_probes;
	u32 sent_nacks;
	u32 recv_nacks;
	u32 sent_acks;
	u32 sent_bundled;
	u32 sent_bundles;
	u32 recv_bundled;
	u32 recv_bundles;
	u32 retransmitted;
	u32 sent_fragmented;
	u32 sent_fragments;
	u32 recv_fragmented;
	u32 recv_fragments;
	u32 link_congs;		/* # port sends blocked by congestion */
	u32 deferred_recv;
	u32 duplicates;
	u32 max_queue_sz;	/* send queue size high water mark */
	u32 accu_queue_sz;	/* used for send queue size profiling */
	u32 queue_sz_counts;	/* used for send queue size profiling */
	u32 msg_length_counts;	/* used for message length profiling */
	u32 msg_lengths_total;	/* used for message length profiling */
	u32 msg_length_profile[7]; /* used for msg. length profiling */
};

/**
 * struct tipc_link - TIPC link data structure
 * @addr: network address of link's peer node
 * @name: link name character string
 * @net: pointer to namespace struct
 * @peer_session: link session # being used by peer end of link
 * @session: link session # being used by this end of link
 * @peer_bearer_id: bearer id used by link's peer endpoint
 * @bearer_id: local bearer id used by link
 * @tolerance: minimum link continuity loss needed to reset link [in ms]
 * @keepalive_intv: link keepalive timer interval
 * @abort_limit: # of unacknowledged continuity probes needed to reset link
 * @state: current state of link FSM
 * @peer_caps: bitmap describing capabilities of peer node
 * @active: marks the link as one of the node's active links
 * @silent_intv_cnt: # of timer intervals without any reception from peer
 * @if_name: local interface name, also carried in RESET/ACTIVATE messages
 * @priority: current link priority
 * @net_plane: current link network plane ('A' through 'H')
 * @drop_point: seq # below which tunneled packets are dropped after failover
 * @failover_reasm_skb: reassembly buffer saved across a failover
 * @mtu: current maximum packet size for this link
 * @advertised_mtu: advertised own mtu when link is being established
 * @transmq: queue for sent, non-acked messages
 * @backlogq: queue for messages waiting to be sent
 * @backlog: len/limit counters for the backlog, indexed by msg importance
 * @snd_nxt: next sequence number to use for outbound messages
 * @last_retransm: sequence number of most recently retransmitted message
 * @window: current send window of the link
 * @stale_count: # of identical retransmit requests made by peer
 * @rcv_nxt: next sequence number to expect for inbound messages
 * @rcv_unacked: # of inbound messages rx'd without ack'ing back to peer
 * @deferdq: deferred queue of out-of-sequence messages received from peer
 * @inputq: buffer queue for messages to be delivered upwards
 * @namedq: buffer queue for name table messages to be delivered upwards
 * @wakeupq: linked list of wakeup msgs waiting for link congestion to abate
 * @reasm_buf: head of partially reassembled inbound message fragments
 * @ackers: # of peers that needs to ack each packet before it can be released
 * @acked: # last packet acked by a certain peer. Used for broadcast.
 * @bc_rcvlink: the peer specific link used for broadcast reception
 * @bc_sndlink: the namespace global link used for broadcast sending
 * @nack_state: send state for broadcast NACKs (see BC_NACK_SND_*)
 * @bc_peer_is_up: true if peer's broadcast reception is confirmed
 * @stats: collects statistics regarding link activity
 */
struct tipc_link {
	u32 addr;
	char name[TIPC_MAX_LINK_NAME];
	struct net *net;

	/* Management and link supervision data */
	u32 peer_session;
	u32 session;
	u32 peer_bearer_id;
	u32 bearer_id;
	u32 tolerance;
	unsigned long keepalive_intv;
	u32 abort_limit;
	u32 state;
	u16 peer_caps;
	bool active;
	u32 silent_intv_cnt;
	char if_name[TIPC_MAX_IF_NAME];
	u32 priority;
	char net_plane;

	/* Failover/synch */
	u16 drop_point;
	struct sk_buff *failover_reasm_skb;

	/* Max packet negotiation */
	u16 mtu;
	u16 advertised_mtu;

	/* Sending */
	struct sk_buff_head transmq;
	struct sk_buff_head backlogq;
	struct {
		u16 len;
		u16 limit;
	} backlog[5];
	u16 snd_nxt;
	u16 last_retransm;
	u16 window;
	u32 stale_count;

	/* Reception */
	u16 rcv_nxt;
	u32 rcv_unacked;
	struct sk_buff_head deferdq;
	struct sk_buff_head *inputq;
	struct sk_buff_head *namedq;

	/* Congestion handling */
	struct sk_buff_head wakeupq;

	/* Fragmentation/reassembly */
	struct sk_buff *reasm_buf;

	/* Broadcast */
	u16 ackers;
	u16 acked;
	struct tipc_link *bc_rcvlink;
	struct tipc_link *bc_sndlink;
	int nack_state;
	bool bc_peer_is_up;

	/* Statistics */
	struct tipc_stats stats;
};

/*
 * Error message prefixes
 */
static const char *link_co_err = "Link tunneling error, ";
static const char *link_rst_msg = "Resetting link ";

/* Send states for broadcast NACKs
 */
enum {
	BC_NACK_SND_CONDITIONAL,
	BC_NACK_SND_UNCONDITIONAL,
	BC_NACK_SND_SUPPRESS,
};

/*
 * Interval between NACKs when packets arrive out of order
 */
#define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2)

/* Wildcard value for link session numbers. When it is known that
 * peer endpoint is down, any session number must be accepted.
 */
#define ANY_SESSION 0x10000

/* Link FSM states:
 */
enum {
	LINK_ESTABLISHED	= 0xe,
	LINK_ESTABLISHING	= 0xe << 4,
	LINK_RESET		= 0x1 << 8,
	LINK_RESETTING		= 0x2 << 12,
	LINK_PEER_RESET		= 0xd << 16,
	LINK_FAILINGOVER	= 0xf << 20,
	LINK_SYNCHING		= 0xc << 24
};
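
/* Each state above occupies its own four-bit field, so any subset of
 * states can be tested with a single bitwise AND against an OR'ed mask,
 * as link_is_up() and tipc_link_is_blocked() below do.
 */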

/* Link FSM state checking routines
 */
static int link_is_up(struct tipc_link *l)
{
	return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
}

static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
			       struct sk_buff_head *xmitq);
static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
				      u16 rcvgap, int tolerance, int priority,
				      struct sk_buff_head *xmitq);
static void link_print(struct tipc_link *l, const char *str);
static void tipc_link_build_nack_msg(struct tipc_link *l,
				     struct sk_buff_head *xmitq);
static void tipc_link_build_bc_init_msg(struct tipc_link *l,
					struct sk_buff_head *xmitq);
static bool tipc_link_release_pkts(struct tipc_link *l, u16 to);

/*
 * Simple non-static link routines (i.e. referenced outside this file)
 */
bool tipc_link_is_up(struct tipc_link *l)
{
	return link_is_up(l);
}

bool tipc_link_peer_is_down(struct tipc_link *l)
{
	return l->state == LINK_PEER_RESET;
}

bool tipc_link_is_reset(struct tipc_link *l)
{
	return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING);
}

bool tipc_link_is_establishing(struct tipc_link *l)
{
	return l->state == LINK_ESTABLISHING;
}

bool tipc_link_is_synching(struct tipc_link *l)
{
	return l->state == LINK_SYNCHING;
}

bool tipc_link_is_failingover(struct tipc_link *l)
{
	return l->state == LINK_FAILINGOVER;
}

bool tipc_link_is_blocked(struct tipc_link *l)
{
	return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER);
}

static bool link_is_bc_sndlink(struct tipc_link *l)
{
	return !l->bc_sndlink;
}

static bool link_is_bc_rcvlink(struct tipc_link *l)
{
	return ((l->bc_rcvlink == l) && !link_is_bc_sndlink(l));
}

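/* Note: the broadcast send link is the only link created with a NULL
 * bc_sndlink pointer, and a broadcast receive link is recognized by
 * pointing back to itself via bc_rcvlink (see tipc_link_bc_create()).
 */
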
int tipc_link_is_active(struct tipc_link *l)
{
	return l->active;
}

void tipc_link_set_active(struct tipc_link *l, bool active)
{
	l->active = active;
}

u32 tipc_link_id(struct tipc_link *l)
{
	return l->peer_bearer_id << 16 | l->bearer_id;
}

int tipc_link_window(struct tipc_link *l)
{
	return l->window;
}

int tipc_link_prio(struct tipc_link *l)
{
	return l->priority;
}

unsigned long tipc_link_tolerance(struct tipc_link *l)
{
	return l->tolerance;
}

struct sk_buff_head *tipc_link_inputq(struct tipc_link *l)
{
	return l->inputq;
}

char tipc_link_plane(struct tipc_link *l)
{
	return l->net_plane;
}

void tipc_link_add_bc_peer(struct tipc_link *snd_l,
			   struct tipc_link *uc_l,
			   struct sk_buff_head *xmitq)
{
	struct tipc_link *rcv_l = uc_l->bc_rcvlink;

	snd_l->ackers++;
	rcv_l->acked = snd_l->snd_nxt - 1;
	snd_l->state = LINK_ESTABLISHED;
	tipc_link_build_bc_init_msg(uc_l, xmitq);
}

void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
			      struct tipc_link *rcv_l,
			      struct sk_buff_head *xmitq)
{
	u16 ack = snd_l->snd_nxt - 1;

	snd_l->ackers--;
	tipc_link_bc_ack_rcv(rcv_l, ack, xmitq);
	tipc_link_reset(rcv_l);
	rcv_l->state = LINK_RESET;
	if (!snd_l->ackers) {
		tipc_link_reset(snd_l);
		snd_l->state = LINK_RESET;
		__skb_queue_purge(xmitq);
	}
}

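/* The ackers count updated above acts as a reference count: every packet
 * queued on the broadcast send link is stamped with the current number of
 * peers (TIPC_SKB_CB(skb)->ackers in tipc_link_xmit()), and is assumed to
 * become releasable only once that many peers have acknowledged it.
 */
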
int tipc_link_bc_peers(struct tipc_link *l)
{
	return l->ackers;
}

void tipc_link_set_mtu(struct tipc_link *l, int mtu)
{
	l->mtu = mtu;
}

int tipc_link_mtu(struct tipc_link *l)
{
	return l->mtu;
}

u16 tipc_link_rcv_nxt(struct tipc_link *l)
{
	return l->rcv_nxt;
}

u16 tipc_link_acked(struct tipc_link *l)
{
	return l->acked;
}

char *tipc_link_name(struct tipc_link *l)
{
	return l->name;
}

/**
 * tipc_link_create - create a new link
 * @net: pointer to associated network namespace
 * @if_name: associated interface name
 * @bearer_id: id (index) of associated bearer
 * @tolerance: link tolerance to be used by link
 * @net_plane: network plane (A,B,c..) this link belongs to
 * @mtu: mtu to be advertised by link
 * @priority: priority to be used by link
 * @window: send window to be used by link
 * @session: session to be used by link
 * @ownnode: identity of own node
 * @peer: node id of peer node
 * @peer_caps: bitmap describing peer node capabilities
 * @bc_sndlink: the namespace global link used for broadcast sending
 * @bc_rcvlink: the peer specific link used for broadcast reception
 * @inputq: queue to put messages ready for delivery
 * @namedq: queue to put binding table update messages ready for delivery
 * @link: return value, pointer to put the created link
 *
 * Returns true if link was created, otherwise false
 */
bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
		      int tolerance, char net_plane, u32 mtu, int priority,
		      int window, u32 session, u32 ownnode, u32 peer,
		      u16 peer_caps,
		      struct tipc_link *bc_sndlink,
		      struct tipc_link *bc_rcvlink,
		      struct sk_buff_head *inputq,
		      struct sk_buff_head *namedq,
		      struct tipc_link **link)
{
	struct tipc_link *l;

	l = kzalloc(sizeof(*l), GFP_ATOMIC);
	if (!l)
		return false;
	*link = l;
	l->session = session;

	/* Note: peer i/f name is completed by reset/activate message */
	sprintf(l->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
		tipc_zone(ownnode), tipc_cluster(ownnode), tipc_node(ownnode),
		if_name, tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
	strcpy(l->if_name, if_name);
	l->addr = peer;
	l->peer_caps = peer_caps;
	l->net = net;
	l->peer_session = ANY_SESSION;
	l->bearer_id = bearer_id;
	l->tolerance = tolerance;
	l->net_plane = net_plane;
	l->advertised_mtu = mtu;
	l->mtu = mtu;
	l->priority = priority;
	tipc_link_set_queue_limits(l, window);
	l->ackers = 1;
	l->bc_sndlink = bc_sndlink;
	l->bc_rcvlink = bc_rcvlink;
	l->inputq = inputq;
	l->namedq = namedq;
	l->state = LINK_RESETTING;
	__skb_queue_head_init(&l->transmq);
	__skb_queue_head_init(&l->backlogq);
	__skb_queue_head_init(&l->deferdq);
	skb_queue_head_init(&l->wakeupq);
	skb_queue_head_init(l->inputq);
	return true;
}

/**
 * tipc_link_bc_create - create new link to be used for broadcast
 * @net: pointer to associated network namespace
 * @ownnode: identity of own node
 * @peer: node id of peer node
 * @mtu: mtu to be used
 * @window: send window to be used
 * @peer_caps: bitmap describing peer node capabilities
 * @inputq: queue to put messages ready for delivery
 * @namedq: queue to put binding table update messages ready for delivery
 * @bc_sndlink: the namespace global link used for broadcast sending
 * @link: return value, pointer to put the created link
 *
 * Returns true if link was created, otherwise false
 */
bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
			 int mtu, int window, u16 peer_caps,
			 struct sk_buff_head *inputq,
			 struct sk_buff_head *namedq,
			 struct tipc_link *bc_sndlink,
			 struct tipc_link **link)
{
	struct tipc_link *l;

	if (!tipc_link_create(net, "", MAX_BEARERS, 0, 'Z', mtu, 0, window,
			      0, ownnode, peer, peer_caps, bc_sndlink,
			      NULL, inputq, namedq, link))
		return false;

	l = *link;
	strcpy(l->name, tipc_bclink_name);
	tipc_link_reset(l);
	l->state = LINK_RESET;
	l->ackers = 0;
	l->bc_rcvlink = l;

	/* Broadcast send link is always up */
	if (link_is_bc_sndlink(l))
		l->state = LINK_ESTABLISHED;

	return true;
}

/**
 * tipc_link_fsm_evt - link finite state machine
 * @l: pointer to link
 * @evt: state machine event to be processed
 */
int tipc_link_fsm_evt(struct tipc_link *l, int evt)
{
	int rc = 0;

	switch (l->state) {
	case LINK_RESETTING:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_FAILURE_EVT:
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILOVER_END_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_RESET:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_ESTABLISHING;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
			l->state = LINK_FAILINGOVER;
			/* fall through */
		case LINK_FAILURE_EVT:
		case LINK_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILOVER_END_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_PEER_RESET:
		switch (evt) {
		case LINK_RESET_EVT:
			l->state = LINK_ESTABLISHING;
			break;
		case LINK_PEER_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILURE_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_FAILINGOVER:
		switch (evt) {
		case LINK_FAILOVER_END_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_PEER_RESET_EVT:
		case LINK_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILURE_EVT:
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_ESTABLISHING:
		switch (evt) {
		case LINK_ESTABLISH_EVT:
			l->state = LINK_ESTABLISHED;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
			l->state = LINK_FAILINGOVER;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_FAILURE_EVT:
		case LINK_PEER_RESET_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
			break;
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_ESTABLISHED:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_FAILURE_EVT:
			l->state = LINK_RESETTING;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_ESTABLISH_EVT:
		case LINK_SYNCH_END_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
			l->state = LINK_SYNCHING;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_SYNCHING:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_FAILURE_EVT:
			l->state = LINK_RESETTING;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_ESTABLISH_EVT:
		case LINK_SYNCH_BEGIN_EVT:
			break;
		case LINK_SYNCH_END_EVT:
			l->state = LINK_ESTABLISHED;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	default:
		pr_err("Unknown FSM state %x in %s\n", l->state, l->name);
	}
	return rc;
illegal_evt:
	pr_err("Illegal FSM event %x in state %x on link %s\n",
	       evt, l->state, l->name);
	return rc;
}

/* link_profile_stats - update statistical profiling of traffic
 */
static void link_profile_stats(struct tipc_link *l)
{
	struct sk_buff *skb;
	struct tipc_msg *msg;
	int length;

	/* Update counters used in statistical profiling of send traffic */
	l->stats.accu_queue_sz += skb_queue_len(&l->transmq);
	l->stats.queue_sz_counts++;

	skb = skb_peek(&l->transmq);
	if (!skb)
		return;
	msg = buf_msg(skb);
	length = msg_size(msg);

	if (msg_user(msg) == MSG_FRAGMENTER) {
		if (msg_type(msg) != FIRST_FRAGMENT)
			return;
		length = msg_size(msg_get_wrapped(msg));
	}
	l->stats.msg_lengths_total += length;
	l->stats.msg_length_counts++;
	if (length <= 64)
		l->stats.msg_length_profile[0]++;
	else if (length <= 256)
		l->stats.msg_length_profile[1]++;
	else if (length <= 1024)
		l->stats.msg_length_profile[2]++;
	else if (length <= 4096)
		l->stats.msg_length_profile[3]++;
	else if (length <= 16384)
		l->stats.msg_length_profile[4]++;
	else if (length <= 32768)
		l->stats.msg_length_profile[5]++;
	else
		l->stats.msg_length_profile[6]++;
}

/* tipc_link_timeout - perform periodic task as instructed from node timeout
 */
int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	int rc = 0;
	int mtyp = STATE_MSG;
	bool xmit = false;
	bool prb = false;
	u16 bc_snt = l->bc_sndlink->snd_nxt - 1;
	u16 bc_acked = l->bc_rcvlink->acked;
	bool bc_up = link_is_up(l->bc_rcvlink);

	link_profile_stats(l);

	switch (l->state) {
	case LINK_ESTABLISHED:
	case LINK_SYNCHING:
		if (!l->silent_intv_cnt) {
			if (bc_up && (bc_acked != bc_snt))
				xmit = true;
		} else if (l->silent_intv_cnt <= l->abort_limit) {
			xmit = true;
			prb = true;
		} else {
			rc |= tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		}
		l->silent_intv_cnt++;
		break;
	case LINK_RESET:
		xmit = true;
		mtyp = RESET_MSG;
		break;
	case LINK_ESTABLISHING:
		xmit = true;
		mtyp = ACTIVATE_MSG;
		break;
	case LINK_PEER_RESET:
	case LINK_RESETTING:
	case LINK_FAILINGOVER:
		break;
	default:
		break;
	}

	if (xmit)
		tipc_link_build_proto_msg(l, mtyp, prb, 0, 0, 0, xmitq);

	return rc;
}

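/* In other words: while the link is up, a state message goes out only if
 * the peer has been silent for at least one full interval (then with the
 * probe bit set) or if unacked broadcast packets are outstanding; after
 * more than abort_limit consecutive silent intervals the link is declared
 * failed.
 */
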
/**
 * link_schedule_user - schedule a message sender for wakeup after congestion
 * @link: congested link
 * @list: message that was attempted sent
 * Create pseudo msg to send back to user when congestion abates
 * Does not consume buffer list
 */
static int link_schedule_user(struct tipc_link *link, struct sk_buff_head *list)
{
	struct tipc_msg *msg = buf_msg(skb_peek(list));
	int imp = msg_importance(msg);
	u32 oport = msg_origport(msg);
	u32 addr = tipc_own_addr(link->net);
	struct sk_buff *skb;

	/* This really cannot happen...  */
	if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) {
		pr_warn("%s<%s>, send queue full", link_rst_msg, link->name);
		return -ENOBUFS;
	}
	/* Non-blocking sender: */
	if (TIPC_SKB_CB(skb_peek(list))->wakeup_pending)
		return -ELINKCONG;

	/* Create and schedule wakeup pseudo message */
	skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
			      addr, addr, oport, 0, 0);
	if (!skb)
		return -ENOBUFS;
	TIPC_SKB_CB(skb)->chain_sz = skb_queue_len(list);
	TIPC_SKB_CB(skb)->chain_imp = imp;
	skb_queue_tail(&link->wakeupq, skb);
	link->stats.link_congs++;
	return -ELINKCONG;
}

/**
 * link_prepare_wakeup - prepare users for wakeup after congestion
 * @l: congested link
 * Move a number of waiting users, as permitted by available space in
 * the send queue, from link wait queue to node wait queue for wakeup
 */
void link_prepare_wakeup(struct tipc_link *l)
{
	int pnd[TIPC_SYSTEM_IMPORTANCE + 1] = {0,};
	int imp, lim;
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&l->wakeupq, skb, tmp) {
		imp = TIPC_SKB_CB(skb)->chain_imp;
		lim = l->window + l->backlog[imp].limit;
		pnd[imp] += TIPC_SKB_CB(skb)->chain_sz;
		if ((pnd[imp] + l->backlog[imp].len) >= lim)
			break;
		skb_unlink(skb, &l->wakeupq);
		skb_queue_tail(l->inputq, skb);
	}
}

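/* Note that the walk above stops at the first sender whose importance
 * level lacks headroom; senders queued behind it keep waiting even if
 * their own level still has room, which preserves wakeup order.
 */
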
void tipc_link_reset(struct tipc_link *l)
{
	l->peer_session = ANY_SESSION;
	l->session++;
	l->mtu = l->advertised_mtu;
	__skb_queue_purge(&l->transmq);
	__skb_queue_purge(&l->deferdq);
	skb_queue_splice_init(&l->wakeupq, l->inputq);
	__skb_queue_purge(&l->backlogq);
	l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
	l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
	l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
	l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
	l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
	kfree_skb(l->reasm_buf);
	kfree_skb(l->failover_reasm_skb);
	l->reasm_buf = NULL;
	l->failover_reasm_skb = NULL;
	l->rcv_unacked = 0;
	l->snd_nxt = 1;
	l->rcv_nxt = 1;
	l->acked = 0;
	l->silent_intv_cnt = 0;
	l->stats.recv_info = 0;
	l->stale_count = 0;
	l->bc_peer_is_up = false;
	tipc_link_reset_stats(l);
}

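/* Bumping l->session while resetting peer_session to ANY_SESSION lets
 * the peer distinguish a restarted endpoint from delayed traffic of the
 * previous session (see the ANY_SESSION definition above).
 */
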
/**
 * tipc_link_xmit(): enqueue buffer list according to queue situation
 * @l: link to use
 * @list: chain of buffers containing message
 * @xmitq: returned list of packets to be sent by caller
 *
 * Consumes the buffer chain, except when returning -ELINKCONG,
 * since the caller then may want to make more send attempts.
 * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
 */
int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
		   struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb_peek(list));
	unsigned int maxwin = l->window;
	unsigned int i, imp = msg_importance(hdr);
	unsigned int mtu = l->mtu;
	u16 ack = l->rcv_nxt - 1;
	u16 seqno = l->snd_nxt;
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
	struct sk_buff_head *transmq = &l->transmq;
	struct sk_buff_head *backlogq = &l->backlogq;
	struct sk_buff *skb, *_skb, *bskb;

	/* Match msg importance against this and all higher backlog limits: */
	for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) {
		if (unlikely(l->backlog[i].len >= l->backlog[i].limit))
			return link_schedule_user(l, list);
	}
	if (unlikely(msg_size(hdr) > mtu)) {
		skb_queue_purge(list);
		return -EMSGSIZE;
	}

	/* Prepare each packet for sending, and add to relevant queue: */
	while (skb_queue_len(list)) {
		skb = skb_peek(list);
		hdr = buf_msg(skb);
		msg_set_seqno(hdr, seqno);
		msg_set_ack(hdr, ack);
		msg_set_bcast_ack(hdr, bc_ack);

		if (likely(skb_queue_len(transmq) < maxwin)) {
			_skb = skb_clone(skb, GFP_ATOMIC);
			if (!_skb) {
				skb_queue_purge(list);
				return -ENOBUFS;
			}
			__skb_dequeue(list);
			__skb_queue_tail(transmq, skb);
			__skb_queue_tail(xmitq, _skb);
			TIPC_SKB_CB(skb)->ackers = l->ackers;
			l->rcv_unacked = 0;
			seqno++;
			continue;
		}
		if (tipc_msg_bundle(skb_peek_tail(backlogq), hdr, mtu)) {
			kfree_skb(__skb_dequeue(list));
			l->stats.sent_bundled++;
			continue;
		}
		if (tipc_msg_make_bundle(&bskb, hdr, mtu, l->addr)) {
			kfree_skb(__skb_dequeue(list));
			__skb_queue_tail(backlogq, bskb);
			l->backlog[msg_importance(buf_msg(bskb))].len++;
			l->stats.sent_bundled++;
			l->stats.sent_bundles++;
			continue;
		}
		l->backlog[imp].len += skb_queue_len(list);
		skb_queue_splice_tail_init(list, backlogq);
	}
	l->snd_nxt = seqno;
	return 0;
}

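/* A typical caller pattern (sketch only; the real call sites live in
 * node.c and socket.c) collects the cloned packets into xmitq and hands
 * them to the bearer layer, e.g.:
 *
 *	struct sk_buff_head xmitq;
 *
 *	__skb_queue_head_init(&xmitq);
 *	rc = tipc_link_xmit(l, &list, &xmitq);
 *	tipc_bearer_xmit(net, bearer_id, &xmitq, &maddr);
 *
 * The link itself never touches the media.
 */
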
void tipc_link_advance_backlog(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	struct sk_buff *skb, *_skb;
	struct tipc_msg *hdr;
	u16 seqno = l->snd_nxt;
	u16 ack = l->rcv_nxt - 1;
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;

	while (skb_queue_len(&l->transmq) < l->window) {
		skb = skb_peek(&l->backlogq);
		if (!skb)
			break;
		_skb = skb_clone(skb, GFP_ATOMIC);
		if (!_skb)
			break;
		__skb_dequeue(&l->backlogq);
		hdr = buf_msg(skb);
		l->backlog[msg_importance(hdr)].len--;
		__skb_queue_tail(&l->transmq, skb);
		__skb_queue_tail(xmitq, _skb);
		TIPC_SKB_CB(skb)->ackers = l->ackers;
		msg_set_seqno(hdr, seqno);
		msg_set_ack(hdr, ack);
		msg_set_bcast_ack(hdr, bc_ack);
		l->rcv_unacked = 0;
		seqno++;
	}
	l->snd_nxt = seqno;
}

static void link_retransmit_failure(struct tipc_link *l, struct sk_buff *skb)
{
	struct tipc_msg *hdr = buf_msg(skb);

	pr_warn("Retransmission failure on link <%s>\n", l->name);
	link_print(l, "Resetting link ");
	pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
		msg_user(hdr), msg_type(hdr), msg_size(hdr), msg_errcode(hdr));
	pr_info("sqno %u, prev: %x, src: %x\n",
		msg_seqno(hdr), msg_prevnode(hdr), msg_orignode(hdr));
}

int tipc_link_retrans(struct tipc_link *l, u16 from, u16 to,
		      struct sk_buff_head *xmitq)
{
	struct sk_buff *_skb, *skb = skb_peek(&l->transmq);
	struct tipc_msg *hdr;
	u16 ack = l->rcv_nxt - 1;
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;

	if (!skb)
		return 0;

	/* Detect repeated retransmit failures on same packet */
	if (likely(l->last_retransm != buf_seqno(skb))) {
		l->last_retransm = buf_seqno(skb);
		l->stale_count = 1;
	} else if (++l->stale_count > 100) {
		link_retransmit_failure(l, skb);
		return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
	}

	/* Move forward to where retransmission should start */
	skb_queue_walk(&l->transmq, skb) {
		if (!less(buf_seqno(skb), from))
			break;
	}

	skb_queue_walk_from(&l->transmq, skb) {
		if (more(buf_seqno(skb), to))
			break;
		hdr = buf_msg(skb);
		_skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC);
		if (!_skb)
			return 0;
		hdr = buf_msg(_skb);
		msg_set_ack(hdr, ack);
		msg_set_bcast_ack(hdr, bc_ack);
		_skb->priority = TC_PRIO_CONTROL;
		__skb_queue_tail(xmitq, _skb);
		l->stats.retransmitted++;
	}
	return 0;
}

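/* The stale_count logic above fails the link only after the same
 * head-of-queue packet has triggered more than 100 retransmit rounds
 * without ever being acknowledged and released.
 */
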
/* tipc_data_input - deliver data and name distr msgs to upper layer
 *
 * Consumes buffer if message is of right type
 * Node lock must be held
 */
static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
			    struct sk_buff_head *inputq)
{
	switch (msg_user(buf_msg(skb))) {
	case TIPC_LOW_IMPORTANCE:
	case TIPC_MEDIUM_IMPORTANCE:
	case TIPC_HIGH_IMPORTANCE:
	case TIPC_CRITICAL_IMPORTANCE:
	case CONN_MANAGER:
		skb_queue_tail(inputq, skb);
		return true;
	case NAME_DISTRIBUTOR:
		l->bc_rcvlink->state = LINK_ESTABLISHED;
		skb_queue_tail(l->namedq, skb);
		return true;
	case MSG_BUNDLER:
	case TUNNEL_PROTOCOL:
	case MSG_FRAGMENTER:
	case BCAST_PROTOCOL:
		return false;
	default:
		pr_warn("Dropping received illegal msg type\n");
		kfree_skb(skb);
		return false;
	}
}

/* tipc_link_input - process packet that has passed link protocol check
 *
 * Consumes buffer
 */
static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
			   struct sk_buff_head *inputq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	struct sk_buff **reasm_skb = &l->reasm_buf;
	struct sk_buff *iskb;
	struct sk_buff_head tmpq;
	int usr = msg_user(hdr);
	int rc = 0;
	int pos = 0;
	int ipos = 0;

	if (unlikely(usr == TUNNEL_PROTOCOL)) {
		if (msg_type(hdr) == SYNCH_MSG) {
			__skb_queue_purge(&l->deferdq);
			goto drop;
		}
		if (!tipc_msg_extract(skb, &iskb, &ipos))
			return rc;
		kfree_skb(skb);
		skb = iskb;
		hdr = buf_msg(skb);
		if (less(msg_seqno(hdr), l->drop_point))
			goto drop;
		if (tipc_data_input(l, skb, inputq))
			return rc;
		usr = msg_user(hdr);
		reasm_skb = &l->failover_reasm_skb;
	}

	if (usr == MSG_BUNDLER) {
		skb_queue_head_init(&tmpq);
		l->stats.recv_bundles++;
		l->stats.recv_bundled += msg_msgcnt(hdr);
		while (tipc_msg_extract(skb, &iskb, &pos))
			tipc_data_input(l, iskb, &tmpq);
		tipc_skb_queue_splice_tail(&tmpq, inputq);
		return 0;
	} else if (usr == MSG_FRAGMENTER) {
		l->stats.recv_fragments++;
		if (tipc_buf_append(reasm_skb, &skb)) {
			l->stats.recv_fragmented++;
			tipc_data_input(l, skb, inputq);
		} else if (!*reasm_skb && !link_is_bc_rcvlink(l)) {
			pr_warn_ratelimited("Unable to build fragment list\n");
			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		}
		return 0;
	} else if (usr == BCAST_PROTOCOL) {
		tipc_bcast_lock(l->net);
		tipc_link_bc_init_rcv(l->bc_rcvlink, hdr);
		tipc_bcast_unlock(l->net);
	}
drop:
	kfree_skb(skb);
	return 0;
}

static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked)
{
	bool released = false;
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&l->transmq, skb, tmp) {
		if (more(buf_seqno(skb), acked))
			break;
		__skb_unlink(skb, &l->transmq);
		kfree_skb(skb);
		released = true;
	}
	return released;
}

/* tipc_link_build_ack_msg: prepare link acknowledge message for transmission
 *
 * Note that sending of broadcast ack is coordinated among nodes, to reduce
 * risk of ack storms towards the sender
 */
int tipc_link_build_ack_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	if (!l)
		return 0;

	/* Broadcast ACK must be sent via a unicast link => defer to caller */
	if (link_is_bc_rcvlink(l)) {
		if (((l->rcv_nxt ^ tipc_own_addr(l->net)) & 0xf) != 0xf)
			return 0;
		l->rcv_unacked = 0;
		return TIPC_LINK_SND_BC_ACK;
	}

	/* Unicast ACK */
	l->rcv_unacked = 0;
	l->stats.sent_acks++;
	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, xmitq);
	return 0;
}

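/* The XOR test above makes each node ack only the roughly 1/16 of
 * broadcast sequence numbers whose low nibble is a fixed function of the
 * node's own address, so acks from different receivers are spread across
 * different packets rather than all answering the same one.
 */
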
/* tipc_link_build_reset_msg: prepare link RESET or ACTIVATE message
 */
void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	int mtyp = RESET_MSG;
	struct sk_buff *skb;

	if (l->state == LINK_ESTABLISHING)
		mtyp = ACTIVATE_MSG;

	tipc_link_build_proto_msg(l, mtyp, 0, 0, 0, 0, xmitq);

	/* Inform peer that this endpoint is going down if applicable */
	skb = skb_peek_tail(xmitq);
	if (skb && (l->state == LINK_RESET))
		msg_set_peer_stopping(buf_msg(skb), 1);
}

/* tipc_link_build_nack_msg: prepare link nack message for transmission
 */
static void tipc_link_build_nack_msg(struct tipc_link *l,
				     struct sk_buff_head *xmitq)
{
	u32 def_cnt = ++l->stats.deferred_recv;

	if (link_is_bc_rcvlink(l))
		return;

	if ((skb_queue_len(&l->deferdq) == 1) || !(def_cnt % TIPC_NACK_INTV))
		tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, xmitq);
}

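/* Rate limiting: a NACK is sent when a gap is first detected (the
 * deferred queue just received its first entry) and thereafter only once
 * per TIPC_NACK_INTV deferred packets, so a long gap does not trigger a
 * NACK for every arriving packet.
 */
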
/* tipc_link_rcv - process TIPC packets/messages arriving from off-node
 * @l: the link that should handle the message
 * @skb: TIPC packet
 * @xmitq: queue to place packets to be sent after this call
 */
int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
		  struct sk_buff_head *xmitq)
{
	struct sk_buff_head *defq = &l->deferdq;
	struct tipc_msg *hdr;
	u16 seqno, rcv_nxt, win_lim;
	int rc = 0;

	do {
		hdr = buf_msg(skb);
		seqno = msg_seqno(hdr);
		rcv_nxt = l->rcv_nxt;
		win_lim = rcv_nxt + TIPC_MAX_LINK_WIN;

		/* Verify and update link state */
		if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
			return tipc_link_proto_rcv(l, skb, xmitq);

		if (unlikely(!link_is_up(l))) {
			if (l->state == LINK_ESTABLISHING)
				rc = TIPC_LINK_UP_EVT;
			goto drop;
		}

		/* Don't send probe at next timeout expiration */
		l->silent_intv_cnt = 0;

		/* Drop if outside receive window */
		if (unlikely(less(seqno, rcv_nxt) || more(seqno, win_lim))) {
			l->stats.duplicates++;
			goto drop;
		}

		/* Forward queues and wake up waiting users */
		if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) {
			tipc_link_advance_backlog(l, xmitq);
			if (unlikely(!skb_queue_empty(&l->wakeupq)))
				link_prepare_wakeup(l);
		}

		/* Defer delivery if sequence gap */
		if (unlikely(seqno != rcv_nxt)) {
			__tipc_skb_queue_sorted(defq, seqno, skb);
			tipc_link_build_nack_msg(l, xmitq);
			break;
		}

		/* Deliver packet */
		l->rcv_nxt++;
		l->stats.recv_info++;
		if (!tipc_data_input(l, skb, l->inputq))
			rc |= tipc_link_input(l, skb, l->inputq);
		if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN))
			rc |= tipc_link_build_ack_msg(l, xmitq);
		if (unlikely(rc & ~TIPC_LINK_SND_BC_ACK))
			break;
	} while ((skb = __skb_dequeue(defq)));

	return rc;
drop:
	kfree_skb(skb);
	return rc;
}

static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
				      u16 rcvgap, int tolerance, int priority,
				      struct sk_buff_head *xmitq)
{
	struct sk_buff *skb;
	struct tipc_msg *hdr;
	struct sk_buff_head *dfq = &l->deferdq;
	bool node_up = link_is_up(l->bc_rcvlink);

	/* Don't send protocol message during reset or link failover */
	if (tipc_link_is_blocked(l))
		return;

	if (!tipc_link_is_up(l) && (mtyp == STATE_MSG))
		return;

	if (!skb_queue_empty(dfq))
		rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;

	skb = tipc_msg_create(LINK_PROTOCOL, mtyp, INT_H_SIZE,
			      TIPC_MAX_IF_NAME, l->addr,
			      tipc_own_addr(l->net), 0, 0, 0);
	if (!skb)
		return;

	hdr = buf_msg(skb);
	msg_set_session(hdr, l->session);
	msg_set_bearer_id(hdr, l->bearer_id);
	msg_set_net_plane(hdr, l->net_plane);
	msg_set_next_sent(hdr, l->snd_nxt);
	msg_set_ack(hdr, l->rcv_nxt - 1);
	msg_set_bcast_ack(hdr, l->bc_rcvlink->rcv_nxt - 1);
	msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
	msg_set_link_tolerance(hdr, tolerance);
	msg_set_linkprio(hdr, priority);
	msg_set_redundant_link(hdr, node_up);
	msg_set_seq_gap(hdr, 0);
	msg_set_seqno(hdr, l->snd_nxt + U16_MAX / 2);

	if (mtyp == STATE_MSG) {
		msg_set_seq_gap(hdr, rcvgap);
		msg_set_size(hdr, INT_H_SIZE);
		msg_set_probe(hdr, probe);
		l->stats.sent_states++;
		l->rcv_unacked = 0;
	} else {
		/* RESET_MSG or ACTIVATE_MSG */
		msg_set_max_pkt(hdr, l->advertised_mtu);
		strcpy(msg_data(hdr), l->if_name);
	}
	if (probe)
		l->stats.sent_probes++;
	if (rcvgap)
		l->stats.sent_nacks++;
	skb->priority = TC_PRIO_CONTROL;
	__skb_queue_tail(xmitq, skb);
}

/* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
 * with contents of the link's transmit and backlog queues.
 */
void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
			   int mtyp, struct sk_buff_head *xmitq)
{
	struct sk_buff *skb, *tnlskb;
	struct tipc_msg *hdr, tnlhdr;
	struct sk_buff_head *queue = &l->transmq;
	struct sk_buff_head tmpxq, tnlq;
	u16 pktlen, pktcnt, seqno = l->snd_nxt;

	if (!tnl)
		return;

	skb_queue_head_init(&tnlq);
	skb_queue_head_init(&tmpxq);

	/* At least one packet required for safe algorithm => add dummy */
	skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
			      BASIC_H_SIZE, 0, l->addr, tipc_own_addr(l->net),
			      0, 0, TIPC_ERR_NO_PORT);
	if (!skb) {
		pr_warn("%sunable to create tunnel packet\n", link_co_err);
		return;
	}
	skb_queue_tail(&tnlq, skb);
	tipc_link_xmit(l, &tnlq, &tmpxq);
	__skb_queue_purge(&tmpxq);

	/* Initialize reusable tunnel packet header */
	tipc_msg_init(tipc_own_addr(l->net), &tnlhdr, TUNNEL_PROTOCOL,
		      mtyp, INT_H_SIZE, l->addr);
	pktcnt = skb_queue_len(&l->transmq) + skb_queue_len(&l->backlogq);
	msg_set_msgcnt(&tnlhdr, pktcnt);
	msg_set_bearer_id(&tnlhdr, l->peer_bearer_id);
tnl:
	/* Wrap each packet into a tunnel packet */
	skb_queue_walk(queue, skb) {
		hdr = buf_msg(skb);
		if (queue == &l->backlogq)
			msg_set_seqno(hdr, seqno++);
		pktlen = msg_size(hdr);
		msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
		tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE);
		if (!tnlskb) {
			pr_warn("%sunable to send packet\n", link_co_err);
			return;
		}
		skb_copy_to_linear_data(tnlskb, &tnlhdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(tnlskb, INT_H_SIZE, hdr, pktlen);
		__skb_queue_tail(&tnlq, tnlskb);
	}
	if (queue != &l->backlogq) {
		queue = &l->backlogq;
		goto tnl;
	}

	tipc_link_xmit(tnl, &tnlq, xmitq);

	if (mtyp == FAILOVER_MSG) {
		tnl->drop_point = l->rcv_nxt;
		tnl->failover_reasm_skb = l->reasm_buf;
		l->reasm_buf = NULL;
	}
}

/* tipc_link_proto_rcv(): receive link level protocol message.
 * Note that network plane id propagates through the network, and may
 * change at any time. The node with lowest numerical id determines
 * network plane.
 */
static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
			       struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	u16 rcvgap = 0;
	u16 ack = msg_ack(hdr);
	u16 gap = msg_seq_gap(hdr);
	u16 peers_snd_nxt = msg_next_sent(hdr);
	u16 peers_tol = msg_link_tolerance(hdr);
	u16 peers_prio = msg_linkprio(hdr);
	u16 rcv_nxt = l->rcv_nxt;
	int mtyp = msg_type(hdr);
	char *if_name;
	int rc = 0;

	if (tipc_link_is_blocked(l) || !xmitq)
		goto exit;

	if (tipc_own_addr(l->net) > msg_prevnode(hdr))
		l->net_plane = msg_net_plane(hdr);

	switch (mtyp) {
	case RESET_MSG:

		/* Ignore duplicate RESET with old session number */
		if ((less_eq(msg_session(hdr), l->peer_session)) &&
		    (l->peer_session != ANY_SESSION))
			break;
		/* fall thru' */

	case ACTIVATE_MSG:
		skb_linearize(skb);
		hdr = buf_msg(skb);

		/* Complete own link name with peer's interface name */
		if_name = strrchr(l->name, ':') + 1;
		if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
			break;
		if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
			break;
		strncpy(if_name, msg_data(hdr), TIPC_MAX_IF_NAME);

		/* Update own tolerance if peer indicates a non-zero value */
		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
			l->tolerance = peers_tol;

		/* Update own priority if peer's priority is higher */
		if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
			l->priority = peers_prio;

		/* ACTIVATE_MSG serves as PEER_RESET if link is already down */
		if (msg_peer_stopping(hdr))
			rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		else if ((mtyp == RESET_MSG) || !link_is_up(l))
			rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);

		/* ACTIVATE_MSG takes up link if it was already locally reset */
		if ((mtyp == ACTIVATE_MSG) && (l->state == LINK_ESTABLISHING))
			rc = TIPC_LINK_UP_EVT;

		l->peer_session = msg_session(hdr);
		l->peer_bearer_id = msg_bearer_id(hdr);
		if (l->mtu > msg_max_pkt(hdr))
			l->mtu = msg_max_pkt(hdr);
		break;

	case STATE_MSG:

		/* Update own tolerance if peer indicates a non-zero value */
		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
			l->tolerance = peers_tol;

		if (peers_prio && in_range(peers_prio, TIPC_MIN_LINK_PRI,
					   TIPC_MAX_LINK_PRI)) {
			l->priority = peers_prio;
			rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		}

		l->silent_intv_cnt = 0;
		l->stats.recv_states++;
		if (msg_probe(hdr))
			l->stats.recv_probes++;

		if (!link_is_up(l)) {
			if (l->state == LINK_ESTABLISHING)
				rc = TIPC_LINK_UP_EVT;
			break;
		}

		/* Send NACK if peer has sent pkts we haven't received yet */
		if (more(peers_snd_nxt, rcv_nxt) && !tipc_link_is_synching(l))
			rcvgap = peers_snd_nxt - l->rcv_nxt;
		if (rcvgap || (msg_probe(hdr)))
			tipc_link_build_proto_msg(l, STATE_MSG, 0, rcvgap,
						  0, 0, xmitq);
		tipc_link_release_pkts(l, ack);

		/* If NACK, retransmit will now start at right position */
		if (gap) {
			rc = tipc_link_retrans(l, ack + 1, ack + gap, xmitq);
			l->stats.recv_nacks++;
		}

		tipc_link_advance_backlog(l, xmitq);
		if (unlikely(!skb_queue_empty(&l->wakeupq)))
			link_prepare_wakeup(l);
	}
exit:
	kfree_skb(skb);
	return rc;
}

/* tipc_link_build_bc_proto_msg() - create broadcast protocol message
 */
static bool tipc_link_build_bc_proto_msg(struct tipc_link *l, bool bcast,
					 u16 peers_snd_nxt,
					 struct sk_buff_head *xmitq)
{
	struct sk_buff *skb;
	struct tipc_msg *hdr;
	struct sk_buff *dfrd_skb = skb_peek(&l->deferdq);
	u16 ack = l->rcv_nxt - 1;
	u16 gap_to = peers_snd_nxt - 1;

	skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
			      0, l->addr, tipc_own_addr(l->net), 0, 0, 0);
	if (!skb)
		return false;
	hdr = buf_msg(skb);
	msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
	msg_set_bcast_ack(hdr, ack);
	msg_set_bcgap_after(hdr, ack);
	if (dfrd_skb)
		gap_to = buf_seqno(dfrd_skb) - 1;
	msg_set_bcgap_to(hdr, gap_to);
	msg_set_non_seq(hdr, bcast);
	__skb_queue_tail(xmitq, skb);
	return true;
}

/* tipc_link_build_bc_init_msg() - synchronize broadcast link endpoints.
 *
 * Give a newly added peer node the sequence number where it should
 * start receiving and acking broadcast packets.
 */
static void tipc_link_build_bc_init_msg(struct tipc_link *l,
					struct sk_buff_head *xmitq)
{
	struct sk_buff_head list;

	__skb_queue_head_init(&list);
	if (!tipc_link_build_bc_proto_msg(l->bc_rcvlink, false, 0, &list))
		return;
	tipc_link_xmit(l, &list, xmitq);
}

/* tipc_link_bc_init_rcv - receive initial broadcast synch data from peer
 */
void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr)
{
	int mtyp = msg_type(hdr);
	u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);

	if (link_is_up(l))
		return;

	if (msg_user(hdr) == BCAST_PROTOCOL) {
		l->rcv_nxt = peers_snd_nxt;
		l->state = LINK_ESTABLISHED;
		return;
	}

	if (l->peer_caps & TIPC_BCAST_SYNCH)
		return;

	if (msg_peer_node_is_up(hdr))
		return;

	/* Compatibility: accept older, less safe initial synch data */
	if ((mtyp == RESET_MSG) || (mtyp == ACTIVATE_MSG))
		l->rcv_nxt = peers_snd_nxt;
}

/* tipc_link_bc_sync_rcv - update rcv link according to peer's send state
 */
void tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
			   struct sk_buff_head *xmitq)
{
	u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);

	if (!link_is_up(l))
		return;

	if (!msg_peer_node_is_up(hdr))
		return;

	l->bc_peer_is_up = true;

	/* Ignore if peers_snd_nxt goes beyond receive window */
	if (more(peers_snd_nxt, l->rcv_nxt + l->window))
		return;

	if (!more(peers_snd_nxt, l->rcv_nxt)) {
		l->nack_state = BC_NACK_SND_CONDITIONAL;
		return;
	}

	/* Don't NACK if one was recently sent or peeked */
	if (l->nack_state == BC_NACK_SND_SUPPRESS) {
		l->nack_state = BC_NACK_SND_UNCONDITIONAL;
		return;
	}

	/* Conditionally delay NACK sending until next synch rcv */
	if (l->nack_state == BC_NACK_SND_CONDITIONAL) {
		l->nack_state = BC_NACK_SND_UNCONDITIONAL;
		if ((peers_snd_nxt - l->rcv_nxt) < TIPC_MIN_LINK_WIN)
			return;
	}

	/* Send NACK now but suppress next one */
	tipc_link_build_bc_proto_msg(l, true, peers_snd_nxt, xmitq);
	l->nack_state = BC_NACK_SND_SUPPRESS;
}

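/* tipc_link_bc_ack_rcv(): receive broadcast acknowledge from a peer,
 * releasing transmitted packets once all peers have acked them
 */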
void tipc_link_bc_ack_rcv(struct tipc_link *l, u16 acked,
			  struct sk_buff_head *xmitq)
{
	struct sk_buff *skb, *tmp;
	struct tipc_link *snd_l = l->bc_sndlink;

	if (!link_is_up(l) || !l->bc_peer_is_up)
		return;

	if (!more(acked, l->acked))
		return;

	/* Skip over packets peer has already acked */
	skb_queue_walk(&snd_l->transmq, skb) {
		if (more(buf_seqno(skb), l->acked))
			break;
	}

	/* Update/release the packets peer is acking now */
	skb_queue_walk_from_safe(&snd_l->transmq, skb, tmp) {
		if (more(buf_seqno(skb), acked))
			break;
		if (!--TIPC_SKB_CB(skb)->ackers) {
			__skb_unlink(skb, &snd_l->transmq);
			kfree_skb(skb);
		}
	}
	l->acked = acked;
	tipc_link_advance_backlog(snd_l, xmitq);
	if (unlikely(!skb_queue_empty(&snd_l->wakeupq)))
		link_prepare_wakeup(snd_l);
}

/* tipc_link_bc_nack_rcv(): receive broadcast nack message
 */
int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
			  struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	u32 dnode = msg_destnode(hdr);
	int mtyp = msg_type(hdr);
	u16 acked = msg_bcast_ack(hdr);
	u16 from = acked + 1;
	u16 to = msg_bcgap_to(hdr);
	u16 peers_snd_nxt = to + 1;
	int rc = 0;

	kfree_skb(skb);

	if (!tipc_link_is_up(l) || !l->bc_peer_is_up)
		return 0;

	if (mtyp != STATE_MSG)
		return 0;

	if (dnode == tipc_own_addr(l->net)) {
		tipc_link_bc_ack_rcv(l, acked, xmitq);
		rc = tipc_link_retrans(l->bc_sndlink, from, to, xmitq);
		l->stats.recv_nacks++;
		return rc;
	}

	/* Msg for other node => suppress own NACK at next sync if applicable */
	if (more(peers_snd_nxt, l->rcv_nxt) && !less(l->rcv_nxt, from))
		l->nack_state = BC_NACK_SND_SUPPRESS;

	return 0;
}

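/* tipc_link_set_queue_limits(): set link window and the per-importance
 * backlog queue limits derived from it
 */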
void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
{
	int max_bulk = TIPC_MAX_PUBLICATIONS / (l->mtu / ITEM_SIZE);

	l->window = win;
	l->backlog[TIPC_LOW_IMPORTANCE].limit = win / 2;
	l->backlog[TIPC_MEDIUM_IMPORTANCE].limit = win;
	l->backlog[TIPC_HIGH_IMPORTANCE].limit = win / 2 * 3;
	l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = win * 2;
	l->backlog[TIPC_SYSTEM_IMPORTANCE].limit = max_bulk;
}

/**
 * tipc_link_reset_stats - reset link statistics
 * @l: pointer to link
 */
void tipc_link_reset_stats(struct tipc_link *l)
{
	memset(&l->stats, 0, sizeof(l->stats));
	if (!link_is_bc_sndlink(l)) {
		l->stats.sent_info = l->snd_nxt;
		l->stats.recv_info = l->rcv_nxt;
	}
}

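/* link_print(): log current link state and queue levels for debugging
 */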
static void link_print(struct tipc_link *l, const char *str)
{
	struct sk_buff *hskb = skb_peek(&l->transmq);
	u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt - 1;
	u16 tail = l->snd_nxt - 1;

	pr_info("%s Link <%s> state %x\n", str, l->name, l->state);
	pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n",
		skb_queue_len(&l->transmq), head, tail,
		skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt);
}

/* Parse and validate nested (link) properties valid for media, bearer and link
 */
int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
{
	int err;

	err = nla_parse_nested(props, TIPC_NLA_PROP_MAX, prop,
			       tipc_nl_prop_policy);
	if (err)
		return err;

	if (props[TIPC_NLA_PROP_PRIO]) {
		u32 prio;

		prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
		if (prio > TIPC_MAX_LINK_PRI)
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_TOL]) {
		u32 tol;

		tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
		if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_WIN]) {
		u32 win;

		win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
		if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN))
			return -EINVAL;
	}

	return 0;
}

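/* __tipc_nl_add_stats(): dump unicast link statistics as nested netlink
 * attributes
 */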
static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
{
	int i;
	struct nlattr *stats;

	struct nla_map {
		u32 key;
		u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, s->recv_info},
		{TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, s->sent_info},
		{TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
		{TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
			s->msg_length_counts : 1},
		{TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
		{TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
		{TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
		{TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
		{TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
		{TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
		{TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
		{TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
		{TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
		{TIPC_NLA_STATS_RX_STATES, s->recv_states},
		{TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
		{TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
		{TIPC_NLA_STATS_TX_STATES, s->sent_states},
		{TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
		{TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, s->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
			(s->accu_queue_sz / s->queue_sz_counts) : 0}
	};

	stats = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
	if (!stats)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, stats);

	return 0;
msg_full:
	nla_nest_cancel(skb, stats);

	return -EMSGSIZE;
}

/* Caller should hold appropriate locks to protect the link */
int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
		       struct tipc_link *link, int nlflags)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *prop;
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  nlflags, TIPC_NL_LINK_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST,
			tipc_cluster_mask(tn->own_addr)))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->rcv_nxt))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->snd_nxt))
		goto attr_msg_full;

	if (tipc_link_is_up(link))
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
			goto attr_msg_full;
	if (link->active)
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
			goto attr_msg_full;

	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, link->window))
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_stats(msg->skb, &link->stats);
	if (err)
		goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

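/* __tipc_nl_add_bc_link_stat(): dump broadcast link statistics as nested
 * netlink attributes
 */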
static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
				      struct tipc_stats *stats)
{
	int i;
	struct nlattr *nest;

	struct nla_map {
		__u32 key;
		__u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, stats->recv_info},
		{TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, stats->sent_info},
		{TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
		{TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
		{TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
			(stats->accu_queue_sz / stats->queue_sz_counts) : 0}
	};

	nest = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
	if (!nest)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, nest);

	return 0;
msg_full:
	nla_nest_cancel(skb, nest);

	return -EMSGSIZE;
}

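/* tipc_nl_add_bc_link(): dump state, properties and statistics of the
 * broadcast link into a netlink message
 */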
int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *prop;
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	if (!bcl)
		return 0;

	tipc_bcast_lock(net);

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_LINK_GET);
	if (!hdr) {
		tipc_bcast_unlock(net);
		return -EMSGSIZE;
	}

	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	/* The broadcast link is always up */
	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
		goto attr_msg_full;

	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
		goto attr_msg_full;
	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, bcl->rcv_nxt))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, bcl->snd_nxt))
		goto attr_msg_full;

	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->window))
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
	if (err)
		goto attr_msg_full;

	tipc_bcast_unlock(net);
	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	tipc_bcast_unlock(net);
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

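/* tipc_link_set_tolerance(): set link tolerance and advertise the new
 * value to the peer via a STATE message
 */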
void tipc_link_set_tolerance(struct tipc_link *l, u32 tol,
			     struct sk_buff_head *xmitq)
{
	l->tolerance = tol;
	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, tol, 0, xmitq);
}

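/* tipc_link_set_prio(): set link priority and advertise the new value
 * to the peer via a STATE message
 */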
void tipc_link_set_prio(struct tipc_link *l, u32 prio,
			struct sk_buff_head *xmitq)
{
	l->priority = prio;
	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, prio, xmitq);
}

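/* tipc_link_set_abort_limit(): set the number of consecutive silent
 * intervals link supervision tolerates before giving up on the peer
 */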
void tipc_link_set_abort_limit(struct tipc_link *l, u32 limit)
{
	l->abort_limit = limit;
}