/*
 * net/tipc/link.c: TIPC link code
 *
 * Copyright (c) 1996-2007, 2012-2016, Ericsson AB
 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "subscr.h"
#include "link.h"
#include "bcast.h"
#include "socket.h"
#include "name_distr.h"
#include "discover.h"
#include "netlink.h"
#include "monitor.h"

#include <linux/pkt_sched.h>

struct tipc_stats {
	u32 sent_pkts;
	u32 recv_pkts;
	u32 sent_states;
	u32 recv_states;
	u32 sent_probes;
	u32 recv_probes;
	u32 sent_nacks;
	u32 recv_nacks;
	u32 sent_acks;
	u32 sent_bundled;
	u32 sent_bundles;
	u32 recv_bundled;
	u32 recv_bundles;
	u32 retransmitted;
	u32 sent_fragmented;
	u32 sent_fragments;
	u32 recv_fragmented;
	u32 recv_fragments;
	u32 link_congs;		/* # port sends blocked by congestion */
	u32 deferred_recv;
	u32 duplicates;
	u32 max_queue_sz;	/* send queue size high water mark */
	u32 accu_queue_sz;	/* used for send queue size profiling */
	u32 queue_sz_counts;	/* used for send queue size profiling */
	u32 msg_length_counts;	/* used for message length profiling */
	u32 msg_lengths_total;	/* used for message length profiling */
	u32 msg_length_profile[7]; /* used for msg. length profiling */
};

/**
 * struct tipc_link - TIPC link data structure
 * @addr: network address of link's peer node
 * @name: link name character string
 * @media_addr: media address to use when sending messages over link
 * @timer: link timer
 * @net: pointer to namespace struct
 * @refcnt: reference counter for permanent references (owner node & timer)
 * @peer_session: link session # being used by peer end of link
 * @peer_bearer_id: bearer id used by link's peer endpoint
 * @bearer_id: local bearer id used by link
 * @tolerance: minimum link continuity loss needed to reset link [in ms]
 * @abort_limit: # of unacknowledged continuity probes needed to reset link
 * @state: current state of link FSM
 * @peer_caps: bitmap describing capabilities of peer node
 * @silent_intv_cnt: # of timer intervals without any reception from peer
 * @proto_msg: template for control messages generated by link
 * @pmsg: convenience pointer to "proto_msg" field
 * @priority: current link priority
 * @net_plane: current link network plane ('A' through 'H')
 * @mon_state: cookie with information needed by link monitor
 * @backlog_limit: backlog queue congestion thresholds (indexed by importance)
 * @exp_msg_count: # of tunnelled messages expected during link changeover
 * @reset_rcv_checkpt: seq # of last acknowledged message at time of link reset
 * @mtu: current maximum packet size for this link
 * @advertised_mtu: advertised own mtu when link is being established
 * @transmitq: queue for sent, non-acked messages
 * @backlogq: queue for messages waiting to be sent
 * @snt_nxt: next sequence number to use for outbound messages
 * @last_retransmitted: sequence number of most recently retransmitted message
 * @stale_cnt: counter for number of identical retransmit attempts
 * @stale_limit: time when repeated identical retransmits must force link reset
 * @ackers: # of peers that needs to ack each packet before it can be released
 * @acked: # last packet acked by a certain peer. Used for broadcast.
 * @rcv_nxt: next sequence number to expect for inbound messages
 * @deferred_queue: deferred queue saved OOS b'cast message received from node
 * @unacked_window: # of inbound messages rx'd without ack'ing back to peer
 * @inputq: buffer queue for messages to be delivered upwards
 * @namedq: buffer queue for name table messages to be delivered upwards
 * @next_out: ptr to first unsent outbound message in queue
 * @wakeupq: linked list of wakeup msgs waiting for link congestion to abate
 * @long_msg_seq_no: next identifier to use for outbound fragmented messages
 * @reasm_buf: head of partially reassembled inbound message fragments
 * @bc_rcvr: marks that this is a broadcast receiver link
 * @stats: collects statistics regarding link activity
 */
struct tipc_link {
	u32 addr;
	char name[TIPC_MAX_LINK_NAME];
	struct net *net;

	/* Management and link supervision data */
	u32 peer_session;
	u32 session;
	u32 peer_bearer_id;
	u32 bearer_id;
	u32 tolerance;
	u32 abort_limit;
	u32 state;
	u16 peer_caps;
	bool active;
	u32 silent_intv_cnt;
	char if_name[TIPC_MAX_IF_NAME];
	u32 priority;
	char net_plane;
	struct tipc_mon_state mon_state;
	u16 rst_cnt;

	/* Failover/synch */
	u16 drop_point;
	struct sk_buff *failover_reasm_skb;

	/* Max packet negotiation */
	u16 mtu;
	u16 advertised_mtu;

	/* Sending */
	struct sk_buff_head transmq;
	struct sk_buff_head backlogq;
	struct {
		u16 len;
		u16 limit;
	} backlog[5];
	u16 snd_nxt;
	u16 last_retransm;
	u16 window;
	u16 stale_cnt;
	unsigned long stale_limit;

	/* Reception */
	u16 rcv_nxt;
	u32 rcv_unacked;
	struct sk_buff_head deferdq;
	struct sk_buff_head *inputq;
	struct sk_buff_head *namedq;

	/* Congestion handling */
	struct sk_buff_head wakeupq;

	/* Fragmentation/reassembly */
	struct sk_buff *reasm_buf;

	/* Broadcast */
	u16 ackers;
	u16 acked;
	struct tipc_link *bc_rcvlink;
	struct tipc_link *bc_sndlink;
	unsigned long prev_retr;
	u16 prev_from;
	u16 prev_to;
	u8 nack_state;
	bool bc_peer_is_up;

	/* Statistics */
	struct tipc_stats stats;
};

/*
 * Error message prefixes
 */
static const char *link_co_err = "Link tunneling error, ";
static const char *link_rst_msg = "Resetting link ";

/* Send states for broadcast NACKs
 */
enum {
	BC_NACK_SND_CONDITIONAL,
	BC_NACK_SND_UNCONDITIONAL,
	BC_NACK_SND_SUPPRESS,
};

#define TIPC_BC_RETR_LIMIT 10	/* [ms] */

/*
 * Interval between NACKs when packets arrive out of order
 */
#define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2)

/* Wildcard value for link session numbers. When it is known that
 * peer endpoint is down, any session number must be accepted.
 */
#define ANY_SESSION 0x10000

/* Link FSM states:
 */
enum {
	LINK_ESTABLISHED = 0xe,
	LINK_ESTABLISHING = 0xe << 4,
	LINK_RESET = 0x1 << 8,
	LINK_RESETTING = 0x2 << 12,
	LINK_PEER_RESET = 0xd << 16,
	LINK_FAILINGOVER = 0xf << 20,
	LINK_SYNCHING = 0xc << 24
};
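
/* Note: each FSM state above occupies its own, disjoint bit range, so a set
 * of states can be tested against l->state with a single bitwise AND, as done
 * in link_is_up(), tipc_link_is_reset() and tipc_link_is_blocked() below.
 */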

/* Link FSM state checking routines
 */
static int link_is_up(struct tipc_link *l)
{
	return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
}

static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
			       struct sk_buff_head *xmitq);
static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
				      bool probe_reply, u16 rcvgap,
				      int tolerance, int priority,
				      struct sk_buff_head *xmitq);
static void link_print(struct tipc_link *l, const char *str);
static int tipc_link_build_nack_msg(struct tipc_link *l,
				    struct sk_buff_head *xmitq);
static void tipc_link_build_bc_init_msg(struct tipc_link *l,
					struct sk_buff_head *xmitq);
static bool tipc_link_release_pkts(struct tipc_link *l, u16 to);

/*
 * Simple non-static link routines (i.e. referenced outside this file)
 */
bool tipc_link_is_up(struct tipc_link *l)
{
	return link_is_up(l);
}

bool tipc_link_peer_is_down(struct tipc_link *l)
{
	return l->state == LINK_PEER_RESET;
}

bool tipc_link_is_reset(struct tipc_link *l)
{
	return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING);
}

bool tipc_link_is_establishing(struct tipc_link *l)
{
	return l->state == LINK_ESTABLISHING;
}

bool tipc_link_is_synching(struct tipc_link *l)
{
	return l->state == LINK_SYNCHING;
}

bool tipc_link_is_failingover(struct tipc_link *l)
{
	return l->state == LINK_FAILINGOVER;
}

bool tipc_link_is_blocked(struct tipc_link *l)
{
	return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER);
}

static bool link_is_bc_sndlink(struct tipc_link *l)
{
	return !l->bc_sndlink;
}

static bool link_is_bc_rcvlink(struct tipc_link *l)
{
	return ((l->bc_rcvlink == l) && !link_is_bc_sndlink(l));
}

int tipc_link_is_active(struct tipc_link *l)
{
	return l->active;
}

void tipc_link_set_active(struct tipc_link *l, bool active)
{
	l->active = active;
}

u32 tipc_link_id(struct tipc_link *l)
{
	return l->peer_bearer_id << 16 | l->bearer_id;
}

int tipc_link_window(struct tipc_link *l)
{
	return l->window;
}

int tipc_link_prio(struct tipc_link *l)
{
	return l->priority;
}

unsigned long tipc_link_tolerance(struct tipc_link *l)
{
	return l->tolerance;
}

struct sk_buff_head *tipc_link_inputq(struct tipc_link *l)
{
	return l->inputq;
}

char tipc_link_plane(struct tipc_link *l)
{
	return l->net_plane;
}

void tipc_link_add_bc_peer(struct tipc_link *snd_l,
			   struct tipc_link *uc_l,
			   struct sk_buff_head *xmitq)
{
	struct tipc_link *rcv_l = uc_l->bc_rcvlink;

	snd_l->ackers++;
	rcv_l->acked = snd_l->snd_nxt - 1;
	snd_l->state = LINK_ESTABLISHED;
	tipc_link_build_bc_init_msg(uc_l, xmitq);
}

void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
			      struct tipc_link *rcv_l,
			      struct sk_buff_head *xmitq)
{
	u16 ack = snd_l->snd_nxt - 1;

	snd_l->ackers--;
	rcv_l->bc_peer_is_up = true;
	rcv_l->state = LINK_ESTABLISHED;
	tipc_link_bc_ack_rcv(rcv_l, ack, xmitq);
	tipc_link_reset(rcv_l);
	rcv_l->state = LINK_RESET;
	if (!snd_l->ackers) {
		tipc_link_reset(snd_l);
		snd_l->state = LINK_RESET;
		__skb_queue_purge(xmitq);
	}
}

int tipc_link_bc_peers(struct tipc_link *l)
{
	return l->ackers;
}

u16 link_bc_rcv_gap(struct tipc_link *l)
{
	struct sk_buff *skb = skb_peek(&l->deferdq);
	u16 gap = 0;

	if (more(l->snd_nxt, l->rcv_nxt))
		gap = l->snd_nxt - l->rcv_nxt;
	if (skb)
		gap = buf_seqno(skb) - l->rcv_nxt;
	return gap;
}
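
/* Note: the gap is measured from the next expected sequence number (rcv_nxt)
 * to the peer's send position (snd_nxt is used to hold the peer's snd_nxt on
 * a broadcast receive link, see tipc_link_build_state_msg()), or to the first
 * deferred packet if one is queued.
 */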

void tipc_link_set_mtu(struct tipc_link *l, int mtu)
{
	l->mtu = mtu;
}

int tipc_link_mtu(struct tipc_link *l)
{
	return l->mtu;
}

u16 tipc_link_rcv_nxt(struct tipc_link *l)
{
	return l->rcv_nxt;
}

u16 tipc_link_acked(struct tipc_link *l)
{
	return l->acked;
}

char *tipc_link_name(struct tipc_link *l)
{
	return l->name;
}

/**
 * tipc_link_create - create a new link
 * @n: pointer to associated node
 * @if_name: associated interface name
 * @bearer_id: id (index) of associated bearer
 * @tolerance: link tolerance to be used by link
 * @net_plane: network plane (A,B,c..) this link belongs to
 * @mtu: mtu to be advertised by link
 * @priority: priority to be used by link
 * @window: send window to be used by link
 * @session: session to be used by link
 * @ownnode: identity of own node
 * @peer: node id of peer node
 * @peer_caps: bitmap describing peer node capabilities
 * @bc_sndlink: the namespace global link used for broadcast sending
 * @bc_rcvlink: the peer specific link used for broadcast reception
 * @inputq: queue to put messages ready for delivery
 * @namedq: queue to put binding table update messages ready for delivery
 * @link: return value, pointer to put the created link
 *
 * Returns true if link was created, otherwise false
 */
bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
		      int tolerance, char net_plane, u32 mtu, int priority,
		      int window, u32 session, u32 self,
		      u32 peer, u8 *peer_id, u16 peer_caps,
		      struct tipc_link *bc_sndlink,
		      struct tipc_link *bc_rcvlink,
		      struct sk_buff_head *inputq,
		      struct sk_buff_head *namedq,
		      struct tipc_link **link)
{
	char peer_str[NODE_ID_STR_LEN] = {0,};
	char self_str[NODE_ID_STR_LEN] = {0,};
	struct tipc_link *l;

	l = kzalloc(sizeof(*l), GFP_ATOMIC);
	if (!l)
		return false;
	*link = l;
	l->session = session;

	/* Set link name for unicast links only */
	if (peer_id) {
		tipc_nodeid2string(self_str, tipc_own_id(net));
		if (strlen(self_str) > 16)
			sprintf(self_str, "%x", self);
		tipc_nodeid2string(peer_str, peer_id);
		if (strlen(peer_str) > 16)
			sprintf(peer_str, "%x", peer);
	}
	/* Peer i/f name will be completed by reset/activate message */
	snprintf(l->name, sizeof(l->name), "%s:%s-%s:unknown",
		 self_str, if_name, peer_str);

	strcpy(l->if_name, if_name);
	l->addr = peer;
	l->peer_caps = peer_caps;
	l->net = net;
	l->peer_session = ANY_SESSION;
	l->bearer_id = bearer_id;
	l->tolerance = tolerance;
	l->net_plane = net_plane;
	l->advertised_mtu = mtu;
	l->mtu = mtu;
	l->priority = priority;
	tipc_link_set_queue_limits(l, window);
	l->ackers = 1;
	l->bc_sndlink = bc_sndlink;
	l->bc_rcvlink = bc_rcvlink;
	l->inputq = inputq;
	l->namedq = namedq;
	l->state = LINK_RESETTING;
	__skb_queue_head_init(&l->transmq);
	__skb_queue_head_init(&l->backlogq);
	__skb_queue_head_init(&l->deferdq);
	skb_queue_head_init(&l->wakeupq);
	skb_queue_head_init(l->inputq);
	return true;
}

/**
 * tipc_link_bc_create - create new link to be used for broadcast
 * @n: pointer to associated node
 * @mtu: mtu to be used initially if no peers
 * @window: send window to be used
 * @inputq: queue to put messages ready for delivery
 * @namedq: queue to put binding table update messages ready for delivery
 * @link: return value, pointer to put the created link
 *
 * Returns true if link was created, otherwise false
 */
bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
			 int mtu, int window, u16 peer_caps,
			 struct sk_buff_head *inputq,
			 struct sk_buff_head *namedq,
			 struct tipc_link *bc_sndlink,
			 struct tipc_link **link)
{
	struct tipc_link *l;

	if (!tipc_link_create(net, "", MAX_BEARERS, 0, 'Z', mtu, 0, window,
			      0, ownnode, peer, NULL, peer_caps, bc_sndlink,
			      NULL, inputq, namedq, link))
		return false;

	l = *link;
	strcpy(l->name, tipc_bclink_name);
	tipc_link_reset(l);
	l->state = LINK_RESET;
	l->ackers = 0;
	l->bc_rcvlink = l;

	/* Broadcast send link is always up */
	if (link_is_bc_sndlink(l))
		l->state = LINK_ESTABLISHED;

	/* Disable replicast if even a single peer doesn't support it */
	if (link_is_bc_rcvlink(l) && !(peer_caps & TIPC_BCAST_RCAST))
		tipc_bcast_disable_rcast(net);

	return true;
}

/**
 * tipc_link_fsm_evt - link finite state machine
 * @l: pointer to link
 * @evt: state machine event to be processed
 */
int tipc_link_fsm_evt(struct tipc_link *l, int evt)
{
	int rc = 0;

	switch (l->state) {
	case LINK_RESETTING:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_FAILURE_EVT:
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILOVER_END_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_RESET:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_ESTABLISHING;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
			l->state = LINK_FAILINGOVER;
		case LINK_FAILURE_EVT:
		case LINK_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILOVER_END_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_PEER_RESET:
		switch (evt) {
		case LINK_RESET_EVT:
			l->state = LINK_ESTABLISHING;
			break;
		case LINK_PEER_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILURE_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_FAILINGOVER:
		switch (evt) {
		case LINK_FAILOVER_END_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_PEER_RESET_EVT:
		case LINK_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILURE_EVT:
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_ESTABLISHING:
		switch (evt) {
		case LINK_ESTABLISH_EVT:
			l->state = LINK_ESTABLISHED;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
			l->state = LINK_FAILINGOVER;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_FAILURE_EVT:
		case LINK_PEER_RESET_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
			break;
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_ESTABLISHED:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_FAILURE_EVT:
			l->state = LINK_RESETTING;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_ESTABLISH_EVT:
		case LINK_SYNCH_END_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
			l->state = LINK_SYNCHING;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_SYNCHING:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_FAILURE_EVT:
			l->state = LINK_RESETTING;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_ESTABLISH_EVT:
		case LINK_SYNCH_BEGIN_EVT:
			break;
		case LINK_SYNCH_END_EVT:
			l->state = LINK_ESTABLISHED;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	default:
		pr_err("Unknown FSM state %x in %s\n", l->state, l->name);
	}
	return rc;
illegal_evt:
	pr_err("Illegal FSM event %x in state %x on link %s\n",
	       evt, l->state, l->name);
	return rc;
}
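
/* Note: the event handler above only changes l->state; the returned rc may
 * carry TIPC_LINK_DOWN_EVT (set when leaving LINK_ESTABLISHED/LINK_SYNCHING
 * on failure or peer reset) so that the caller can act on the transition.
 */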

/* link_profile_stats - update statistical profiling of traffic
 */
static void link_profile_stats(struct tipc_link *l)
{
	struct sk_buff *skb;
	struct tipc_msg *msg;
	int length;

	/* Update counters used in statistical profiling of send traffic */
	l->stats.accu_queue_sz += skb_queue_len(&l->transmq);
	l->stats.queue_sz_counts++;

	skb = skb_peek(&l->transmq);
	if (!skb)
		return;
	msg = buf_msg(skb);
	length = msg_size(msg);

	if (msg_user(msg) == MSG_FRAGMENTER) {
		if (msg_type(msg) != FIRST_FRAGMENT)
			return;
		length = msg_size(msg_get_wrapped(msg));
	}
	l->stats.msg_lengths_total += length;
	l->stats.msg_length_counts++;
	if (length <= 64)
		l->stats.msg_length_profile[0]++;
	else if (length <= 256)
		l->stats.msg_length_profile[1]++;
	else if (length <= 1024)
		l->stats.msg_length_profile[2]++;
	else if (length <= 4096)
		l->stats.msg_length_profile[3]++;
	else if (length <= 16384)
		l->stats.msg_length_profile[4]++;
	else if (length <= 32768)
		l->stats.msg_length_profile[5]++;
	else
		l->stats.msg_length_profile[6]++;
}

/* tipc_link_timeout - perform periodic task as instructed from node timeout
 */
int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	int mtyp = 0;
	int rc = 0;
	bool state = false;
	bool probe = false;
	bool setup = false;
	u16 bc_snt = l->bc_sndlink->snd_nxt - 1;
	u16 bc_acked = l->bc_rcvlink->acked;
	struct tipc_mon_state *mstate = &l->mon_state;

	switch (l->state) {
	case LINK_ESTABLISHED:
	case LINK_SYNCHING:
		mtyp = STATE_MSG;
		link_profile_stats(l);
		tipc_mon_get_state(l->net, l->addr, mstate, l->bearer_id);
		if (mstate->reset || (l->silent_intv_cnt > l->abort_limit))
			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		state = bc_acked != bc_snt;
		state |= l->bc_rcvlink->rcv_unacked;
		state |= l->rcv_unacked;
		state |= !skb_queue_empty(&l->transmq);
		state |= !skb_queue_empty(&l->deferdq);
		probe = mstate->probing;
		probe |= l->silent_intv_cnt;
		if (probe || mstate->monitoring)
			l->silent_intv_cnt++;
		break;
	case LINK_RESET:
		setup = l->rst_cnt++ <= 4;
		setup |= !(l->rst_cnt % 16);
		mtyp = RESET_MSG;
		break;
	case LINK_ESTABLISHING:
		setup = true;
		mtyp = ACTIVATE_MSG;
		break;
	case LINK_PEER_RESET:
	case LINK_RESETTING:
	case LINK_FAILINGOVER:
		break;
	default:
		break;
	}

	if (state || probe || setup)
		tipc_link_build_proto_msg(l, mtyp, probe, 0, 0, 0, 0, xmitq);

	return rc;
}

/**
 * link_schedule_user - schedule a message sender for wakeup after congestion
 * @l: congested link
 * @hdr: header of message that is being sent
 * Create pseudo msg to send back to user when congestion abates
 */
static int link_schedule_user(struct tipc_link *l, struct tipc_msg *hdr)
{
	u32 dnode = tipc_own_addr(l->net);
	u32 dport = msg_origport(hdr);
	struct sk_buff *skb;

	/* Create and schedule wakeup pseudo message */
	skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
			      dnode, l->addr, dport, 0, 0);
	if (!skb)
		return -ENOBUFS;
	msg_set_dest_droppable(buf_msg(skb), true);
	TIPC_SKB_CB(skb)->chain_imp = msg_importance(hdr);
	skb_queue_tail(&l->wakeupq, skb);
	l->stats.link_congs++;
	return -ELINKCONG;
}
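
/* Note: the SOCK_WAKEUP pseudo message created above is parked on l->wakeupq;
 * link_prepare_wakeup() below moves it to l->inputq once the backlog level
 * for its importance drops below the configured limit, so the blocked sender
 * can be woken up.
 */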

/**
 * link_prepare_wakeup - prepare users for wakeup after congestion
 * @l: congested link
 * Wake up a number of waiting users, as permitted by available space
 * in the send queue
 */
void link_prepare_wakeup(struct tipc_link *l)
{
	struct sk_buff *skb, *tmp;
	int imp, i = 0;

	skb_queue_walk_safe(&l->wakeupq, skb, tmp) {
		imp = TIPC_SKB_CB(skb)->chain_imp;
		if (l->backlog[imp].len < l->backlog[imp].limit) {
			skb_unlink(skb, &l->wakeupq);
			skb_queue_tail(l->inputq, skb);
		} else if (i++ > 10) {
			break;
		}
	}
}

void tipc_link_reset(struct tipc_link *l)
{
	l->peer_session = ANY_SESSION;
	l->session++;
	l->mtu = l->advertised_mtu;
	__skb_queue_purge(&l->transmq);
	__skb_queue_purge(&l->deferdq);
	skb_queue_splice_init(&l->wakeupq, l->inputq);
	__skb_queue_purge(&l->backlogq);
	l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
	l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
	l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
	l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
	l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
	kfree_skb(l->reasm_buf);
	kfree_skb(l->failover_reasm_skb);
	l->reasm_buf = NULL;
	l->failover_reasm_skb = NULL;
	l->rcv_unacked = 0;
	l->snd_nxt = 1;
	l->rcv_nxt = 1;
	l->acked = 0;
	l->silent_intv_cnt = 0;
	l->rst_cnt = 0;
	l->stale_cnt = 0;
	l->bc_peer_is_up = false;
	memset(&l->mon_state, 0, sizeof(l->mon_state));
	tipc_link_reset_stats(l);
}

/**
 * tipc_link_xmit(): enqueue buffer list according to queue situation
 * @link: link to use
 * @list: chain of buffers containing message
 * @xmitq: returned list of packets to be sent by caller
 *
 * Consumes the buffer chain.
 * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
 */
int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
		   struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb_peek(list));
	unsigned int maxwin = l->window;
	int imp = msg_importance(hdr);
	unsigned int mtu = l->mtu;
	u16 ack = l->rcv_nxt - 1;
	u16 seqno = l->snd_nxt;
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
	struct sk_buff_head *transmq = &l->transmq;
	struct sk_buff_head *backlogq = &l->backlogq;
	struct sk_buff *skb, *_skb, *bskb;
	int pkt_cnt = skb_queue_len(list);
	int rc = 0;

	if (unlikely(msg_size(hdr) > mtu)) {
		skb_queue_purge(list);
		return -EMSGSIZE;
	}

	/* Allow oversubscription of one data msg per source at congestion */
	if (unlikely(l->backlog[imp].len >= l->backlog[imp].limit)) {
		if (imp == TIPC_SYSTEM_IMPORTANCE) {
			pr_warn("%s<%s>, link overflow", link_rst_msg, l->name);
			return -ENOBUFS;
		}
		rc = link_schedule_user(l, hdr);
	}

	if (pkt_cnt > 1) {
		l->stats.sent_fragmented++;
		l->stats.sent_fragments += pkt_cnt;
	}

	/* Prepare each packet for sending, and add to relevant queue: */
	while (skb_queue_len(list)) {
		skb = skb_peek(list);
		hdr = buf_msg(skb);
		msg_set_seqno(hdr, seqno);
		msg_set_ack(hdr, ack);
		msg_set_bcast_ack(hdr, bc_ack);

		if (likely(skb_queue_len(transmq) < maxwin)) {
			_skb = skb_clone(skb, GFP_ATOMIC);
			if (!_skb) {
				skb_queue_purge(list);
				return -ENOBUFS;
			}
			__skb_dequeue(list);
			__skb_queue_tail(transmq, skb);
			__skb_queue_tail(xmitq, _skb);
			TIPC_SKB_CB(skb)->ackers = l->ackers;
			l->rcv_unacked = 0;
			l->stats.sent_pkts++;
			seqno++;
			continue;
		}
		if (tipc_msg_bundle(skb_peek_tail(backlogq), hdr, mtu)) {
			kfree_skb(__skb_dequeue(list));
			l->stats.sent_bundled++;
			continue;
		}
		if (tipc_msg_make_bundle(&bskb, hdr, mtu, l->addr)) {
			kfree_skb(__skb_dequeue(list));
			__skb_queue_tail(backlogq, bskb);
			l->backlog[msg_importance(buf_msg(bskb))].len++;
			l->stats.sent_bundled++;
			l->stats.sent_bundles++;
			continue;
		}
		l->backlog[imp].len += skb_queue_len(list);
		skb_queue_splice_tail_init(list, backlogq);
	}
	l->snd_nxt = seqno;
	return rc;
}
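
/* Note: each packet in tipc_link_xmit() takes one of three paths: it is
 * cloned straight onto transmq/xmitq while the send window permits, bundled
 * into the last pending backlog packet if it fits, or appended to backlogq
 * to be sent later by tipc_link_advance_backlog().
 */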

void tipc_link_advance_backlog(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	struct sk_buff *skb, *_skb;
	struct tipc_msg *hdr;
	u16 seqno = l->snd_nxt;
	u16 ack = l->rcv_nxt - 1;
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;

	while (skb_queue_len(&l->transmq) < l->window) {
		skb = skb_peek(&l->backlogq);
		if (!skb)
			break;
		_skb = skb_clone(skb, GFP_ATOMIC);
		if (!_skb)
			break;
		__skb_dequeue(&l->backlogq);
		hdr = buf_msg(skb);
		l->backlog[msg_importance(hdr)].len--;
		__skb_queue_tail(&l->transmq, skb);
		__skb_queue_tail(xmitq, _skb);
		TIPC_SKB_CB(skb)->ackers = l->ackers;
		msg_set_seqno(hdr, seqno);
		msg_set_ack(hdr, ack);
		msg_set_bcast_ack(hdr, bc_ack);
		l->rcv_unacked = 0;
		l->stats.sent_pkts++;
		seqno++;
	}
	l->snd_nxt = seqno;
}

static void link_retransmit_failure(struct tipc_link *l, struct sk_buff *skb)
{
	struct tipc_msg *hdr = buf_msg(skb);

	pr_warn("Retransmission failure on link <%s>\n", l->name);
	link_print(l, "State of link ");
	pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
		msg_user(hdr), msg_type(hdr), msg_size(hdr), msg_errcode(hdr));
	pr_info("sqno %u, prev: %x, src: %x\n",
		msg_seqno(hdr), msg_prevnode(hdr), msg_orignode(hdr));
}

/* tipc_link_retrans() - retransmit one or more packets
 * @l: the link to transmit on
 * @r: the receiving link ordering the retransmit. Same as l if unicast
 * @from: retransmit from (inclusive) this sequence number
 * @to: retransmit to (inclusive) this sequence number
 * xmitq: queue for accumulating the retransmitted packets
 */
int tipc_link_retrans(struct tipc_link *l, struct tipc_link *r,
		      u16 from, u16 to, struct sk_buff_head *xmitq)
{
	struct sk_buff *_skb, *skb = skb_peek(&l->transmq);
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
	u16 ack = l->rcv_nxt - 1;
	struct tipc_msg *hdr;

	if (!skb)
		return 0;

	/* Detect repeated retransmit failures on same packet */
	if (r->last_retransm != buf_seqno(skb)) {
		r->last_retransm = buf_seqno(skb);
		r->stale_limit = jiffies + msecs_to_jiffies(l->tolerance);
	} else if (++r->stale_cnt > 99 && time_after(jiffies, r->stale_limit)) {
		link_retransmit_failure(l, skb);
		if (link_is_bc_sndlink(l))
			return TIPC_LINK_DOWN_EVT;
		return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
	}

	skb_queue_walk(&l->transmq, skb) {
		hdr = buf_msg(skb);
		if (less(msg_seqno(hdr), from))
			continue;
		if (more(msg_seqno(hdr), to))
			break;
		_skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC);
		if (!_skb)
			return 0;
		hdr = buf_msg(_skb);
		msg_set_ack(hdr, ack);
		msg_set_bcast_ack(hdr, bc_ack);
		_skb->priority = TC_PRIO_CONTROL;
		__skb_queue_tail(xmitq, _skb);
		l->stats.retransmitted++;
	}
	return 0;
}
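
/* Note: repeated retransmits of the same head-of-transmq packet are tracked
 * via r->stale_cnt and r->stale_limit; once more than 99 identical attempts
 * have been made and the link tolerance interval has expired, the link is
 * forced down (TIPC_LINK_DOWN_EVT is returned for the broadcast send link).
 */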
1049
Jon Paul Maloyc637c102015-02-05 08:36:41 -05001050/* tipc_data_input - deliver data and name distr msgs to upper layer
Erik Hugne7ae934b2014-07-01 10:22:40 +02001051 *
Jon Paul Maloyc637c102015-02-05 08:36:41 -05001052 * Consumes buffer if message is of right type
Erik Hugne7ae934b2014-07-01 10:22:40 +02001053 * Node lock must be held
1054 */
Jon Paul Maloy52666982015-10-22 08:51:41 -04001055static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
Jon Paul Maloy9073fb82015-07-30 18:24:25 -04001056 struct sk_buff_head *inputq)
Erik Hugne7ae934b2014-07-01 10:22:40 +02001057{
Jon Maloy399574d2017-10-13 11:04:32 +02001058 struct sk_buff_head *mc_inputq = l->bc_rcvlink->inputq;
Jon Paul Maloya853e4c2017-01-18 13:50:52 -05001059 struct tipc_msg *hdr = buf_msg(skb);
1060
1061 switch (msg_user(hdr)) {
Erik Hugne7ae934b2014-07-01 10:22:40 +02001062 case TIPC_LOW_IMPORTANCE:
1063 case TIPC_MEDIUM_IMPORTANCE:
1064 case TIPC_HIGH_IMPORTANCE:
1065 case TIPC_CRITICAL_IMPORTANCE:
Jon Maloy2f487712017-10-13 11:04:31 +02001066 if (unlikely(msg_in_group(hdr) || msg_mcast(hdr))) {
Jon Maloy399574d2017-10-13 11:04:32 +02001067 skb_queue_tail(mc_inputq, skb);
Jon Paul Maloya853e4c2017-01-18 13:50:52 -05001068 return true;
1069 }
Gustavo A. R. Silvac53e0c72018-07-04 16:13:59 -05001070 /* else: fall through */
Jon Maloy2f487712017-10-13 11:04:31 +02001071 case CONN_MANAGER:
Jon Maloy36c0a9d2017-10-16 16:04:51 +02001072 skb_queue_tail(inputq, skb);
Jon Paul Maloyc637c102015-02-05 08:36:41 -05001073 return true;
Jon Maloy399574d2017-10-13 11:04:32 +02001074 case GROUP_PROTOCOL:
1075 skb_queue_tail(mc_inputq, skb);
1076 return true;
Erik Hugne7ae934b2014-07-01 10:22:40 +02001077 case NAME_DISTRIBUTOR:
Jon Paul Maloy52666982015-10-22 08:51:41 -04001078 l->bc_rcvlink->state = LINK_ESTABLISHED;
1079 skb_queue_tail(l->namedq, skb);
Jon Paul Maloyc637c102015-02-05 08:36:41 -05001080 return true;
Erik Hugne7ae934b2014-07-01 10:22:40 +02001081 case MSG_BUNDLER:
Jon Paul Maloydff29b12015-04-02 09:33:01 -04001082 case TUNNEL_PROTOCOL:
Jon Paul Maloyc637c102015-02-05 08:36:41 -05001083 case MSG_FRAGMENTER:
1084 case BCAST_PROTOCOL:
1085 return false;
1086 default:
1087 pr_warn("Dropping received illegal msg type\n");
1088 kfree_skb(skb);
1089 return false;
1090 };
1091}
1092
1093/* tipc_link_input - process packet that has passed link protocol check
1094 *
1095 * Consumes buffer
Jon Paul Maloyc637c102015-02-05 08:36:41 -05001096 */
Jon Paul Maloy9073fb82015-07-30 18:24:25 -04001097static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
1098 struct sk_buff_head *inputq)
Jon Paul Maloyc637c102015-02-05 08:36:41 -05001099{
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001100 struct tipc_msg *hdr = buf_msg(skb);
1101 struct sk_buff **reasm_skb = &l->reasm_buf;
Jon Paul Maloyc637c102015-02-05 08:36:41 -05001102 struct sk_buff *iskb;
Jon Paul Maloy9945e802015-10-15 14:52:40 -04001103 struct sk_buff_head tmpq;
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001104 int usr = msg_user(hdr);
Jon Paul Maloy6144a992015-07-30 18:24:16 -04001105 int rc = 0;
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001106 int pos = 0;
1107 int ipos = 0;
Jon Paul Maloyc637c102015-02-05 08:36:41 -05001108
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001109 if (unlikely(usr == TUNNEL_PROTOCOL)) {
1110 if (msg_type(hdr) == SYNCH_MSG) {
1111 __skb_queue_purge(&l->deferdq);
1112 goto drop;
Jon Paul Maloy8b4ed862015-03-25 12:07:26 -04001113 }
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001114 if (!tipc_msg_extract(skb, &iskb, &ipos))
1115 return rc;
1116 kfree_skb(skb);
1117 skb = iskb;
1118 hdr = buf_msg(skb);
1119 if (less(msg_seqno(hdr), l->drop_point))
1120 goto drop;
Jon Paul Maloy9073fb82015-07-30 18:24:25 -04001121 if (tipc_data_input(l, skb, inputq))
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001122 return rc;
1123 usr = msg_user(hdr);
1124 reasm_skb = &l->failover_reasm_skb;
1125 }
Jon Paul Maloyc637c102015-02-05 08:36:41 -05001126
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001127 if (usr == MSG_BUNDLER) {
Jon Paul Maloy9945e802015-10-15 14:52:40 -04001128 skb_queue_head_init(&tmpq);
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001129 l->stats.recv_bundles++;
1130 l->stats.recv_bundled += msg_msgcnt(hdr);
Jon Paul Maloyc637c102015-02-05 08:36:41 -05001131 while (tipc_msg_extract(skb, &iskb, &pos))
Jon Paul Maloy9945e802015-10-15 14:52:40 -04001132 tipc_data_input(l, iskb, &tmpq);
1133 tipc_skb_queue_splice_tail(&tmpq, inputq);
Jon Paul Maloy662921c2015-07-30 18:24:21 -04001134 return 0;
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001135 } else if (usr == MSG_FRAGMENTER) {
1136 l->stats.recv_fragments++;
1137 if (tipc_buf_append(reasm_skb, &skb)) {
1138 l->stats.recv_fragmented++;
Jon Paul Maloy9073fb82015-07-30 18:24:25 -04001139 tipc_data_input(l, skb, inputq);
Jon Paul Maloy52666982015-10-22 08:51:41 -04001140 } else if (!*reasm_skb && !link_is_bc_rcvlink(l)) {
1141 pr_warn_ratelimited("Unable to build fragment list\n");
Jon Paul Maloy662921c2015-07-30 18:24:21 -04001142 return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
Jon Paul Maloyc637c102015-02-05 08:36:41 -05001143 }
Jon Paul Maloy662921c2015-07-30 18:24:21 -04001144 return 0;
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001145 } else if (usr == BCAST_PROTOCOL) {
Jon Paul Maloyc72fa872015-10-22 08:51:46 -04001146 tipc_bcast_lock(l->net);
Jon Paul Maloy52666982015-10-22 08:51:41 -04001147 tipc_link_bc_init_rcv(l->bc_rcvlink, hdr);
Jon Paul Maloyc72fa872015-10-22 08:51:46 -04001148 tipc_bcast_unlock(l->net);
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001149 }
1150drop:
1151 kfree_skb(skb);
Jon Paul Maloy662921c2015-07-30 18:24:21 -04001152 return 0;
Erik Hugne7ae934b2014-07-01 10:22:40 +02001153}
1154
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001155static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked)
1156{
1157 bool released = false;
1158 struct sk_buff *skb, *tmp;
1159
1160 skb_queue_walk_safe(&l->transmq, skb, tmp) {
1161 if (more(buf_seqno(skb), acked))
1162 break;
1163 __skb_unlink(skb, &l->transmq);
1164 kfree_skb(skb);
1165 released = true;
1166 }
1167 return released;
1168}
1169
Jon Paul Maloy34b9cd62016-04-15 13:33:07 -04001170/* tipc_link_build_state_msg: prepare link state message for transmission
Jon Paul Maloy52666982015-10-22 08:51:41 -04001171 *
1172 * Note that sending of broadcast ack is coordinated among nodes, to reduce
1173 * risk of ack storms towards the sender
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001174 */
Jon Paul Maloy34b9cd62016-04-15 13:33:07 -04001175int tipc_link_build_state_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001176{
Jon Paul Maloy52666982015-10-22 08:51:41 -04001177 if (!l)
1178 return 0;
1179
1180 /* Broadcast ACK must be sent via a unicast link => defer to caller */
1181 if (link_is_bc_rcvlink(l)) {
Jon Paul Maloye74a3862016-03-03 14:23:21 -05001182 if (((l->rcv_nxt ^ tipc_own_addr(l->net)) & 0xf) != 0xf)
Jon Paul Maloy52666982015-10-22 08:51:41 -04001183 return 0;
1184 l->rcv_unacked = 0;
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04001185
1186 /* Use snd_nxt to store peer's snd_nxt in broadcast rcv link */
1187 l->snd_nxt = l->rcv_nxt;
1188 return TIPC_LINK_SND_STATE;
Jon Paul Maloy52666982015-10-22 08:51:41 -04001189 }
1190
1191 /* Unicast ACK */
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001192 l->rcv_unacked = 0;
1193 l->stats.sent_acks++;
Jon Maloy8d6e79d2017-11-08 09:59:26 +01001194 tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, 0, xmitq);
Jon Paul Maloy52666982015-10-22 08:51:41 -04001195 return 0;
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001196}
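
/* Note: for a broadcast receive link the ACK is deliberately staggered; it is
 * only requested when the low nibble of rcv_nxt XOR the own node address
 * equals 0xf, so different nodes ack at different sequence numbers and ack
 * storms towards the broadcast sender are avoided.
 */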

/* tipc_link_build_reset_msg: prepare link RESET or ACTIVATE message
 */
void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	int mtyp = RESET_MSG;
	struct sk_buff *skb;

	if (l->state == LINK_ESTABLISHING)
		mtyp = ACTIVATE_MSG;

	tipc_link_build_proto_msg(l, mtyp, 0, 0, 0, 0, 0, xmitq);

	/* Inform peer that this endpoint is going down if applicable */
	skb = skb_peek_tail(xmitq);
	if (skb && (l->state == LINK_RESET))
		msg_set_peer_stopping(buf_msg(skb), 1);
}

/* tipc_link_build_nack_msg: prepare link nack message for transmission
 * Note that sending of broadcast NACK is coordinated among nodes, to
 * reduce the risk of NACK storms towards the sender
 */
static int tipc_link_build_nack_msg(struct tipc_link *l,
				    struct sk_buff_head *xmitq)
{
	u32 def_cnt = ++l->stats.deferred_recv;
	int match1, match2;

	if (link_is_bc_rcvlink(l)) {
		match1 = def_cnt & 0xf;
		match2 = tipc_own_addr(l->net) & 0xf;
		if (match1 == match2)
			return TIPC_LINK_SND_STATE;
		return 0;
	}

	if ((skb_queue_len(&l->deferdq) == 1) || !(def_cnt % TIPC_NACK_INTV))
		tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, 0, xmitq);
	return 0;
}
1238
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001239/* tipc_link_rcv - process TIPC packets/messages arriving from off-node
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001240 * @l: the link that should handle the message
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001241 * @skb: TIPC packet
1242 * @xmitq: queue to place packets to be sent after this call
1243 */
1244int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
1245 struct sk_buff_head *xmitq)
1246{
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001247 struct sk_buff_head *defq = &l->deferdq;
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001248 struct tipc_msg *hdr;
Jon Paul Maloy81204c42015-10-15 14:52:42 -04001249 u16 seqno, rcv_nxt, win_lim;
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001250 int rc = 0;
1251
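	/* First handle the arriving packet, then keep delivering from the
	 * deferred queue for as long as the packets there are in sequence
	 */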
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001252 do {
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001253 hdr = buf_msg(skb);
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001254 seqno = msg_seqno(hdr);
1255 rcv_nxt = l->rcv_nxt;
Jon Paul Maloy81204c42015-10-15 14:52:42 -04001256 win_lim = rcv_nxt + TIPC_MAX_LINK_WIN;
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001257
1258 /* Verify and update link state */
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001259 if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
1260 return tipc_link_proto_rcv(l, skb, xmitq);
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001261
Jon Paul Maloy662921c2015-07-30 18:24:21 -04001262 if (unlikely(!link_is_up(l))) {
Jon Paul Maloy73f646c2015-10-15 14:52:44 -04001263 if (l->state == LINK_ESTABLISHING)
1264 rc = TIPC_LINK_UP_EVT;
1265 goto drop;
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001266 }
1267
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001268 /* Don't send probe at next timeout expiration */
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001269 l->silent_intv_cnt = 0;
1270
Jon Paul Maloy81204c42015-10-15 14:52:42 -04001271 /* Drop if outside receive window */
1272 if (unlikely(less(seqno, rcv_nxt) || more(seqno, win_lim))) {
1273 l->stats.duplicates++;
1274 goto drop;
1275 }
1276
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001277 /* Forward queues and wake up waiting users */
1278 if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) {
Jon Maloya4dc70d2018-07-06 15:22:36 +02001279 l->stale_cnt = 0;
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001280 tipc_link_advance_backlog(l, xmitq);
1281 if (unlikely(!skb_queue_empty(&l->wakeupq)))
1282 link_prepare_wakeup(l);
1283 }
1284
Jon Paul Maloy81204c42015-10-15 14:52:42 -04001285 /* Defer delivery if sequence gap */
1286 if (unlikely(seqno != rcv_nxt)) {
Jon Paul Maloy8306f992015-10-15 14:52:43 -04001287 __tipc_skb_queue_sorted(defq, seqno, skb);
Jon Paul Maloye0a05eb2016-09-01 13:52:51 -04001288 rc |= tipc_link_build_nack_msg(l, xmitq);
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001289 break;
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001290 }
1291
Jon Paul Maloy81204c42015-10-15 14:52:42 -04001292 /* Deliver packet */
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001293 l->rcv_nxt++;
Jon Paul Maloy95901122016-11-25 10:35:02 -05001294 l->stats.recv_pkts++;
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001295 if (!tipc_data_input(l, skb, l->inputq))
Jon Paul Maloy52666982015-10-22 08:51:41 -04001296 rc |= tipc_link_input(l, skb, l->inputq);
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001297 if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN))
Jon Paul Maloy34b9cd62016-04-15 13:33:07 -04001298 rc |= tipc_link_build_state_msg(l, xmitq);
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04001299 if (unlikely(rc & ~TIPC_LINK_SND_STATE))
Jon Paul Maloy52666982015-10-22 08:51:41 -04001300 break;
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001301 } while ((skb = __skb_dequeue(defq)));
1302
1303 return rc;
1304drop:
1305 kfree_skb(skb);
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001306 return rc;
1307}
1308
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001309static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
Jon Maloy8d6e79d2017-11-08 09:59:26 +01001310 bool probe_reply, u16 rcvgap,
1311 int tolerance, int priority,
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001312 struct sk_buff_head *xmitq)
1313{
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04001314 struct tipc_link *bcl = l->bc_rcvlink;
Jon Paul Maloye74a3862016-03-03 14:23:21 -05001315 struct sk_buff *skb;
1316 struct tipc_msg *hdr;
1317 struct sk_buff_head *dfq = &l->deferdq;
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04001318 bool node_up = link_is_up(bcl);
Jon Paul Maloy35c55c92016-06-13 20:46:22 -04001319 struct tipc_mon_state *mstate = &l->mon_state;
1320 int dlen = 0;
1321 void *data;
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001322
1323 /* Don't send protocol message during reset or link failover */
Jon Paul Maloy662921c2015-07-30 18:24:21 -04001324 if (tipc_link_is_blocked(l))
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001325 return;
1326
Jon Paul Maloye74a3862016-03-03 14:23:21 -05001327 if (!tipc_link_is_up(l) && (mtyp == STATE_MSG))
1328 return;
1329
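	/* If packets are already deferred, report the real gap up to the
	 * first of them instead of the gap requested by the caller
	 */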
1330 if (!skb_queue_empty(dfq))
1331 rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;
1332
1333 skb = tipc_msg_create(LINK_PROTOCOL, mtyp, INT_H_SIZE,
Jon Paul Maloy35c55c92016-06-13 20:46:22 -04001334 tipc_max_domain_size, l->addr,
Jon Paul Maloye74a3862016-03-03 14:23:21 -05001335 tipc_own_addr(l->net), 0, 0, 0);
1336 if (!skb)
1337 return;
1338
1339 hdr = buf_msg(skb);
Jon Paul Maloy35c55c92016-06-13 20:46:22 -04001340 data = msg_data(hdr);
Jon Paul Maloye74a3862016-03-03 14:23:21 -05001341 msg_set_session(hdr, l->session);
1342 msg_set_bearer_id(hdr, l->bearer_id);
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001343 msg_set_net_plane(hdr, l->net_plane);
Jon Paul Maloy52666982015-10-22 08:51:41 -04001344 msg_set_next_sent(hdr, l->snd_nxt);
1345 msg_set_ack(hdr, l->rcv_nxt - 1);
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04001346 msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1);
Jon Paul Maloy06bd2b12016-10-27 18:51:55 -04001347 msg_set_bc_ack_invalid(hdr, !node_up);
Jon Paul Maloy52666982015-10-22 08:51:41 -04001348 msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001349 msg_set_link_tolerance(hdr, tolerance);
1350 msg_set_linkprio(hdr, priority);
1351 msg_set_redundant_link(hdr, node_up);
1352 msg_set_seq_gap(hdr, 0);
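	/* Protocol messages are not sequenced; give them a seqno far away
	 * from the current data window, presumably so they can never be
	 * mistaken for in-window data packets
	 */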
Jon Paul Maloy52666982015-10-22 08:51:41 -04001353 msg_set_seqno(hdr, l->snd_nxt + U16_MAX / 2);
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001354
1355 if (mtyp == STATE_MSG) {
Jon Paul Maloye74a3862016-03-03 14:23:21 -05001356 msg_set_seq_gap(hdr, rcvgap);
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04001357 msg_set_bc_gap(hdr, link_bc_rcv_gap(bcl));
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001358 msg_set_probe(hdr, probe);
Jon Maloy8d6e79d2017-11-08 09:59:26 +01001359 msg_set_is_keepalive(hdr, probe || probe_reply);
Jon Paul Maloy35c55c92016-06-13 20:46:22 -04001360 tipc_mon_prep(l->net, data, &dlen, mstate, l->bearer_id);
1361 msg_set_size(hdr, INT_H_SIZE + dlen);
1362 skb_trim(skb, INT_H_SIZE + dlen);
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001363 l->stats.sent_states++;
Jon Paul Maloy52666982015-10-22 08:51:41 -04001364 l->rcv_unacked = 0;
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001365 } else {
1366 /* RESET_MSG or ACTIVATE_MSG */
1367 msg_set_max_pkt(hdr, l->advertised_mtu);
Jon Paul Maloy35c55c92016-06-13 20:46:22 -04001368 strcpy(data, l->if_name);
1369 msg_set_size(hdr, INT_H_SIZE + TIPC_MAX_IF_NAME);
1370 skb_trim(skb, INT_H_SIZE + TIPC_MAX_IF_NAME);
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001371 }
Jon Paul Maloye74a3862016-03-03 14:23:21 -05001372 if (probe)
1373 l->stats.sent_probes++;
1374 if (rcvgap)
1375 l->stats.sent_nacks++;
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001376 skb->priority = TC_PRIO_CONTROL;
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001377 __skb_queue_tail(xmitq, skb);
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001378}
Per Lidenb97bf3f2006-01-02 19:04:38 +01001379
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001380/* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001381 * with contents of the link's transmit and backlog queues.
Per Lidenb97bf3f2006-01-02 19:04:38 +01001382 */
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001383void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
1384 int mtyp, struct sk_buff_head *xmitq)
Per Lidenb97bf3f2006-01-02 19:04:38 +01001385{
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001386 struct sk_buff *skb, *tnlskb;
1387 struct tipc_msg *hdr, tnlhdr;
1388 struct sk_buff_head *queue = &l->transmq;
1389 struct sk_buff_head tmpxq, tnlq;
1390 u16 pktlen, pktcnt, seqno = l->snd_nxt;
Per Lidenb97bf3f2006-01-02 19:04:38 +01001391
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001392 if (!tnl)
Per Lidenb97bf3f2006-01-02 19:04:38 +01001393 return;
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001394
1395 skb_queue_head_init(&tnlq);
1396 skb_queue_head_init(&tmpxq);
1397
1398 /* At least one packet required for safe algorithm => add dummy */
1399 skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
Jon Paul Maloye74a3862016-03-03 14:23:21 -05001400 BASIC_H_SIZE, 0, l->addr, tipc_own_addr(l->net),
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001401 0, 0, TIPC_ERR_NO_PORT);
Ying Xuea6ca1092014-11-26 11:41:55 +08001402 if (!skb) {
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001403 pr_warn("%sunable to create tunnel packet\n", link_co_err);
Per Lidenb97bf3f2006-01-02 19:04:38 +01001404 return;
Allan Stephens5392d642006-06-25 23:52:50 -07001405 }
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001406 skb_queue_tail(&tnlq, skb);
1407 tipc_link_xmit(l, &tnlq, &tmpxq);
1408 __skb_queue_purge(&tmpxq);
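	/* The dummy now sits on l->transmq (or backlogq); the packets queued
	 * for the wire in tmpxq are not needed here and are discarded
	 */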
Per Lidenb97bf3f2006-01-02 19:04:38 +01001409
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001410 /* Initialize reusable tunnel packet header */
Jon Paul Maloye74a3862016-03-03 14:23:21 -05001411 tipc_msg_init(tipc_own_addr(l->net), &tnlhdr, TUNNEL_PROTOCOL,
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001412 mtyp, INT_H_SIZE, l->addr);
1413 pktcnt = skb_queue_len(&l->transmq) + skb_queue_len(&l->backlogq);
1414 msg_set_msgcnt(&tnlhdr, pktcnt);
1415 msg_set_bearer_id(&tnlhdr, l->peer_bearer_id);
1416tnl:
1417 /* Wrap each packet into a tunnel packet */
Jon Paul Maloy05dcc5a2015-03-13 16:08:10 -04001418 skb_queue_walk(queue, skb) {
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001419 hdr = buf_msg(skb);
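		/* Backlog packets are not yet sequenced; number them from
		 * snd_nxt here so the wrapped series stays contiguous
		 */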
1420 if (queue == &l->backlogq)
1421 msg_set_seqno(hdr, seqno++);
1422 pktlen = msg_size(hdr);
1423 msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
Parthasarathy Bhuvaragan57d5f642017-01-13 15:46:25 +01001424 tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE, GFP_ATOMIC);
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001425 if (!tnlskb) {
1426 pr_warn("%sunable to send packet\n", link_co_err);
Per Lidenb97bf3f2006-01-02 19:04:38 +01001427 return;
1428 }
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001429 skb_copy_to_linear_data(tnlskb, &tnlhdr, INT_H_SIZE);
1430 skb_copy_to_linear_data_offset(tnlskb, INT_H_SIZE, hdr, pktlen);
1431 __skb_queue_tail(&tnlq, tnlskb);
Per Lidenb97bf3f2006-01-02 19:04:38 +01001432 }
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001433 if (queue != &l->backlogq) {
1434 queue = &l->backlogq;
1435 goto tnl;
Jon Paul Maloydd3f9e72015-05-14 10:46:18 -04001436 }
Per Lidenb97bf3f2006-01-02 19:04:38 +01001437
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001438 tipc_link_xmit(tnl, &tnlq, xmitq);
Jon Paul Maloyf006c9c2014-02-13 17:29:11 -05001439
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001440 if (mtyp == FAILOVER_MSG) {
1441 tnl->drop_point = l->rcv_nxt;
1442 tnl->failover_reasm_skb = l->reasm_buf;
1443 l->reasm_buf = NULL;
Jon Paul Maloyf006c9c2014-02-13 17:29:11 -05001444 }
Per Lidenb97bf3f2006-01-02 19:04:38 +01001445}
1446
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001447/* tipc_link_proto_rcv(): receive link level protocol message:
1448 * Note that network plane id propagates through the network, and may
1449 * change at any time. The node with lowest numerical id determines
1450 * network plane
1451 */
1452static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
1453 struct sk_buff_head *xmitq)
1454{
1455 struct tipc_msg *hdr = buf_msg(skb);
1456 u16 rcvgap = 0;
Jon Paul Maloyc1ab3f1d2015-10-22 08:51:38 -04001457 u16 ack = msg_ack(hdr);
1458 u16 gap = msg_seq_gap(hdr);
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001459 u16 peers_snd_nxt = msg_next_sent(hdr);
1460 u16 peers_tol = msg_link_tolerance(hdr);
1461 u16 peers_prio = msg_linkprio(hdr);
Jon Paul Maloy2be80c22015-08-20 02:12:56 -04001462 u16 rcv_nxt = l->rcv_nxt;
Jon Paul Maloy35c55c92016-06-13 20:46:22 -04001463 u16 dlen = msg_data_sz(hdr);
Jon Paul Maloy73f646c2015-10-15 14:52:44 -04001464 int mtyp = msg_type(hdr);
Jon Maloy8d6e79d2017-11-08 09:59:26 +01001465 bool reply = msg_probe(hdr);
Jon Paul Maloy35c55c92016-06-13 20:46:22 -04001466 void *data;
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001467 char *if_name;
1468 int rc = 0;
1469
Jon Paul Maloy52666982015-10-22 08:51:41 -04001470 if (tipc_link_is_blocked(l) || !xmitq)
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001471 goto exit;
1472
Jon Paul Maloye74a3862016-03-03 14:23:21 -05001473 if (tipc_own_addr(l->net) > msg_prevnode(hdr))
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001474 l->net_plane = msg_net_plane(hdr);
1475
Jon Paul Maloy35c55c92016-06-13 20:46:22 -04001476 skb_linearize(skb);
1477 hdr = buf_msg(skb);
1478 data = msg_data(hdr);
1479
Jon Paul Maloy73f646c2015-10-15 14:52:44 -04001480 switch (mtyp) {
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001481 case RESET_MSG:
1482
1483 /* Ignore duplicate RESET with old session number */
1484 if ((less_eq(msg_session(hdr), l->peer_session)) &&
Jon Paul Maloye74a3862016-03-03 14:23:21 -05001485 (l->peer_session != ANY_SESSION))
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001486 break;
1487 /* fall thru' */
Jon Paul Maloy662921c2015-07-30 18:24:21 -04001488
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001489 case ACTIVATE_MSG:
1490
1491 /* Complete own link name with peer's interface name */
1492 if_name = strrchr(l->name, ':') + 1;
1493 if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
1494 break;
1495 if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
1496 break;
Jon Paul Maloy35c55c92016-06-13 20:46:22 -04001497 strncpy(if_name, data, TIPC_MAX_IF_NAME);
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001498
1499 /* Update own tolerance if peer indicates a non-zero value */
1500 if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
1501 l->tolerance = peers_tol;
1502
1503 /* Update own priority if peer's priority is higher */
1504 if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
1505 l->priority = peers_prio;
1506
Jon Paul Maloy73f646c2015-10-15 14:52:44 -04001507 /* ACTIVATE_MSG serves as PEER_RESET if link is already down */
Jon Paul Maloy634696b2016-04-15 13:33:03 -04001508 if (msg_peer_stopping(hdr))
1509 rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1510 else if ((mtyp == RESET_MSG) || !link_is_up(l))
Jon Paul Maloy73f646c2015-10-15 14:52:44 -04001511 rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
1512
1513 /* ACTIVATE_MSG takes up link if it was already locally reset */
1514 if ((mtyp == ACTIVATE_MSG) && (l->state == LINK_ESTABLISHING))
1515 rc = TIPC_LINK_UP_EVT;
1516
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001517 l->peer_session = msg_session(hdr);
1518 l->peer_bearer_id = msg_bearer_id(hdr);
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001519 if (l->mtu > msg_max_pkt(hdr))
1520 l->mtu = msg_max_pkt(hdr);
1521 break;
Jon Paul Maloy662921c2015-07-30 18:24:21 -04001522
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001523 case STATE_MSG:
Jon Paul Maloy662921c2015-07-30 18:24:21 -04001524
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001525 /* Update own tolerance if peer indicates a non-zero value */
1526 if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
1527 l->tolerance = peers_tol;
1528
Jon Paul Maloyf7967552016-11-23 21:05:26 -05001529 /* Update own prio if peer indicates a different value */
1530 if ((peers_prio != l->priority) &&
1531 in_range(peers_prio, 1, TIPC_MAX_LINK_PRI)) {
Richard Alpe81729812016-02-01 08:19:57 +01001532 l->priority = peers_prio;
1533 rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1534 }
1535
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001536 l->silent_intv_cnt = 0;
1537 l->stats.recv_states++;
1538 if (msg_probe(hdr))
1539 l->stats.recv_probes++;
Jon Paul Maloy73f646c2015-10-15 14:52:44 -04001540
1541 if (!link_is_up(l)) {
1542 if (l->state == LINK_ESTABLISHING)
1543 rc = TIPC_LINK_UP_EVT;
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001544 break;
Jon Paul Maloy73f646c2015-10-15 14:52:44 -04001545 }
Jon Paul Maloy35c55c92016-06-13 20:46:22 -04001546 tipc_mon_rcv(l->net, data, dlen, l->addr,
1547 &l->mon_state, l->bearer_id);
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001548
Jon Paul Maloy662921c2015-07-30 18:24:21 -04001549 /* Send NACK if peer has sent pkts we haven't received yet */
Jon Paul Maloy2be80c22015-08-20 02:12:56 -04001550 if (more(peers_snd_nxt, rcv_nxt) && !tipc_link_is_synching(l))
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001551 rcvgap = peers_snd_nxt - l->rcv_nxt;
Jon Maloy8d6e79d2017-11-08 09:59:26 +01001552 if (rcvgap || reply)
1553 tipc_link_build_proto_msg(l, STATE_MSG, 0, reply,
1554 rcvgap, 0, 0, xmitq);
Jon Paul Maloyc1ab3f1d2015-10-22 08:51:38 -04001555 tipc_link_release_pkts(l, ack);
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001556
1557 /* If NACK, retransmit will now start at right position */
Jon Paul Maloyc1ab3f1d2015-10-22 08:51:38 -04001558 if (gap) {
Jon Paul Maloy40501f92017-08-21 17:59:30 +02001559 rc = tipc_link_retrans(l, l, ack + 1, ack + gap, xmitq);
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001560 l->stats.recv_nacks++;
1561 }
Jon Paul Maloy662921c2015-07-30 18:24:21 -04001562
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001563 tipc_link_advance_backlog(l, xmitq);
1564 if (unlikely(!skb_queue_empty(&l->wakeupq)))
1565 link_prepare_wakeup(l);
1566 }
1567exit:
1568 kfree_skb(skb);
1569 return rc;
1570}
1571
Jon Paul Maloy52666982015-10-22 08:51:41 -04001572/* tipc_link_build_bc_proto_msg() - create broadcast protocol message
1573 */
1574static bool tipc_link_build_bc_proto_msg(struct tipc_link *l, bool bcast,
1575 u16 peers_snd_nxt,
1576 struct sk_buff_head *xmitq)
1577{
1578 struct sk_buff *skb;
1579 struct tipc_msg *hdr;
1580 struct sk_buff *dfrd_skb = skb_peek(&l->deferdq);
1581 u16 ack = l->rcv_nxt - 1;
1582 u16 gap_to = peers_snd_nxt - 1;
1583
1584 skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
Jon Paul Maloye74a3862016-03-03 14:23:21 -05001585 0, l->addr, tipc_own_addr(l->net), 0, 0, 0);
Jon Paul Maloy52666982015-10-22 08:51:41 -04001586 if (!skb)
1587 return false;
1588 hdr = buf_msg(skb);
1589 msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
1590 msg_set_bcast_ack(hdr, ack);
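	/* NACK everything from the last in-sequence packet up to the peer's
	 * send position, narrowed to the first deferred packet if one exists
	 */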
1591 msg_set_bcgap_after(hdr, ack);
1592 if (dfrd_skb)
1593 gap_to = buf_seqno(dfrd_skb) - 1;
1594 msg_set_bcgap_to(hdr, gap_to);
1595 msg_set_non_seq(hdr, bcast);
1596 __skb_queue_tail(xmitq, skb);
1597 return true;
1598}
1599
1600/* tipc_link_build_bc_init_msg() - synchronize broadcast link endpoints.
1601 *
1602 * Give a newly added peer node the sequence number where it should
1603 * start receiving and acking broadcast packets.
1604 */
Wu Fengguang742e0382015-10-24 22:56:01 +08001605static void tipc_link_build_bc_init_msg(struct tipc_link *l,
1606 struct sk_buff_head *xmitq)
Jon Paul Maloy52666982015-10-22 08:51:41 -04001607{
1608 struct sk_buff_head list;
1609
1610 __skb_queue_head_init(&list);
1611 if (!tipc_link_build_bc_proto_msg(l->bc_rcvlink, false, 0, &list))
1612 return;
Jon Paul Maloy06bd2b12016-10-27 18:51:55 -04001613 msg_set_bc_ack_invalid(buf_msg(skb_peek(&list)), true);
Jon Paul Maloy52666982015-10-22 08:51:41 -04001614 tipc_link_xmit(l, &list, xmitq);
1615}
1616
1617/* tipc_link_bc_init_rcv - receive initial broadcast synch data from peer
1618 */
1619void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr)
1620{
1621 int mtyp = msg_type(hdr);
1622 u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
1623
1624 if (link_is_up(l))
1625 return;
1626
1627 if (msg_user(hdr) == BCAST_PROTOCOL) {
1628 l->rcv_nxt = peers_snd_nxt;
1629 l->state = LINK_ESTABLISHED;
1630 return;
1631 }
1632
1633 if (l->peer_caps & TIPC_BCAST_SYNCH)
1634 return;
1635
1636 if (msg_peer_node_is_up(hdr))
1637 return;
1638
1639 /* Compatibility: accept older, less safe initial synch data */
1640 if ((mtyp == RESET_MSG) || (mtyp == ACTIVATE_MSG))
1641 l->rcv_nxt = peers_snd_nxt;
1642}
1643
Jon Paul Maloy7c4a54b2016-09-01 13:52:50 -04001644/* link_bc_retr_eval() - check if the indicated range can be retransmitted now
1645 * - Adjust permitted range if there is overlap with previous retransmission
1646 */
1647static bool link_bc_retr_eval(struct tipc_link *l, u16 *from, u16 *to)
1648{
1649 unsigned long elapsed = jiffies_to_msecs(jiffies - l->prev_retr);
1650
1651 if (less(*to, *from))
1652 return false;
1653
1654 /* New retransmission request */
1655 if ((elapsed > TIPC_BC_RETR_LIMIT) ||
1656 less(*to, l->prev_from) || more(*from, l->prev_to)) {
1657 l->prev_from = *from;
1658 l->prev_to = *to;
1659 l->prev_retr = jiffies;
1660 return true;
1661 }
1662
1663 /* Inside range of previous retransmit */
1664 if (!less(*from, l->prev_from) && !more(*to, l->prev_to))
1665 return false;
1666
1667 /* Fully or partially outside previous range => exclude overlap */
1668 if (less(*from, l->prev_from)) {
1669 *to = l->prev_from - 1;
1670 l->prev_from = *from;
1671 }
1672 if (more(*to, l->prev_to)) {
1673 *from = l->prev_to + 1;
1674 l->prev_to = *to;
1675 }
1676 l->prev_retr = jiffies;
1677 return true;
1678}
1679
Jon Paul Maloy52666982015-10-22 08:51:41 -04001680/* tipc_link_bc_sync_rcv - update rcv link according to peer's send state
1681 */
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04001682int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
1683 struct sk_buff_head *xmitq)
Jon Paul Maloy52666982015-10-22 08:51:41 -04001684{
Jon Paul Maloy7c4a54b2016-09-01 13:52:50 -04001685 struct tipc_link *snd_l = l->bc_sndlink;
Jon Paul Maloy52666982015-10-22 08:51:41 -04001686 u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04001687 u16 from = msg_bcast_ack(hdr) + 1;
1688 u16 to = from + msg_bc_gap(hdr) - 1;
1689 int rc = 0;
Jon Paul Maloy52666982015-10-22 08:51:41 -04001690
1691 if (!link_is_up(l))
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04001692 return rc;
Jon Paul Maloy52666982015-10-22 08:51:41 -04001693
1694 if (!msg_peer_node_is_up(hdr))
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04001695 return rc;
Jon Paul Maloy52666982015-10-22 08:51:41 -04001696
Jon Paul Maloy2d18ac42016-07-11 16:08:35 -04001697	/* Open when peer acknowledges our bcast init msg (pkt #1) */
1698 if (msg_ack(hdr))
1699 l->bc_peer_is_up = true;
1700
1701 if (!l->bc_peer_is_up)
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04001702 return rc;
Jon Paul Maloy52666982015-10-22 08:51:41 -04001703
Jon Paul Maloy7c4a54b2016-09-01 13:52:50 -04001704 l->stats.recv_nacks++;
1705
Jon Paul Maloy52666982015-10-22 08:51:41 -04001706 /* Ignore if peers_snd_nxt goes beyond receive window */
1707 if (more(peers_snd_nxt, l->rcv_nxt + l->window))
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04001708 return rc;
1709
Jon Paul Maloy7c4a54b2016-09-01 13:52:50 -04001710 if (link_bc_retr_eval(snd_l, &from, &to))
Jon Paul Maloy40501f92017-08-21 17:59:30 +02001711 rc = tipc_link_retrans(snd_l, l, from, to, xmitq);
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04001712
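	/* On a broadcast rcv link, snd_nxt mirrors the peer's send position;
	 * any remaining gap is reported back via a STATE message
	 */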
1713 l->snd_nxt = peers_snd_nxt;
1714 if (link_bc_rcv_gap(l))
1715 rc |= TIPC_LINK_SND_STATE;
1716
1717 /* Return now if sender supports nack via STATE messages */
1718 if (l->peer_caps & TIPC_BCAST_STATE_NACK)
1719 return rc;
1720
1721 /* Otherwise, be backwards compatible */
Jon Paul Maloy52666982015-10-22 08:51:41 -04001722
1723 if (!more(peers_snd_nxt, l->rcv_nxt)) {
1724 l->nack_state = BC_NACK_SND_CONDITIONAL;
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04001725 return 0;
Jon Paul Maloy52666982015-10-22 08:51:41 -04001726 }
1727
1728 /* Don't NACK if one was recently sent or peeked */
1729 if (l->nack_state == BC_NACK_SND_SUPPRESS) {
1730 l->nack_state = BC_NACK_SND_UNCONDITIONAL;
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04001731 return 0;
Jon Paul Maloy52666982015-10-22 08:51:41 -04001732 }
1733
1734 /* Conditionally delay NACK sending until next synch rcv */
1735 if (l->nack_state == BC_NACK_SND_CONDITIONAL) {
1736 l->nack_state = BC_NACK_SND_UNCONDITIONAL;
1737 if ((peers_snd_nxt - l->rcv_nxt) < TIPC_MIN_LINK_WIN)
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04001738 return 0;
Jon Paul Maloy52666982015-10-22 08:51:41 -04001739 }
1740
1741 /* Send NACK now but suppress next one */
1742 tipc_link_build_bc_proto_msg(l, true, peers_snd_nxt, xmitq);
1743 l->nack_state = BC_NACK_SND_SUPPRESS;
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04001744 return 0;
Jon Paul Maloy52666982015-10-22 08:51:41 -04001745}
1746
1747void tipc_link_bc_ack_rcv(struct tipc_link *l, u16 acked,
1748 struct sk_buff_head *xmitq)
1749{
1750 struct sk_buff *skb, *tmp;
1751 struct tipc_link *snd_l = l->bc_sndlink;
1752
1753 if (!link_is_up(l) || !l->bc_peer_is_up)
1754 return;
1755
1756 if (!more(acked, l->acked))
1757 return;
1758
1759 /* Skip over packets peer has already acked */
1760 skb_queue_walk(&snd_l->transmq, skb) {
1761 if (more(buf_seqno(skb), l->acked))
1762 break;
1763 }
1764
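	/* A broadcast packet is freed only when its 'ackers' count drops to
	 * zero, i.e. when every peer it was sent to has acked it
	 */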
1765 /* Update/release the packets peer is acking now */
1766 skb_queue_walk_from_safe(&snd_l->transmq, skb, tmp) {
1767 if (more(buf_seqno(skb), acked))
1768 break;
1769 if (!--TIPC_SKB_CB(skb)->ackers) {
1770 __skb_unlink(skb, &snd_l->transmq);
1771 kfree_skb(skb);
1772 }
1773 }
1774 l->acked = acked;
1775 tipc_link_advance_backlog(snd_l, xmitq);
1776 if (unlikely(!skb_queue_empty(&snd_l->wakeupq)))
1777 link_prepare_wakeup(snd_l);
1778}
1779
1780/* tipc_link_bc_nack_rcv(): receive broadcast nack message
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04001781 * This function is here for backwards compatibility, since
1782 * no BCAST_PROTOCOL/STATE messages occur from TIPC v2.5.
Jon Paul Maloy52666982015-10-22 08:51:41 -04001783 */
1784int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
1785 struct sk_buff_head *xmitq)
1786{
1787 struct tipc_msg *hdr = buf_msg(skb);
1788 u32 dnode = msg_destnode(hdr);
1789 int mtyp = msg_type(hdr);
1790 u16 acked = msg_bcast_ack(hdr);
1791 u16 from = acked + 1;
1792 u16 to = msg_bcgap_to(hdr);
1793 u16 peers_snd_nxt = to + 1;
1794 int rc = 0;
1795
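	/* All needed header fields were extracted above, so the buffer can be
	 * consumed right away
	 */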
1796 kfree_skb(skb);
1797
1798 if (!tipc_link_is_up(l) || !l->bc_peer_is_up)
1799 return 0;
1800
1801 if (mtyp != STATE_MSG)
1802 return 0;
1803
Jon Paul Maloye74a3862016-03-03 14:23:21 -05001804 if (dnode == tipc_own_addr(l->net)) {
Jon Paul Maloy52666982015-10-22 08:51:41 -04001805 tipc_link_bc_ack_rcv(l, acked, xmitq);
Jon Paul Maloy40501f92017-08-21 17:59:30 +02001806 rc = tipc_link_retrans(l->bc_sndlink, l, from, to, xmitq);
Jon Paul Maloy52666982015-10-22 08:51:41 -04001807 l->stats.recv_nacks++;
1808 return rc;
1809 }
1810
1811 /* Msg for other node => suppress own NACK at next sync if applicable */
1812 if (more(peers_snd_nxt, l->rcv_nxt) && !less(l->rcv_nxt, from))
1813 l->nack_state = BC_NACK_SND_SUPPRESS;
1814
1815 return 0;
1816}
1817
Jon Paul Maloye3eea1e2015-03-13 16:08:11 -04001818void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
Per Lidenb97bf3f2006-01-02 19:04:38 +01001819{
Jon Maloy218527f2018-03-29 23:20:41 +02001820 int max_bulk = TIPC_MAX_PUBL / (l->mtu / ITEM_SIZE);
Jon Paul Maloy05dcc5a2015-03-13 16:08:10 -04001821
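	/* Backlog limits grow with the configured window, one multiple per
	 * importance level. The SYSTEM limit is derived from how many packets
	 * a full name table bulk distribution would need (TIPC_MAX_PUBL items
	 * at ITEM_SIZE each).
	 */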
Jon Paul Maloye3eea1e2015-03-13 16:08:11 -04001822 l->window = win;
Jon Paul Maloy5a0950c2016-08-16 11:53:51 -04001823 l->backlog[TIPC_LOW_IMPORTANCE].limit = max_t(u16, 50, win);
1824 l->backlog[TIPC_MEDIUM_IMPORTANCE].limit = max_t(u16, 100, win * 2);
1825 l->backlog[TIPC_HIGH_IMPORTANCE].limit = max_t(u16, 150, win * 3);
1826 l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = max_t(u16, 200, win * 4);
Jon Paul Maloy1f66d162015-03-25 12:07:24 -04001827 l->backlog[TIPC_SYSTEM_IMPORTANCE].limit = max_bulk;
Per Lidenb97bf3f2006-01-02 19:04:38 +01001828}
1829
Allan Stephens5c216e12011-10-18 11:34:29 -04001830/**
Jon Paul Maloy38206d52015-11-19 14:30:46 -05001831 * link_reset_stats - reset link statistics
Jon Paul Maloy1a906322015-11-19 14:30:47 -05001832 * @l: pointer to link
Per Lidenb97bf3f2006-01-02 19:04:38 +01001833 */
Jon Paul Maloy38206d52015-11-19 14:30:46 -05001834void tipc_link_reset_stats(struct tipc_link *l)
Per Lidenb97bf3f2006-01-02 19:04:38 +01001835{
Jon Paul Maloy38206d52015-11-19 14:30:46 -05001836 memset(&l->stats, 0, sizeof(l->stats));
Per Lidenb97bf3f2006-01-02 19:04:38 +01001837}
1838
Jon Paul Maloy1a20cc22015-07-16 16:54:30 -04001839static void link_print(struct tipc_link *l, const char *str)
Per Lidenb97bf3f2006-01-02 19:04:38 +01001840{
Jon Paul Maloy1a20cc22015-07-16 16:54:30 -04001841 struct sk_buff *hskb = skb_peek(&l->transmq);
Jon Paul Maloyc1ab3f1d2015-10-22 08:51:38 -04001842 u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt - 1;
Jon Paul Maloy1a20cc22015-07-16 16:54:30 -04001843 u16 tail = l->snd_nxt - 1;
Ying Xue7a2f7d12014-04-21 10:55:46 +08001844
Jon Paul Maloy662921c2015-07-30 18:24:21 -04001845 pr_info("%s Link <%s> state %x\n", str, l->name, l->state);
Jon Paul Maloy1a20cc22015-07-16 16:54:30 -04001846 pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n",
1847 skb_queue_len(&l->transmq), head, tail,
1848 skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt);
Per Lidenb97bf3f2006-01-02 19:04:38 +01001849}
Richard Alpe0655f6a2014-11-20 10:29:07 +01001850
1851/* Parse and validate nested (link) properties valid for media, bearer and link
1852 */
1853int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
1854{
1855 int err;
1856
1857 err = nla_parse_nested(props, TIPC_NLA_PROP_MAX, prop,
Johannes Bergfceb6432017-04-12 14:34:07 +02001858 tipc_nl_prop_policy, NULL);
Richard Alpe0655f6a2014-11-20 10:29:07 +01001859 if (err)
1860 return err;
1861
1862 if (props[TIPC_NLA_PROP_PRIO]) {
1863 u32 prio;
1864
1865 prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
1866 if (prio > TIPC_MAX_LINK_PRI)
1867 return -EINVAL;
1868 }
1869
1870 if (props[TIPC_NLA_PROP_TOL]) {
1871 u32 tol;
1872
1873 tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
1874 if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
1875 return -EINVAL;
1876 }
1877
1878 if (props[TIPC_NLA_PROP_WIN]) {
1879 u32 win;
1880
1881 win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
1882 if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN))
1883 return -EINVAL;
1884 }
1885
1886 return 0;
1887}
Richard Alpe7be57fc2014-11-20 10:29:12 +01001888
Richard Alped8182802014-11-24 11:10:29 +01001889static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
Richard Alpe7be57fc2014-11-20 10:29:12 +01001890{
1891 int i;
1892 struct nlattr *stats;
1893
1894 struct nla_map {
1895 u32 key;
1896 u32 val;
1897 };
1898
1899 struct nla_map map[] = {
Jon Paul Maloy95901122016-11-25 10:35:02 -05001900 {TIPC_NLA_STATS_RX_INFO, 0},
Richard Alpe7be57fc2014-11-20 10:29:12 +01001901 {TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
1902 {TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
1903 {TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
1904 {TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
Jon Paul Maloy95901122016-11-25 10:35:02 -05001905 {TIPC_NLA_STATS_TX_INFO, 0},
Richard Alpe7be57fc2014-11-20 10:29:12 +01001906 {TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
1907 {TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
1908 {TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
1909 {TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
1910 {TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
1911 s->msg_length_counts : 1},
1912 {TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
1913 {TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
1914 {TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
1915 {TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
1916 {TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
1917 {TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
1918 {TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
1919 {TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
1920 {TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
1921 {TIPC_NLA_STATS_RX_STATES, s->recv_states},
1922 {TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
1923 {TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
1924 {TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
1925 {TIPC_NLA_STATS_TX_STATES, s->sent_states},
1926 {TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
1927 {TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
1928 {TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
1929 {TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
1930 {TIPC_NLA_STATS_DUPLICATES, s->duplicates},
1931 {TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
1932 {TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
1933 {TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
1934 (s->accu_queue_sz / s->queue_sz_counts) : 0}
1935 };
1936
1937 stats = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
1938 if (!stats)
1939 return -EMSGSIZE;
1940
1941 for (i = 0; i < ARRAY_SIZE(map); i++)
1942 if (nla_put_u32(skb, map[i].key, map[i].val))
1943 goto msg_full;
1944
1945 nla_nest_end(skb, stats);
1946
1947 return 0;
1948msg_full:
1949 nla_nest_cancel(skb, stats);
1950
1951 return -EMSGSIZE;
1952}
1953
1954/* Caller should hold appropriate locks to protect the link */
Jon Paul Maloy5be9c082015-11-19 14:30:45 -05001955int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
1956 struct tipc_link *link, int nlflags)
Richard Alpe7be57fc2014-11-20 10:29:12 +01001957{
Jon Maloy23fd3ea2018-03-22 20:42:49 +01001958 u32 self = tipc_own_addr(net);
Richard Alpe7be57fc2014-11-20 10:29:12 +01001959 struct nlattr *attrs;
1960 struct nlattr *prop;
Jon Maloy23fd3ea2018-03-22 20:42:49 +01001961 void *hdr;
1962 int err;
Richard Alpe7be57fc2014-11-20 10:29:12 +01001963
Richard Alpebfb3e5d2015-02-09 09:50:03 +01001964 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
Nicolas Dichtelf2f67392015-04-28 18:33:50 +02001965 nlflags, TIPC_NL_LINK_GET);
Richard Alpe7be57fc2014-11-20 10:29:12 +01001966 if (!hdr)
1967 return -EMSGSIZE;
1968
1969 attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
1970 if (!attrs)
1971 goto msg_full;
1972
1973 if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
1974 goto attr_msg_full;
Jon Maloy23fd3ea2018-03-22 20:42:49 +01001975 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST, tipc_cluster_mask(self)))
Richard Alpe7be57fc2014-11-20 10:29:12 +01001976 goto attr_msg_full;
Jon Paul Maloyed193ec2015-04-02 09:33:02 -04001977 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
Richard Alpe7be57fc2014-11-20 10:29:12 +01001978 goto attr_msg_full;
Jon Paul Maloy95901122016-11-25 10:35:02 -05001979 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->stats.recv_pkts))
Richard Alpe7be57fc2014-11-20 10:29:12 +01001980 goto attr_msg_full;
Jon Paul Maloy95901122016-11-25 10:35:02 -05001981 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->stats.sent_pkts))
Richard Alpe7be57fc2014-11-20 10:29:12 +01001982 goto attr_msg_full;
1983
1984 if (tipc_link_is_up(link))
1985 if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
1986 goto attr_msg_full;
Jon Paul Maloyc72fa872015-10-22 08:51:46 -04001987 if (link->active)
Richard Alpe7be57fc2014-11-20 10:29:12 +01001988 if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
1989 goto attr_msg_full;
1990
1991 prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
1992 if (!prop)
1993 goto attr_msg_full;
1994 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
1995 goto prop_msg_full;
1996 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
1997 goto prop_msg_full;
1998 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN,
Jon Paul Maloy1f66d162015-03-25 12:07:24 -04001999 link->window))
Richard Alpe7be57fc2014-11-20 10:29:12 +01002000 goto prop_msg_full;
2001 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
2002 goto prop_msg_full;
2003 nla_nest_end(msg->skb, prop);
2004
2005 err = __tipc_nl_add_stats(msg->skb, &link->stats);
2006 if (err)
2007 goto attr_msg_full;
2008
2009 nla_nest_end(msg->skb, attrs);
2010 genlmsg_end(msg->skb, hdr);
2011
2012 return 0;
2013
2014prop_msg_full:
2015 nla_nest_cancel(msg->skb, prop);
2016attr_msg_full:
2017 nla_nest_cancel(msg->skb, attrs);
2018msg_full:
2019 genlmsg_cancel(msg->skb, hdr);
2020
2021 return -EMSGSIZE;
2022}
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002023
2024static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
2025 struct tipc_stats *stats)
2026{
2027 int i;
2028 struct nlattr *nest;
2029
2030 struct nla_map {
2031 __u32 key;
2032 __u32 val;
2033 };
2034
2035 struct nla_map map[] = {
Jon Paul Maloy95901122016-11-25 10:35:02 -05002036 {TIPC_NLA_STATS_RX_INFO, stats->recv_pkts},
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002037 {TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
2038 {TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
2039 {TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
2040 {TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
Jon Paul Maloy95901122016-11-25 10:35:02 -05002041 {TIPC_NLA_STATS_TX_INFO, stats->sent_pkts},
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002042 {TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
2043 {TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
2044 {TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
2045 {TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
2046 {TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
2047 {TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
2048 {TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
2049 {TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
2050 {TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
2051 {TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
2052 {TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
2053 {TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
2054 {TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
2055 (stats->accu_queue_sz / stats->queue_sz_counts) : 0}
2056 };
2057
2058 nest = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
2059 if (!nest)
2060 return -EMSGSIZE;
2061
2062 for (i = 0; i < ARRAY_SIZE(map); i++)
2063 if (nla_put_u32(skb, map[i].key, map[i].val))
2064 goto msg_full;
2065
2066 nla_nest_end(skb, nest);
2067
2068 return 0;
2069msg_full:
2070 nla_nest_cancel(skb, nest);
2071
2072 return -EMSGSIZE;
2073}
2074
2075int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
2076{
2077 int err;
2078 void *hdr;
2079 struct nlattr *attrs;
2080 struct nlattr *prop;
2081 struct tipc_net *tn = net_generic(net, tipc_net_id);
2082 struct tipc_link *bcl = tn->bcl;
2083
2084 if (!bcl)
2085 return 0;
2086
2087 tipc_bcast_lock(net);
2088
2089 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
2090 NLM_F_MULTI, TIPC_NL_LINK_GET);
Insu Yunb53ce3e2016-02-17 11:47:35 -05002091 if (!hdr) {
2092 tipc_bcast_unlock(net);
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002093 return -EMSGSIZE;
Insu Yunb53ce3e2016-02-17 11:47:35 -05002094 }
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002095
2096 attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
2097 if (!attrs)
2098 goto msg_full;
2099
2100 /* The broadcast link is always up */
2101 if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
2102 goto attr_msg_full;
2103
2104 if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
2105 goto attr_msg_full;
2106 if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
2107 goto attr_msg_full;
Jon Paul Maloy95901122016-11-25 10:35:02 -05002108 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, 0))
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002109 goto attr_msg_full;
Jon Paul Maloy95901122016-11-25 10:35:02 -05002110 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, 0))
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002111 goto attr_msg_full;
2112
2113 prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
2114 if (!prop)
2115 goto attr_msg_full;
2116 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->window))
2117 goto prop_msg_full;
2118 nla_nest_end(msg->skb, prop);
2119
2120 err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
2121 if (err)
2122 goto attr_msg_full;
2123
2124 tipc_bcast_unlock(net);
2125 nla_nest_end(msg->skb, attrs);
2126 genlmsg_end(msg->skb, hdr);
2127
2128 return 0;
2129
2130prop_msg_full:
2131 nla_nest_cancel(msg->skb, prop);
2132attr_msg_full:
2133 nla_nest_cancel(msg->skb, attrs);
2134msg_full:
2135 tipc_bcast_unlock(net);
2136 genlmsg_cancel(msg->skb, hdr);
2137
2138 return -EMSGSIZE;
2139}
2140
Richard Alped01332f2016-02-01 08:19:56 +01002141void tipc_link_set_tolerance(struct tipc_link *l, u32 tol,
2142 struct sk_buff_head *xmitq)
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002143{
2144 l->tolerance = tol;
Jon Maloy37c64cf2018-02-14 13:34:39 +01002145 if (link_is_up(l))
2146 tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, tol, 0, xmitq);
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002147}
2148
Richard Alped01332f2016-02-01 08:19:56 +01002149void tipc_link_set_prio(struct tipc_link *l, u32 prio,
2150 struct sk_buff_head *xmitq)
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002151{
2152 l->priority = prio;
Jon Maloy8d6e79d2017-11-08 09:59:26 +01002153 tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, prio, xmitq);
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002154}
2155
2156void tipc_link_set_abort_limit(struct tipc_link *l, u32 limit)
2157{
2158 l->abort_limit = limit;
2159}