/*
 * net/tipc/bcast.c: TIPC broadcast code
 *
 * Copyright (c) 2004-2006, 2014, Ericsson AB
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "link.h"
#include "socket.h"
#include "msg.h"
#include "bcast.h"
#include "name_distr.h"

#define MAX_PKT_DEFAULT_MCAST	1500	/* bcast link max packet size (fixed) */
#define BCLINK_WIN_DEFAULT	20	/* bcast link window size (default) */
#define BCBEARER		MAX_BEARERS

/**
 * struct tipc_bcbearer_pair - a pair of bearers used by broadcast link
 * @primary: pointer to primary bearer
 * @secondary: pointer to secondary bearer
 *
 * Bearers must have same priority and same set of reachable destinations
 * to be paired.
 */

struct tipc_bcbearer_pair {
	struct tipc_bearer *primary;
	struct tipc_bearer *secondary;
};

/**
 * struct tipc_bcbearer - bearer used by broadcast link
 * @bearer: (non-standard) broadcast bearer structure
 * @media: (non-standard) broadcast media structure
 * @bpairs: array of bearer pairs
 * @bpairs_temp: temporary array of bearer pairs used by tipc_bcbearer_sort()
 * @remains: temporary node map used by tipc_bcbearer_send()
 * @remains_new: temporary node map used by tipc_bcbearer_send()
 *
 * Note: The fields labelled "temporary" are incorporated into the bearer
 * to avoid consuming potentially limited stack space through the use of
 * large local variables within multicast routines. Concurrent access is
 * prevented through use of the spinlock "bclink_lock".
 */
struct tipc_bcbearer {
	struct tipc_bearer bearer;
	struct tipc_media media;
	struct tipc_bcbearer_pair bpairs[MAX_BEARERS];
	struct tipc_bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1];
	struct tipc_node_map remains;
	struct tipc_node_map remains_new;
};

/**
 * struct tipc_bclink - link used for broadcast messages
 * @lock: spinlock governing access to structure
 * @link: (non-standard) broadcast link structure
 * @node: (non-standard) node structure representing b'cast link's peer node
 * @flags: represents bclink states
 * @bcast_nodes: map of broadcast-capable nodes
 * @retransmit_to: node that most recently requested a retransmit
 *
 * Handles sequence numbering, fragmentation, bundling, etc.
 */
struct tipc_bclink {
	spinlock_t lock;
	struct tipc_link link;
	struct tipc_node node;
	unsigned int flags;
	struct tipc_node_map bcast_nodes;
	struct tipc_node *retransmit_to;
};

static struct tipc_bcbearer *bcbearer;
static struct tipc_bclink *bclink;
static struct tipc_link *bcl;

const char tipc_bclink_name[] = "broadcast-link";

static void tipc_nmap_diff(struct tipc_node_map *nm_a,
			   struct tipc_node_map *nm_b,
			   struct tipc_node_map *nm_diff);
static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node);
static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node);

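/* tipc_bclink_lock - take the spinlock protecting the broadcast link state */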
static void tipc_bclink_lock(void)
{
	spin_lock_bh(&bclink->lock);
}

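/* tipc_bclink_unlock - release the broadcast link lock
 *
 * If a link reset was flagged while the lock was held, all links to the
 * node that last requested a retransmission are reset after the lock has
 * been dropped.
 */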
static void tipc_bclink_unlock(void)
{
	struct tipc_node *node = NULL;

	if (likely(!bclink->flags)) {
		spin_unlock_bh(&bclink->lock);
		return;
	}

	if (bclink->flags & TIPC_BCLINK_RESET) {
		bclink->flags &= ~TIPC_BCLINK_RESET;
		node = tipc_bclink_retransmit_to();
	}
	spin_unlock_bh(&bclink->lock);

	if (node)
		tipc_link_reset_all(node);
}

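/* tipc_bclink_get_mtu - return the fixed maximum packet size of the bcast link */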
uint tipc_bclink_get_mtu(void)
{
	return MAX_PKT_DEFAULT_MCAST;
}

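/* tipc_bclink_set_flags - raise the given state flags on the broadcast link */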
void tipc_bclink_set_flags(unsigned int flags)
{
	bclink->flags |= flags;
}

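/* The number of acknowledgements still expected for a broadcast buffer is
 * stashed in the buffer's TIPC control block ("handle" field); the helpers
 * below read, set and decrement that counter.
 */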
static u32 bcbuf_acks(struct sk_buff *buf)
{
	return (u32)(unsigned long)TIPC_SKB_CB(buf)->handle;
}

static void bcbuf_set_acks(struct sk_buff *buf, u32 acks)
{
	TIPC_SKB_CB(buf)->handle = (void *)(unsigned long)acks;
}

static void bcbuf_decr_acks(struct sk_buff *buf)
{
	bcbuf_set_acks(buf, bcbuf_acks(buf) - 1);
}

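/* tipc_bclink_add_node/tipc_bclink_remove_node - update the map of
 * broadcast-capable nodes under the broadcast link lock
 */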
void tipc_bclink_add_node(u32 addr)
{
	tipc_bclink_lock();
	tipc_nmap_add(&bclink->bcast_nodes, addr);
	tipc_bclink_unlock();
}

void tipc_bclink_remove_node(u32 addr)
{
	tipc_bclink_lock();
	tipc_nmap_remove(&bclink->bcast_nodes, addr);
	tipc_bclink_unlock();
}

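/* The broadcast link reuses its fsm_msg_cnt field to record the sequence
 * number of the last packet sent; bclink_set_last_sent() refreshes it and
 * tipc_bclink_get_last_sent() reports it.
 */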
static void bclink_set_last_sent(void)
{
	if (bcl->next_out)
		bcl->fsm_msg_cnt = mod(buf_seqno(bcl->next_out) - 1);
	else
		bcl->fsm_msg_cnt = mod(bcl->next_out_no - 1);
}

u32 tipc_bclink_get_last_sent(void)
{
	return bcl->fsm_msg_cnt;
}

static void bclink_update_last_sent(struct tipc_node *node, u32 seqno)
{
	node->bclink.last_sent = less_eq(node->bclink.last_sent, seqno) ?
						seqno : node->bclink.last_sent;
}


/**
 * tipc_bclink_retransmit_to - get most recent node to request retransmission
 *
 * Called with bclink_lock locked
 */
struct tipc_node *tipc_bclink_retransmit_to(void)
{
	return bclink->retransmit_to;
}

/**
 * bclink_retransmit_pkt - retransmit broadcast packets
 * @after: sequence number of last packet to *not* retransmit
 * @to: sequence number of last packet to retransmit
 *
 * Called with bclink_lock locked
 */
static void bclink_retransmit_pkt(u32 after, u32 to)
{
	struct sk_buff *skb;

	skb_queue_walk(&bcl->outqueue, skb) {
		if (more(buf_seqno(skb), after))
			break;
	}
	tipc_link_retransmit(bcl, skb, mod(to - after));
}

/**
 * tipc_bclink_wakeup_users - wake up pending users
 *
 * Called with no locks taken
 */
void tipc_bclink_wakeup_users(void)
{
	while (skb_queue_len(&bclink->link.waiting_sks))
		tipc_sk_rcv(skb_dequeue(&bclink->link.waiting_sks));
}

/**
 * tipc_bclink_acknowledge - handle acknowledgement of broadcast packets
 * @n_ptr: node that sent acknowledgement info
 * @acked: broadcast sequence # that has been acknowledged
 *
 * Node is locked, bclink_lock unlocked.
 */
void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
{
	struct sk_buff *skb, *tmp;
	struct sk_buff *next;
	unsigned int released = 0;

	tipc_bclink_lock();
	/* Bail out if tx queue is empty (no clean up is required) */
	skb = skb_peek(&bcl->outqueue);
	if (!skb)
		goto exit;

	/* Determine which messages need to be acknowledged */
	if (acked == INVALID_LINK_SEQ) {
		/*
		 * Contact with specified node has been lost, so need to
		 * acknowledge sent messages only (if other nodes still exist)
		 * or both sent and unsent messages (otherwise)
		 */
		if (bclink->bcast_nodes.count)
			acked = bcl->fsm_msg_cnt;
		else
			acked = bcl->next_out_no;
	} else {
		/*
		 * Bail out if specified sequence number does not correspond
		 * to a message that has been sent and not yet acknowledged
		 */
		if (less(acked, buf_seqno(skb)) ||
		    less(bcl->fsm_msg_cnt, acked) ||
		    less_eq(acked, n_ptr->bclink.acked))
			goto exit;
	}

	/* Skip over packets that node has previously acknowledged */
	skb_queue_walk(&bcl->outqueue, skb) {
		if (more(buf_seqno(skb), n_ptr->bclink.acked))
			break;
	}

	/* Update packets that node is now acknowledging */
	skb_queue_walk_from_safe(&bcl->outqueue, skb, tmp) {
		if (more(buf_seqno(skb), acked))
			break;

		next = tipc_skb_queue_next(&bcl->outqueue, skb);
		if (skb != bcl->next_out) {
			bcbuf_decr_acks(skb);
		} else {
			bcbuf_set_acks(skb, 0);
			bcl->next_out = next;
			bclink_set_last_sent();
		}

		if (bcbuf_acks(skb) == 0) {
			__skb_unlink(skb, &bcl->outqueue);
			kfree_skb(skb);
			released = 1;
		}
	}
	n_ptr->bclink.acked = acked;

	/* Try resolving broadcast link congestion, if necessary */
	if (unlikely(bcl->next_out)) {
		tipc_link_push_packets(bcl);
		bclink_set_last_sent();
	}
	if (unlikely(released && !skb_queue_empty(&bcl->waiting_sks)))
		n_ptr->action_flags |= TIPC_WAKEUP_BCAST_USERS;

exit:
	tipc_bclink_unlock();
}

/**
 * tipc_bclink_update_link_state - update broadcast link state
 *
 * RCU and node lock set
 */
void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
{
	struct sk_buff *buf;

	/* Ignore "stale" link state info */
	if (less_eq(last_sent, n_ptr->bclink.last_in))
		return;

	/* Update link synchronization state; quit if in sync */
	bclink_update_last_sent(n_ptr, last_sent);

	if (n_ptr->bclink.last_sent == n_ptr->bclink.last_in)
		return;

	/* Update out-of-sync state; quit if loss is still unconfirmed */
	if ((++n_ptr->bclink.oos_state) == 1) {
		if (n_ptr->bclink.deferred_size < (TIPC_MIN_LINK_WIN / 2))
			return;
		n_ptr->bclink.oos_state++;
	}

	/* Don't NACK if one has been recently sent (or seen) */
	if (n_ptr->bclink.oos_state & 0x1)
		return;

	/* Send NACK */
	buf = tipc_buf_acquire(INT_H_SIZE);
	if (buf) {
		struct tipc_msg *msg = buf_msg(buf);
		struct sk_buff *skb = skb_peek(&n_ptr->bclink.deferred_queue);
		u32 to = skb ? buf_seqno(skb) - 1 : n_ptr->bclink.last_sent;

		tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG,
			      INT_H_SIZE, n_ptr->addr);
		msg_set_non_seq(msg, 1);
		msg_set_mc_netid(msg, tipc_net_id);
		msg_set_bcast_ack(msg, n_ptr->bclink.last_in);
		msg_set_bcgap_after(msg, n_ptr->bclink.last_in);
		msg_set_bcgap_to(msg, to);

		tipc_bclink_lock();
		tipc_bearer_send(MAX_BEARERS, buf, NULL);
		bcl->stats.sent_nacks++;
		tipc_bclink_unlock();
		kfree_skb(buf);

		n_ptr->bclink.oos_state++;
	}
}

/**
 * bclink_peek_nack - monitor retransmission requests sent by other nodes
 *
 * Delay any upcoming NACK by this node if another node has already
 * requested the first message this node is going to ask for.
 */
static void bclink_peek_nack(struct tipc_msg *msg)
{
	struct tipc_node *n_ptr = tipc_node_find(msg_destnode(msg));

	if (unlikely(!n_ptr))
		return;

	tipc_node_lock(n_ptr);

	if (n_ptr->bclink.recv_permitted &&
	    (n_ptr->bclink.last_in != n_ptr->bclink.last_sent) &&
	    (n_ptr->bclink.last_in == msg_bcgap_after(msg)))
		n_ptr->bclink.oos_state = 2;

	tipc_node_unlock(n_ptr);
}

/* tipc_bclink_xmit - broadcast buffer chain to all nodes in cluster
 *                    and to identified node local sockets
 * @list: chain of buffers containing message
 * Consumes the buffer chain, except when returning -ELINKCONG
 * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
 */
int tipc_bclink_xmit(struct sk_buff_head *list)
{
	int rc = 0;
	int bc = 0;
	struct sk_buff *skb;

	/* Prepare clone of message for local node */
	skb = tipc_msg_reassemble(list);
	if (unlikely(!skb)) {
		__skb_queue_purge(list);
		return -EHOSTUNREACH;
	}

	/* Broadcast to all other nodes */
	if (likely(bclink)) {
		tipc_bclink_lock();
		if (likely(bclink->bcast_nodes.count)) {
			rc = __tipc_link_xmit(bcl, list);
			if (likely(!rc)) {
				u32 len = skb_queue_len(&bcl->outqueue);

				bclink_set_last_sent();
				bcl->stats.queue_sz_counts++;
				bcl->stats.accu_queue_sz += len;
			}
			bc = 1;
		}
		tipc_bclink_unlock();
	}

	if (unlikely(!bc))
		__skb_queue_purge(list);

	/* Deliver message clone */
	if (likely(!rc))
		tipc_sk_mcast_rcv(skb);
	else
		kfree_skb(skb);

	return rc;
}

/**
 * bclink_accept_pkt - accept an incoming, in-sequence broadcast packet
 *
 * Called with both sending node's lock and bclink_lock taken.
 */
static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
{
	bclink_update_last_sent(node, seqno);
	node->bclink.last_in = seqno;
	node->bclink.oos_state = 0;
	bcl->stats.recv_info++;

	/*
	 * Unicast an ACK periodically, ensuring that
	 * all nodes in the cluster don't ACK at the same time
	 */
	if (((seqno - tipc_own_addr) % TIPC_MIN_LINK_WIN) == 0) {
		tipc_link_proto_xmit(node->active_links[node->addr & 1],
				     STATE_MSG, 0, 0, 0, 0, 0);
		bcl->stats.sent_acks++;
	}
}

/**
 * tipc_bclink_rcv - receive a broadcast packet, and deliver upwards
 *
 * RCU is locked, no other locks set
 */
void tipc_bclink_rcv(struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);
	struct tipc_node *node;
	u32 next_in;
	u32 seqno;
	int deferred = 0;

	/* Screen out unwanted broadcast messages */
	if (msg_mc_netid(msg) != tipc_net_id)
		goto exit;

	node = tipc_node_find(msg_prevnode(msg));
	if (unlikely(!node))
		goto exit;

	tipc_node_lock(node);
	if (unlikely(!node->bclink.recv_permitted))
		goto unlock;

	/* Handle broadcast protocol message */
	if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
		if (msg_type(msg) != STATE_MSG)
			goto unlock;
		if (msg_destnode(msg) == tipc_own_addr) {
			tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
			tipc_node_unlock(node);
			tipc_bclink_lock();
			bcl->stats.recv_nacks++;
			bclink->retransmit_to = node;
			bclink_retransmit_pkt(msg_bcgap_after(msg),
					      msg_bcgap_to(msg));
			tipc_bclink_unlock();
		} else {
			tipc_node_unlock(node);
			bclink_peek_nack(msg);
		}
		goto exit;
	}

	/* Handle in-sequence broadcast message */
	seqno = msg_seqno(msg);
	next_in = mod(node->bclink.last_in + 1);

	if (likely(seqno == next_in)) {
receive:
		/* Deliver message to destination */
		if (likely(msg_isdata(msg))) {
			tipc_bclink_lock();
			bclink_accept_pkt(node, seqno);
			tipc_bclink_unlock();
			tipc_node_unlock(node);
			if (likely(msg_mcast(msg)))
				tipc_sk_mcast_rcv(buf);
			else
				kfree_skb(buf);
		} else if (msg_user(msg) == MSG_BUNDLER) {
			tipc_bclink_lock();
			bclink_accept_pkt(node, seqno);
			bcl->stats.recv_bundles++;
			bcl->stats.recv_bundled += msg_msgcnt(msg);
			tipc_bclink_unlock();
			tipc_node_unlock(node);
			tipc_link_bundle_rcv(buf);
		} else if (msg_user(msg) == MSG_FRAGMENTER) {
			tipc_buf_append(&node->bclink.reasm_buf, &buf);
			if (unlikely(!buf && !node->bclink.reasm_buf))
				goto unlock;
			tipc_bclink_lock();
			bclink_accept_pkt(node, seqno);
			bcl->stats.recv_fragments++;
			if (buf) {
				bcl->stats.recv_fragmented++;
				msg = buf_msg(buf);
				tipc_bclink_unlock();
				goto receive;
			}
			tipc_bclink_unlock();
			tipc_node_unlock(node);
		} else if (msg_user(msg) == NAME_DISTRIBUTOR) {
			tipc_bclink_lock();
			bclink_accept_pkt(node, seqno);
			tipc_bclink_unlock();
			tipc_node_unlock(node);
			tipc_named_rcv(buf);
		} else {
			tipc_bclink_lock();
			bclink_accept_pkt(node, seqno);
			tipc_bclink_unlock();
			tipc_node_unlock(node);
			kfree_skb(buf);
		}
		buf = NULL;

		/* Determine new synchronization state */
		tipc_node_lock(node);
		if (unlikely(!tipc_node_is_up(node)))
			goto unlock;

		if (node->bclink.last_in == node->bclink.last_sent)
			goto unlock;

		if (skb_queue_empty(&node->bclink.deferred_queue)) {
			node->bclink.oos_state = 1;
			goto unlock;
		}

		msg = buf_msg(skb_peek(&node->bclink.deferred_queue));
		seqno = msg_seqno(msg);
		next_in = mod(next_in + 1);
		if (seqno != next_in)
			goto unlock;

		/* Take in-sequence message from deferred queue & deliver it */
		buf = __skb_dequeue(&node->bclink.deferred_queue);
		goto receive;
	}

	/* Handle out-of-sequence broadcast message */
	if (less(next_in, seqno)) {
		deferred = tipc_link_defer_pkt(&node->bclink.deferred_queue,
					       buf);
		bclink_update_last_sent(node, seqno);
		buf = NULL;
	}

	tipc_bclink_lock();

	if (deferred)
		bcl->stats.deferred_recv++;
	else
		bcl->stats.duplicates++;

	tipc_bclink_unlock();

unlock:
	tipc_node_unlock(node);
exit:
	kfree_skb(buf);
}

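/* tipc_bclink_acks_missing - check whether a node still owes acknowledgements
 * for broadcast packets sent to it
 */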
u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
{
	return (n_ptr->bclink.recv_permitted &&
		(tipc_bclink_get_last_sent() != n_ptr->bclink.acked));
}


/**
 * tipc_bcbearer_send - send a packet through the broadcast pseudo-bearer
 *
 * Send packet over as many bearers as necessary to reach all nodes
 * that have joined the broadcast link.
 *
 * Returns 0 (packet sent successfully) under all circumstances,
 * since the broadcast link's pseudo-bearer never blocks
 */
static int tipc_bcbearer_send(struct sk_buff *buf, struct tipc_bearer *unused1,
			      struct tipc_media_addr *unused2)
{
	int bp_index;
	struct tipc_msg *msg = buf_msg(buf);

	/* Prepare broadcast link message for reliable transmission,
	 * if first time trying to send it;
	 * preparation is skipped for broadcast link protocol messages
	 * since they are sent in an unreliable manner and don't need it
	 */
	if (likely(!msg_non_seq(buf_msg(buf)))) {
		bcbuf_set_acks(buf, bclink->bcast_nodes.count);
		msg_set_non_seq(msg, 1);
		msg_set_mc_netid(msg, tipc_net_id);
		bcl->stats.sent_info++;

		if (WARN_ON(!bclink->bcast_nodes.count)) {
			dump_stack();
			return 0;
		}
	}

	/* Send buffer over bearers until all targets reached */
	bcbearer->remains = bclink->bcast_nodes;

	for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
		struct tipc_bearer *p = bcbearer->bpairs[bp_index].primary;
		struct tipc_bearer *s = bcbearer->bpairs[bp_index].secondary;
		struct tipc_bearer *bp[2] = {p, s};
		struct tipc_bearer *b = bp[msg_link_selector(msg)];
		struct sk_buff *tbuf;

		if (!p)
			break; /* No more bearers to try */
		if (!b)
			b = p;
		tipc_nmap_diff(&bcbearer->remains, &b->nodes,
			       &bcbearer->remains_new);
		if (bcbearer->remains_new.count == bcbearer->remains.count)
			continue; /* Nothing added by bearer pair */

		if (bp_index == 0) {
			/* Use original buffer for first bearer */
			tipc_bearer_send(b->identity, buf, &b->bcast_addr);
		} else {
			/* Avoid concurrent buffer access */
			tbuf = pskb_copy_for_clone(buf, GFP_ATOMIC);
			if (!tbuf)
				break;
			tipc_bearer_send(b->identity, tbuf, &b->bcast_addr);
			kfree_skb(tbuf); /* Bearer keeps a clone */
		}
		if (bcbearer->remains_new.count == 0)
			break; /* All targets reached */

		bcbearer->remains = bcbearer->remains_new;
	}

	return 0;
}

/**
 * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer
 */
void tipc_bcbearer_sort(struct tipc_node_map *nm_ptr, u32 node, bool action)
{
	struct tipc_bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
	struct tipc_bcbearer_pair *bp_curr;
	struct tipc_bearer *b;
	int b_index;
	int pri;

	tipc_bclink_lock();

	if (action)
		tipc_nmap_add(nm_ptr, node);
	else
		tipc_nmap_remove(nm_ptr, node);

	/* Group bearers by priority (can assume max of two per priority) */
	memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp));

	rcu_read_lock();
	for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
		b = rcu_dereference_rtnl(bearer_list[b_index]);
		if (!b || !b->nodes.count)
			continue;

		if (!bp_temp[b->priority].primary)
			bp_temp[b->priority].primary = b;
		else
			bp_temp[b->priority].secondary = b;
	}
	rcu_read_unlock();

	/* Create array of bearer pairs for broadcasting */
	bp_curr = bcbearer->bpairs;
	memset(bcbearer->bpairs, 0, sizeof(bcbearer->bpairs));

	for (pri = TIPC_MAX_LINK_PRI; pri >= 0; pri--) {

		if (!bp_temp[pri].primary)
			continue;

		bp_curr->primary = bp_temp[pri].primary;

		if (bp_temp[pri].secondary) {
			if (tipc_nmap_equal(&bp_temp[pri].primary->nodes,
					    &bp_temp[pri].secondary->nodes)) {
				bp_curr->secondary = bp_temp[pri].secondary;
			} else {
				bp_curr++;
				bp_curr->primary = bp_temp[pri].secondary;
			}
		}

		bp_curr++;
	}

	tipc_bclink_unlock();
}

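/* __tipc_nl_add_bc_link_stat - nest the broadcast link statistics counters
 * as netlink attributes into an outgoing message
 */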
static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
				      struct tipc_stats *stats)
{
	int i;
	struct nlattr *nest;

	struct nla_map {
		__u32 key;
		__u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, stats->recv_info},
		{TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, stats->sent_info},
		{TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
		{TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
		{TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
			(stats->accu_queue_sz / stats->queue_sz_counts) : 0}
	};

	nest = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
	if (!nest)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, nest);

	return 0;
msg_full:
	nla_nest_cancel(skb, nest);

	return -EMSGSIZE;
}

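/* tipc_nl_add_bc_link - append a netlink description of the broadcast link,
 * including its properties and statistics, to a netlink message
 */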
int tipc_nl_add_bc_link(struct tipc_nl_msg *msg)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *prop;

	if (!bcl)
		return 0;

	tipc_bclink_lock();

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_v2_family,
			  NLM_F_MULTI, TIPC_NL_LINK_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	/* The broadcast link is always up */
	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
		goto attr_msg_full;

	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
		goto attr_msg_full;
	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, bcl->next_in_no))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, bcl->next_out_no))
		goto attr_msg_full;

	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->queue_limit[0]))
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
	if (err)
		goto attr_msg_full;

	tipc_bclink_unlock();
	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	tipc_bclink_unlock();
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

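/* tipc_bclink_stats - print broadcast link statistics into a caller-supplied
 * buffer; returns the number of characters written
 */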
int tipc_bclink_stats(char *buf, const u32 buf_size)
{
	int ret;
	struct tipc_stats *s;

	if (!bcl)
		return 0;

	tipc_bclink_lock();

	s = &bcl->stats;

	ret = tipc_snprintf(buf, buf_size, "Link <%s>\n"
			    "  Window:%u packets\n",
			    bcl->name, bcl->queue_limit[0]);
	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     "  RX packets:%u fragments:%u/%u bundles:%u/%u\n",
			     s->recv_info, s->recv_fragments,
			     s->recv_fragmented, s->recv_bundles,
			     s->recv_bundled);
	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     "  TX packets:%u fragments:%u/%u bundles:%u/%u\n",
			     s->sent_info, s->sent_fragments,
			     s->sent_fragmented, s->sent_bundles,
			     s->sent_bundled);
	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     "  RX naks:%u defs:%u dups:%u\n",
			     s->recv_nacks, s->deferred_recv, s->duplicates);
	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     "  TX naks:%u acks:%u dups:%u\n",
			     s->sent_nacks, s->sent_acks, s->retransmitted);
	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     "  Congestion link:%u  Send queue max:%u avg:%u\n",
			     s->link_congs, s->max_queue_sz,
			     s->queue_sz_counts ?
			     (s->accu_queue_sz / s->queue_sz_counts) : 0);

	tipc_bclink_unlock();
	return ret;
}

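/* tipc_bclink_reset_stats - clear all broadcast link statistics counters */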
int tipc_bclink_reset_stats(void)
{
	if (!bcl)
		return -ENOPROTOOPT;

	tipc_bclink_lock();
	memset(&bcl->stats, 0, sizeof(bcl->stats));
	tipc_bclink_unlock();
	return 0;
}

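/* tipc_bclink_set_queue_limits - set the broadcast link window size; the
 * value must lie between TIPC_MIN_LINK_WIN and TIPC_MAX_LINK_WIN
 */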
int tipc_bclink_set_queue_limits(u32 limit)
{
	if (!bcl)
		return -ENOPROTOOPT;
	if ((limit < TIPC_MIN_LINK_WIN) || (limit > TIPC_MAX_LINK_WIN))
		return -EINVAL;

	tipc_bclink_lock();
	tipc_link_set_queue_limits(bcl, limit);
	tipc_bclink_unlock();
	return 0;
}

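/* tipc_bclink_init - allocate and initialize the broadcast pseudo-bearer and
 * the broadcast link at startup
 */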
int tipc_bclink_init(void)
{
	bcbearer = kzalloc(sizeof(*bcbearer), GFP_ATOMIC);
	if (!bcbearer)
		return -ENOMEM;

	bclink = kzalloc(sizeof(*bclink), GFP_ATOMIC);
	if (!bclink) {
		kfree(bcbearer);
		return -ENOMEM;
	}

	bcl = &bclink->link;
	bcbearer->bearer.media = &bcbearer->media;
	bcbearer->media.send_msg = tipc_bcbearer_send;
	sprintf(bcbearer->media.name, "tipc-broadcast");

	spin_lock_init(&bclink->lock);
	__skb_queue_head_init(&bcl->outqueue);
	__skb_queue_head_init(&bcl->deferred_queue);
	__skb_queue_head_init(&bcl->waiting_sks);
	bcl->next_out_no = 1;
	spin_lock_init(&bclink->node.lock);
	__skb_queue_head_init(&bclink->node.waiting_sks);
	bcl->owner = &bclink->node;
	bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
	tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
	bcl->bearer_id = MAX_BEARERS;
	rcu_assign_pointer(bearer_list[MAX_BEARERS], &bcbearer->bearer);
	bcl->state = WORKING_WORKING;
	strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);
	return 0;
}

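/* tipc_bclink_stop - purge the broadcast link queues and release the
 * broadcast bearer and link structures at shutdown
 */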
void tipc_bclink_stop(void)
{
	tipc_bclink_lock();
	tipc_link_purge_queues(bcl);
	tipc_bclink_unlock();

	RCU_INIT_POINTER(bearer_list[BCBEARER], NULL);
	synchronize_net();
	kfree(bcbearer);
	kfree(bclink);
}

/**
 * tipc_nmap_add - add a node to a node map
 */
static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
{
	int n = tipc_node(node);
	int w = n / WSIZE;
	u32 mask = (1 << (n % WSIZE));

	if ((nm_ptr->map[w] & mask) == 0) {
		nm_ptr->count++;
		nm_ptr->map[w] |= mask;
	}
}

/**
 * tipc_nmap_remove - remove a node from a node map
 */
static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node)
{
	int n = tipc_node(node);
	int w = n / WSIZE;
	u32 mask = (1 << (n % WSIZE));

	if ((nm_ptr->map[w] & mask) != 0) {
		nm_ptr->map[w] &= ~mask;
		nm_ptr->count--;
	}
}

/**
 * tipc_nmap_diff - find differences between node maps
 * @nm_a: input node map A
 * @nm_b: input node map B
 * @nm_diff: output node map A-B (i.e. nodes of A that are not in B)
 */
static void tipc_nmap_diff(struct tipc_node_map *nm_a,
			   struct tipc_node_map *nm_b,
			   struct tipc_node_map *nm_diff)
{
	int stop = ARRAY_SIZE(nm_a->map);
	int w;
	int b;
	u32 map;

	memset(nm_diff, 0, sizeof(*nm_diff));
	for (w = 0; w < stop; w++) {
		map = nm_a->map[w] ^ (nm_a->map[w] & nm_b->map[w]);
		nm_diff->map[w] = map;
		if (map != 0) {
			for (b = 0 ; b < WSIZE; b++) {
				if (map & (1 << b))
					nm_diff->count++;
			}
		}
	}
}

/**
 * tipc_port_list_add - add a port to a port list, ensuring no duplicates
 */
void tipc_port_list_add(struct tipc_port_list *pl_ptr, u32 port)
{
	struct tipc_port_list *item = pl_ptr;
	int i;
	int item_sz = PLSIZE;
	int cnt = pl_ptr->count;

	for (; ; cnt -= item_sz, item = item->next) {
		if (cnt < PLSIZE)
			item_sz = cnt;
		for (i = 0; i < item_sz; i++)
			if (item->ports[i] == port)
				return;
		if (i < PLSIZE) {
			item->ports[i] = port;
			pl_ptr->count++;
			return;
		}
		if (!item->next) {
			item->next = kmalloc(sizeof(*item), GFP_ATOMIC);
			if (!item->next) {
				pr_warn("Incomplete multicast delivery, no memory\n");
				return;
			}
			item->next->next = NULL;
		}
	}
}

/**
 * tipc_port_list_free - free dynamically created entries in port_list chain
 */
void tipc_port_list_free(struct tipc_port_list *pl_ptr)
{
	struct tipc_port_list *item;
	struct tipc_port_list *next;

	for (item = pl_ptr->next; item; item = next) {
		next = item->next;
		kfree(item);
	}
}