/*
 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#ifndef _NET_BATMAN_ADV_TYPES_H_
#define _NET_BATMAN_ADV_TYPES_H_

#include "packet.h"
#include "bitarray.h"

#define BAT_HEADER_LEN (sizeof(struct ethhdr) + \
	((sizeof(struct unicast_packet) > sizeof(struct bcast_packet) ? \
	  sizeof(struct unicast_packet) : \
	  sizeof(struct bcast_packet))))

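/* Example (illustrative sketch): BAT_HEADER_LEN is the headroom an outgoing
 * frame needs in front of its payload, i.e. an outer ethernet header plus the
 * larger of the unicast and broadcast batman headers, so one reservation
 * covers either encapsulation. The helper name below is made up for this
 * example and assumes <linux/skbuff.h> has already been pulled in by the
 * including file.
 */
static inline int example_reserve_bat_headroom(struct sk_buff *skb)
{
	/* make sure BAT_HEADER_LEN bytes can later be skb_push()ed in front */
	return skb_cow_head(skb, BAT_HEADER_LEN);
}
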
struct hard_iface {
	struct list_head list;
	int16_t if_num;
	char if_status;
	struct net_device *net_dev;
	atomic_t seqno;
	atomic_t frag_seqno;
	unsigned char *packet_buff;
	int packet_len;
	struct kobject *hardif_obj;
	atomic_t refcount;
	struct packet_type batman_adv_ptype;
	struct net_device *soft_iface;
	struct rcu_head rcu;
};
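
/* Example (illustrative sketch): hard_iface pairs an atomic_t refcount with
 * an rcu_head so that the final reference drop frees the structure only after
 * all RCU readers are done. The helper names below are made up for this
 * example; the real module additionally releases the attached net_dev before
 * freeing.
 */
static void example_hardif_free_rcu(struct rcu_head *rcu)
{
	struct hard_iface *hard_iface;

	hard_iface = container_of(rcu, struct hard_iface, rcu);
	kfree(hard_iface);
}

static inline void example_hardif_free_ref(struct hard_iface *hard_iface)
{
	/* defer the actual kfree() until after an RCU grace period */
	if (atomic_dec_and_test(&hard_iface->refcount))
		call_rcu(&hard_iface->rcu, example_hardif_free_rcu);
}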

/**
 * orig_node - structure for orig_list maintaining nodes of mesh
 * @primary_addr: host's primary interface address
 * @last_valid: when last packet from this node was received
 * @bcast_seqno_reset: time when the broadcast seqno window was reset
 * @batman_seqno_reset: time when the batman seqno window was reset
 * @gw_flags: flags related to gateway class
 * @flags: for now only VIS_SERVER flag
 * @last_real_seqno: last and best known sequence number
 * @last_ttl: ttl of last received packet
 * @last_bcast_seqno: last broadcast sequence number received by this host
 *
 * @bond_candidates: how many bonding candidates are available
 * @bond_list: list of bonding candidates
 */
struct orig_node {
	uint8_t orig[ETH_ALEN];
	uint8_t primary_addr[ETH_ALEN];
	struct neigh_node __rcu *router; /* rcu protected pointer */
	unsigned long *bcast_own;
	uint8_t *bcast_own_sum;
	unsigned long last_valid;
	unsigned long bcast_seqno_reset;
	unsigned long batman_seqno_reset;
	uint8_t gw_flags;
	uint8_t flags;
	atomic_t last_ttvn; /* last seen translation table version number */
	uint16_t tt_crc;
	unsigned char *tt_buff;
	int16_t tt_buff_len;
	spinlock_t tt_buff_lock; /* protects tt_buff */
	atomic_t tt_size;
	bool tt_initialised;
	/* The tt_poss_change flag is used to detect an ongoing roaming phase.
	 * If true, then I sent a Roaming_adv to this orig_node and I have to
	 * inspect every packet directed to it to check whether it is still
	 * the true destination or not. This flag will be reset to false as
	 * soon as I receive a new TTVN from this orig_node */
	bool tt_poss_change;
	uint32_t last_real_seqno;
	uint8_t last_ttl;
	DECLARE_BITMAP(bcast_bits, TQ_LOCAL_WINDOW_SIZE);
	uint32_t last_bcast_seqno;
	struct hlist_head neigh_list;
	struct list_head frag_list;
	spinlock_t neigh_list_lock; /* protects neigh_list and router */
	atomic_t refcount;
	struct rcu_head rcu;
	struct hlist_node hash_entry;
	struct bat_priv *bat_priv;
	unsigned long last_frag_packet;
	/* ogm_cnt_lock protects: bcast_own, bcast_own_sum,
	 * neigh_node->real_bits, neigh_node->real_packet_count */
	spinlock_t ogm_cnt_lock;
	/* bcast_seqno_lock protects bcast_bits, last_bcast_seqno */
	spinlock_t bcast_seqno_lock;
	spinlock_t tt_list_lock; /* protects tt_list */
	atomic_t bond_candidates;
	struct list_head bond_list;
};
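
/* Example (illustrative sketch): last_valid stores a jiffies timestamp of the
 * most recently received packet, so purge decisions are simple time_after()
 * comparisons. The helper name and the timeout parameter are made up for this
 * example; the real timeout constant lives elsewhere in the module.
 */
static inline bool example_orig_node_is_stale(const struct orig_node *orig_node,
					      unsigned long timeout_jiffies)
{
	/* true once no packet has been seen for timeout_jiffies */
	return time_after(jiffies, orig_node->last_valid + timeout_jiffies);
}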

struct gw_node {
	struct hlist_node list;
	struct orig_node *orig_node;
	unsigned long deleted;
	atomic_t refcount;
	struct rcu_head rcu;
};

/**
 * neigh_node
 * @last_valid: when last packet via this neighbor was received
 */
struct neigh_node {
	struct hlist_node list;
	uint8_t addr[ETH_ALEN];
	uint8_t real_packet_count;
	uint8_t tq_recv[TQ_GLOBAL_WINDOW_SIZE];
	uint8_t tq_index;
	uint8_t tq_avg;
	uint8_t last_ttl;
	struct list_head bonding_list;
	unsigned long last_valid;
	DECLARE_BITMAP(real_bits, TQ_LOCAL_WINDOW_SIZE);
	atomic_t refcount;
	struct rcu_head rcu;
	struct orig_node *orig_node;
	struct hard_iface *if_incoming;
	spinlock_t tq_lock; /* protects: tq_recv, tq_index */
};
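
/* Example (illustrative sketch): the __rcu annotated orig_node->router
 * pointer is read under rcu_read_lock(), and the neighbor's refcount is taken
 * before the pointer escapes the read-side critical section. The helper name
 * is made up for this example.
 */
static inline struct neigh_node *
example_orig_node_get_router(struct orig_node *orig_node)
{
	struct neigh_node *router;

	rcu_read_lock();
	router = rcu_dereference(orig_node->router);
	/* only hand out the router if it is not already being freed */
	if (router && !atomic_inc_not_zero(&router->refcount))
		router = NULL;
	rcu_read_unlock();

	return router;
}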


struct bat_priv {
	atomic_t mesh_state;
	struct net_device_stats stats;
	atomic_t aggregated_ogms; /* boolean */
	atomic_t bonding; /* boolean */
	atomic_t fragmentation; /* boolean */
	atomic_t ap_isolation; /* boolean */
	atomic_t bridge_loop_avoidance; /* boolean */
	atomic_t vis_mode; /* VIS_TYPE_* */
	atomic_t gw_mode; /* GW_MODE_* */
	atomic_t gw_sel_class; /* uint */
	atomic_t gw_bandwidth; /* gw bandwidth */
	atomic_t orig_interval; /* uint */
	atomic_t hop_penalty; /* uint */
	atomic_t log_level; /* uint */
	atomic_t bcast_seqno;
	atomic_t bcast_queue_left;
	atomic_t batman_queue_left;
	atomic_t ttvn; /* translation table version number */
	atomic_t tt_ogm_append_cnt;
	atomic_t tt_local_changes; /* changes registered in an OGM interval */
	atomic_t bla_num_requests; /* number of bla requests in flight */
	/* The tt_poss_change flag is used to detect an ongoing roaming phase.
	 * If true, then I received a Roaming_adv and I have to inspect every
	 * packet directed to me to check whether I am still the true
	 * destination or not. This flag will be reset to false as soon as I
	 * increase my TTVN */
	bool tt_poss_change;
	char num_ifaces;
	struct debug_log *debug_log;
	struct kobject *mesh_obj;
	struct dentry *debug_dir;
	struct hlist_head forw_bat_list;
	struct hlist_head forw_bcast_list;
	struct hlist_head gw_list;
	struct list_head tt_changes_list; /* tracks changes in an OGM interval */
	struct list_head vis_send_list;
	struct hashtable_t *orig_hash;
	struct hashtable_t *tt_local_hash;
	struct hashtable_t *tt_global_hash;
	struct hashtable_t *claim_hash;
	struct hashtable_t *backbone_hash;
	struct list_head tt_req_list; /* list of pending tt_requests */
	struct list_head tt_roam_list;
	struct hashtable_t *vis_hash;
	spinlock_t forw_bat_list_lock; /* protects forw_bat_list */
	spinlock_t forw_bcast_list_lock; /* protects forw_bcast_list */
	spinlock_t tt_changes_list_lock; /* protects tt_changes_list */
	spinlock_t tt_req_list_lock; /* protects tt_req_list */
	spinlock_t tt_roam_list_lock; /* protects tt_roam_list */
	spinlock_t gw_list_lock; /* protects gw_list and curr_gw */
	spinlock_t vis_hash_lock; /* protects vis_hash */
	spinlock_t vis_list_lock; /* protects vis_info::recv_list */
	atomic_t num_local_tt;
	/* Checksum of the local table, recomputed before sending a new OGM */
	atomic_t tt_crc;
	unsigned char *tt_buff;
	int16_t tt_buff_len;
	spinlock_t tt_buff_lock; /* protects tt_buff */
	struct delayed_work tt_work;
	struct delayed_work orig_work;
	struct delayed_work vis_work;
	struct delayed_work bla_work;
	struct gw_node __rcu *curr_gw; /* rcu protected pointer */
	atomic_t gw_reselect;
	struct hard_iface __rcu *primary_if; /* rcu protected pointer */
	struct vis_info *my_vis_info;
	struct bat_algo_ops *bat_algo_ops;
};

struct socket_client {
	struct list_head queue_list;
	unsigned int queue_len;
	unsigned char index;
	spinlock_t lock; /* protects queue_list, queue_len, index */
	wait_queue_head_t queue_wait;
	struct bat_priv *bat_priv;
};

struct socket_packet {
	struct list_head list;
	size_t icmp_len;
	struct icmp_packet_rr icmp_packet;
};

struct tt_common_entry {
	uint8_t addr[ETH_ALEN];
	struct hlist_node hash_entry;
	uint16_t flags;
	atomic_t refcount;
	struct rcu_head rcu;
};

struct tt_local_entry {
	struct tt_common_entry common;
	unsigned long last_seen;
};

struct tt_global_entry {
	struct tt_common_entry common;
	struct hlist_head orig_list;
	spinlock_t list_lock; /* protects the list */
	unsigned long roam_at; /* time at which TT_GLOBAL_ROAM was set */
};

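/* Example (illustrative sketch): tt_local_entry and tt_global_entry embed
 * tt_common_entry as their first member, so code that looks up an entry by
 * its common part can recover the enclosing type with container_of(). The
 * helper name is made up for this example.
 */
static inline struct tt_local_entry *
example_tt_local_entry(struct tt_common_entry *tt_common)
{
	/* only valid if tt_common really belongs to a local entry */
	return container_of(tt_common, struct tt_local_entry, common);
}
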
struct tt_orig_list_entry {
	struct orig_node *orig_node;
	uint8_t ttvn;
	struct rcu_head rcu;
	struct hlist_node list;
};

struct backbone_gw {
	uint8_t orig[ETH_ALEN];
	short vid; /* used VLAN ID */
	struct hlist_node hash_entry;
	struct bat_priv *bat_priv;
	unsigned long lasttime; /* last time we heard of this backbone gw */
	atomic_t request_sent;
	atomic_t refcount;
	struct rcu_head rcu;
	uint16_t crc; /* crc checksum over all claims */
};

struct claim {
	uint8_t addr[ETH_ALEN];
	short vid;
	struct backbone_gw *backbone_gw;
	unsigned long lasttime; /* last time we heard of claim (locals only) */
	struct rcu_head rcu;
	atomic_t refcount;
	struct hlist_node hash_entry;
};

struct tt_change_node {
	struct list_head list;
	struct tt_change change;
};

struct tt_req_node {
	uint8_t addr[ETH_ALEN];
	unsigned long issued_at;
	struct list_head list;
};

struct tt_roam_node {
	uint8_t addr[ETH_ALEN];
	atomic_t counter;
	unsigned long first_time;
	struct list_head list;
};

/**
 * forw_packet - structure for forw_list maintaining packets to be
 *  sent/forwarded
 */
struct forw_packet {
	struct hlist_node list;
	unsigned long send_time;
	uint8_t own;
	struct sk_buff *skb;
	uint16_t packet_len;
	uint32_t direct_link_flags;
	uint8_t num_packets;
	struct delayed_work delayed_work;
	struct hard_iface *if_incoming;
};
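
/* Example (illustrative sketch): a forw_packet is armed by recording its
 * send_time and queueing the embedded delayed_work; the work callback
 * (initialised elsewhere) transmits and releases the packet once the delay
 * expires. The workqueue parameter and helper name are made up for this
 * example.
 */
static inline void example_forw_packet_arm(struct workqueue_struct *wq,
					   struct forw_packet *forw_packet,
					   unsigned long delay)
{
	forw_packet->send_time = jiffies + delay;
	queue_delayed_work(wq, &forw_packet->delayed_work, delay);
}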

/* While scanning for vis-entries of a particular vis-originator
 * this list collects its interfaces to create a subgraph/cluster
 * out of them later
 */
struct if_list_entry {
	uint8_t addr[ETH_ALEN];
	bool primary;
	struct hlist_node list;
};

struct debug_log {
	char log_buff[LOG_BUF_LEN];
	unsigned long log_start;
	unsigned long log_end;
	spinlock_t lock; /* protects log_buff, log_start and log_end */
	wait_queue_head_t queue_wait;
};

struct frag_packet_list_entry {
	struct list_head list;
	uint16_t seqno;
	struct sk_buff *skb;
};

struct vis_info {
	unsigned long first_seen;
	/* list of server-neighbors we received a vis-packet
	 * from. we should not reply to them. */
	struct list_head recv_list;
	struct list_head send_list;
	struct kref refcount;
	struct hlist_node hash_entry;
	struct bat_priv *bat_priv;
	/* this packet might be part of the vis send queue. */
	struct sk_buff *skb_packet;
	/* vis_info may follow here */
} __packed;

struct vis_info_entry {
	uint8_t src[ETH_ALEN];
	uint8_t dest[ETH_ALEN];
	uint8_t quality; /* quality = 0 client */
} __packed;

struct recvlist_node {
	struct list_head list;
	uint8_t mac[ETH_ALEN];
};

struct bat_algo_ops {
	struct hlist_node list;
	char *name;
	/* init OGM when hard-interface is enabled */
	void (*bat_ogm_init)(struct hard_iface *hard_iface);
	/* init primary OGM when primary interface is selected */
	void (*bat_ogm_init_primary)(struct hard_iface *hard_iface);
	/* init mac addresses of the OGM belonging to this hard-interface */
	void (*bat_ogm_update_mac)(struct hard_iface *hard_iface);
	/* prepare a new outgoing OGM for the send queue */
	void (*bat_ogm_schedule)(struct hard_iface *hard_iface,
				 int tt_num_changes);
	/* send scheduled OGM */
	void (*bat_ogm_emit)(struct forw_packet *forw_packet);
	/* receive incoming OGM */
	void (*bat_ogm_receive)(struct hard_iface *if_incoming,
				struct sk_buff *skb);
};

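/* Example (illustrative sketch): callers never invoke an OGM handler
 * directly; they dispatch through the ops table selected for the mesh
 * interface, which is what lets a routing algorithm module (registered by
 * name at module init) swap in its own implementations. The wrapper name is
 * made up for this example.
 */
static inline void example_ogm_receive(struct bat_priv *bat_priv,
				       struct hard_iface *if_incoming,
				       struct sk_buff *skb)
{
	/* hand the OGM to whichever algorithm is active on this mesh */
	bat_priv->bat_algo_ops->bat_ogm_receive(if_incoming, skb);
}
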
#endif /* _NET_BATMAN_ADV_TYPES_H_ */