Merge branch 'batman-adv/next' of git://git.open-mesh.org/ecsv/linux-merge
diff --git a/net/batman-adv/Kconfig b/net/batman-adv/Kconfig
index 6c051ad..2b68d06 100644
--- a/net/batman-adv/Kconfig
+++ b/net/batman-adv/Kconfig
@@ -5,6 +5,7 @@
 config BATMAN_ADV
 	tristate "B.A.T.M.A.N. Advanced Meshing Protocol"
 	depends on NET
+	select CRC16
         default n
 	---help---
 
diff --git a/net/batman-adv/aggregation.c b/net/batman-adv/aggregation.c
index 4080970..c583e04 100644
--- a/net/batman-adv/aggregation.c
+++ b/net/batman-adv/aggregation.c
@@ -20,17 +20,12 @@
  */
 
 #include "main.h"
+#include "translation-table.h"
 #include "aggregation.h"
 #include "send.h"
 #include "routing.h"
 #include "hard-interface.h"
 
-/* calculate the size of the tt information for a given packet */
-static int tt_len(const struct batman_packet *batman_packet)
-{
-	return batman_packet->num_tt * ETH_ALEN;
-}
-
 /* return true if new_packet can be aggregated with forw_packet */
 static bool can_aggregate_with(const struct batman_packet *new_batman_packet,
 			       int packet_len,
@@ -195,7 +190,7 @@
 
 void add_bat_packet_to_list(struct bat_priv *bat_priv,
 			    unsigned char *packet_buff, int packet_len,
-			    struct hard_iface *if_incoming, char own_packet,
+			    struct hard_iface *if_incoming, int own_packet,
 			    unsigned long send_time)
 {
 	/**
@@ -264,18 +259,20 @@
 	batman_packet = (struct batman_packet *)packet_buff;
 
 	do {
-		/* network to host order for our 32bit seqno, and the
-		   orig_interval. */
+		/* network to host order for our 32bit seqno and the
+		   orig_interval */
 		batman_packet->seqno = ntohl(batman_packet->seqno);
+		batman_packet->tt_crc = ntohs(batman_packet->tt_crc);
 
 		tt_buff = packet_buff + buff_pos + BAT_PACKET_LEN;
-		receive_bat_packet(ethhdr, batman_packet,
-				   tt_buff, tt_len(batman_packet),
-				   if_incoming);
 
-		buff_pos += BAT_PACKET_LEN + tt_len(batman_packet);
+		receive_bat_packet(ethhdr, batman_packet, tt_buff, if_incoming);
+
+		buff_pos += BAT_PACKET_LEN +
+			tt_len(batman_packet->tt_num_changes);
+
 		batman_packet = (struct batman_packet *)
 			(packet_buff + buff_pos);
 	} while (aggregated_packet(buff_pos, packet_len,
-				   batman_packet->num_tt));
+				   batman_packet->tt_num_changes));
 }
diff --git a/net/batman-adv/aggregation.h b/net/batman-adv/aggregation.h
index fedeb8d..216337b 100644
--- a/net/batman-adv/aggregation.h
+++ b/net/batman-adv/aggregation.h
@@ -25,9 +25,11 @@
 #include "main.h"
 
 /* is there another aggregated packet here? */
-static inline int aggregated_packet(int buff_pos, int packet_len, int num_tt)
+static inline int aggregated_packet(int buff_pos, int packet_len,
+				    int tt_num_changes)
 {
-	int next_buff_pos = buff_pos + BAT_PACKET_LEN + (num_tt * ETH_ALEN);
+	int next_buff_pos = buff_pos + BAT_PACKET_LEN + (tt_num_changes *
+						sizeof(struct tt_change));
 
 	return (next_buff_pos <= packet_len) &&
 		(next_buff_pos <= MAX_AGGREGATION_BYTES);
@@ -35,7 +37,7 @@
 
 void add_bat_packet_to_list(struct bat_priv *bat_priv,
 			    unsigned char *packet_buff, int packet_len,
-			    struct hard_iface *if_incoming, char own_packet,
+			    struct hard_iface *if_incoming, int own_packet,
 			    unsigned long send_time);
 void receive_aggr_bat_packet(const struct ethhdr *ethhdr,
 			     unsigned char *packet_buff, int packet_len,
diff --git a/net/batman-adv/bat_sysfs.c b/net/batman-adv/bat_sysfs.c
index 924d577..cd15deb 100644
--- a/net/batman-adv/bat_sysfs.c
+++ b/net/batman-adv/bat_sysfs.c
@@ -40,6 +40,20 @@
 	return netdev_priv(net_dev);
 }
 
+#define UEV_TYPE_VAR	"BATTYPE="
+#define UEV_ACTION_VAR	"BATACTION="
+#define UEV_DATA_VAR	"BATDATA="
+
+static char *uev_action_str[] = {
+	"add",
+	"del",
+	"change"
+};
+
+static char *uev_type_str[] = {
+	"gw"
+};
+
 /* Use this, if you have customized show and store functions */
 #define BAT_ATTR(_name, _mode, _show, _store)	\
 struct bat_attribute bat_attr_##_name = {	\
@@ -375,7 +389,7 @@
 static BAT_ATTR(gw_bandwidth, S_IRUGO | S_IWUSR, show_gw_bwidth,
 		store_gw_bwidth);
 #ifdef CONFIG_BATMAN_ADV_DEBUG
-BAT_ATTR_UINT(log_level, S_IRUGO | S_IWUSR, 0, 3, NULL);
+BAT_ATTR_UINT(log_level, S_IRUGO | S_IWUSR, 0, 7, NULL);
 #endif
 
 static struct bat_attribute *mesh_attrs[] = {
@@ -601,3 +615,60 @@
 	kobject_put(*hardif_obj);
 	*hardif_obj = NULL;
 }
+
+int throw_uevent(struct bat_priv *bat_priv, enum uev_type type,
+		 enum uev_action action, const char *data)
+{
+	int ret = -1;
+	struct hard_iface *primary_if = NULL;
+	struct kobject *bat_kobj;
+	char *uevent_env[4] = { NULL, NULL, NULL, NULL };
+
+	primary_if = primary_if_get_selected(bat_priv);
+	if (!primary_if)
+		goto out;
+
+	bat_kobj = &primary_if->soft_iface->dev.kobj;
+
+	uevent_env[0] = kmalloc(strlen(UEV_TYPE_VAR) +
+				strlen(uev_type_str[type]) + 1,
+				GFP_ATOMIC);
+	if (!uevent_env[0])
+		goto out;
+
+	sprintf(uevent_env[0], "%s%s", UEV_TYPE_VAR, uev_type_str[type]);
+
+	uevent_env[1] = kmalloc(strlen(UEV_ACTION_VAR) +
+				strlen(uev_action_str[action]) + 1,
+				GFP_ATOMIC);
+	if (!uevent_env[1])
+		goto out;
+
+	sprintf(uevent_env[1], "%s%s", UEV_ACTION_VAR, uev_action_str[action]);
+
+	/* If the event is DEL, ignore the data field */
+	if (action != UEV_DEL) {
+		uevent_env[2] = kmalloc(strlen(UEV_DATA_VAR) +
+					strlen(data) + 1, GFP_ATOMIC);
+		if (!uevent_env[2])
+			goto out;
+
+		sprintf(uevent_env[2], "%s%s", UEV_DATA_VAR, data);
+	}
+
+	ret = kobject_uevent_env(bat_kobj, KOBJ_CHANGE, uevent_env);
+out:
+	kfree(uevent_env[0]);
+	kfree(uevent_env[1]);
+	kfree(uevent_env[2]);
+
+	if (primary_if)
+		hardif_free_ref(primary_if);
+
+	if (ret)
+		bat_dbg(DBG_BATMAN, bat_priv, "Impossible to send "
+			"uevent for (%s,%s,%s) event (err: %d)\n",
+			uev_type_str[type], uev_action_str[action],
+			(action == UEV_DEL ? "NULL" : data), ret);
+	return ret;
+}
diff --git a/net/batman-adv/bat_sysfs.h b/net/batman-adv/bat_sysfs.h
index 02f1fa7..a3f75a7 100644
--- a/net/batman-adv/bat_sysfs.h
+++ b/net/batman-adv/bat_sysfs.h
@@ -38,5 +38,7 @@
 void sysfs_del_meshif(struct net_device *dev);
 int sysfs_add_hardif(struct kobject **hardif_obj, struct net_device *dev);
 void sysfs_del_hardif(struct kobject **hardif_obj);
+int throw_uevent(struct bat_priv *bat_priv, enum uev_type type,
+		 enum uev_action action, const char *data);
 
 #endif /* _NET_BATMAN_ADV_SYSFS_H_ */
diff --git a/net/batman-adv/bitarray.c b/net/batman-adv/bitarray.c
index 700ee4f..c1f4bfc 100644
--- a/net/batman-adv/bitarray.c
+++ b/net/batman-adv/bitarray.c
@@ -26,8 +26,8 @@
 
 /* returns true if the corresponding bit in the given seq_bits indicates true
  * and curr_seqno is within range of last_seqno */
-uint8_t get_bit_status(const unsigned long *seq_bits, uint32_t last_seqno,
-		       uint32_t curr_seqno)
+int get_bit_status(const unsigned long *seq_bits, uint32_t last_seqno,
+		   uint32_t curr_seqno)
 {
 	int32_t diff, word_offset, word_num;
 
@@ -127,8 +127,8 @@
  *  1 if the window was moved (either new or very old)
  *  0 if the window was not moved/shifted.
  */
-char bit_get_packet(void *priv, unsigned long *seq_bits,
-		    int32_t seq_num_diff, int8_t set_mark)
+int bit_get_packet(void *priv, unsigned long *seq_bits,
+		    int32_t seq_num_diff, int set_mark)
 {
 	struct bat_priv *bat_priv = priv;
 
diff --git a/net/batman-adv/bitarray.h b/net/batman-adv/bitarray.h
index e32eb2d..9c04422 100644
--- a/net/batman-adv/bitarray.h
+++ b/net/batman-adv/bitarray.h
@@ -26,8 +26,8 @@
 
 /* returns true if the corresponding bit in the given seq_bits indicates true
  * and curr_seqno is within range of last_seqno */
-uint8_t get_bit_status(const unsigned long *seq_bits, uint32_t last_seqno,
-		       uint32_t curr_seqno);
+int get_bit_status(const unsigned long *seq_bits, uint32_t last_seqno,
+		   uint32_t curr_seqno);
 
 /* turn corresponding bit on, so we can remember that we got the packet */
 void bit_mark(unsigned long *seq_bits, int32_t n);
@@ -35,8 +35,8 @@
 
 /* receive and process one packet, returns 1 if received seq_num is considered
  * new, 0 if old  */
-char bit_get_packet(void *priv, unsigned long *seq_bits,
-		    int32_t seq_num_diff, int8_t set_mark);
+int bit_get_packet(void *priv, unsigned long *seq_bits,
+		   int32_t seq_num_diff, int set_mark);
 
 /* count the hamming weight, how many good packets did we receive? */
 int bit_packet_count(const unsigned long *seq_bits);
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 24aee56..8b25b52 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -20,15 +20,22 @@
  */
 
 #include "main.h"
+#include "bat_sysfs.h"
 #include "gateway_client.h"
 #include "gateway_common.h"
 #include "hard-interface.h"
 #include "originator.h"
+#include "routing.h"
 #include <linux/ip.h>
 #include <linux/ipv6.h>
 #include <linux/udp.h>
 #include <linux/if_vlan.h>
 
+/* This is the offset of the options field in a dhcp packet starting at
+ * the beginning of the dhcp header */
+#define DHCP_OPTIONS_OFFSET 240
+#define DHCP_REQUEST 3
+
 static void gw_node_free_ref(struct gw_node *gw_node)
 {
 	if (atomic_dec_and_test(&gw_node->refcount))
@@ -97,40 +104,19 @@
 
 void gw_deselect(struct bat_priv *bat_priv)
 {
-	gw_select(bat_priv, NULL);
+	atomic_set(&bat_priv->gw_reselect, 1);
 }
 
-void gw_election(struct bat_priv *bat_priv)
+static struct gw_node *gw_get_best_gw_node(struct bat_priv *bat_priv)
 {
-	struct hlist_node *node;
-	struct gw_node *gw_node, *curr_gw = NULL, *curr_gw_tmp = NULL;
 	struct neigh_node *router;
-	uint8_t max_tq = 0;
+	struct hlist_node *node;
+	struct gw_node *gw_node, *curr_gw = NULL;
 	uint32_t max_gw_factor = 0, tmp_gw_factor = 0;
+	uint8_t max_tq = 0;
 	int down, up;
 
-	/**
-	 * The batman daemon checks here if we already passed a full originator
-	 * cycle in order to make sure we don't choose the first gateway we
-	 * hear about. This check is based on the daemon's uptime which we
-	 * don't have.
-	 **/
-	if (atomic_read(&bat_priv->gw_mode) != GW_MODE_CLIENT)
-		return;
-
-	curr_gw = gw_get_selected_gw_node(bat_priv);
-	if (curr_gw)
-		goto out;
-
 	rcu_read_lock();
-	if (hlist_empty(&bat_priv->gw_list)) {
-		bat_dbg(DBG_BATMAN, bat_priv,
-			"Removing selected gateway - "
-			"no gateway in range\n");
-		gw_deselect(bat_priv);
-		goto unlock;
-	}
-
 	hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
 		if (gw_node->deleted)
 			continue;
@@ -139,6 +125,9 @@
 		if (!router)
 			continue;
 
+		if (!atomic_inc_not_zero(&gw_node->refcount))
+			goto next;
+
 		switch (atomic_read(&bat_priv->gw_sel_class)) {
 		case 1: /* fast connection */
 			gw_bandwidth_to_kbit(gw_node->orig_node->gw_flags,
@@ -151,8 +140,12 @@
 
 			if ((tmp_gw_factor > max_gw_factor) ||
 			    ((tmp_gw_factor == max_gw_factor) &&
-			     (router->tq_avg > max_tq)))
-				curr_gw_tmp = gw_node;
+			     (router->tq_avg > max_tq))) {
+				if (curr_gw)
+					gw_node_free_ref(curr_gw);
+				curr_gw = gw_node;
+				atomic_inc(&curr_gw->refcount);
+			}
 			break;
 
 		default: /**
@@ -163,8 +156,12 @@
 			  *     soon as a better gateway appears which has
 			  *     $routing_class more tq points)
 			  **/
-			if (router->tq_avg > max_tq)
-				curr_gw_tmp = gw_node;
+			if (router->tq_avg > max_tq) {
+				if (curr_gw)
+					gw_node_free_ref(curr_gw);
+				curr_gw = gw_node;
+				atomic_inc(&curr_gw->refcount);
+			}
 			break;
 		}
 
@@ -174,42 +171,81 @@
 		if (tmp_gw_factor > max_gw_factor)
 			max_gw_factor = tmp_gw_factor;
 
+		gw_node_free_ref(gw_node);
+
+next:
 		neigh_node_free_ref(router);
 	}
-
-	if (curr_gw != curr_gw_tmp) {
-		router = orig_node_get_router(curr_gw_tmp->orig_node);
-		if (!router)
-			goto unlock;
-
-		if ((curr_gw) && (!curr_gw_tmp))
-			bat_dbg(DBG_BATMAN, bat_priv,
-				"Removing selected gateway - "
-				"no gateway in range\n");
-		else if ((!curr_gw) && (curr_gw_tmp))
-			bat_dbg(DBG_BATMAN, bat_priv,
-				"Adding route to gateway %pM "
-				"(gw_flags: %i, tq: %i)\n",
-				curr_gw_tmp->orig_node->orig,
-				curr_gw_tmp->orig_node->gw_flags,
-				router->tq_avg);
-		else
-			bat_dbg(DBG_BATMAN, bat_priv,
-				"Changing route to gateway %pM "
-				"(gw_flags: %i, tq: %i)\n",
-				curr_gw_tmp->orig_node->orig,
-				curr_gw_tmp->orig_node->gw_flags,
-				router->tq_avg);
-
-		neigh_node_free_ref(router);
-		gw_select(bat_priv, curr_gw_tmp);
-	}
-
-unlock:
 	rcu_read_unlock();
+
+	return curr_gw;
+}
+
+void gw_election(struct bat_priv *bat_priv)
+{
+	struct gw_node *curr_gw = NULL, *next_gw = NULL;
+	struct neigh_node *router = NULL;
+	char gw_addr[18] = { '\0' };
+
+	/**
+	 * The batman daemon checks here if we already passed a full originator
+	 * cycle in order to make sure we don't choose the first gateway we
+	 * hear about. This check is based on the daemon's uptime which we
+	 * don't have.
+	 **/
+	if (atomic_read(&bat_priv->gw_mode) != GW_MODE_CLIENT)
+		goto out;
+
+	if (!atomic_dec_not_zero(&bat_priv->gw_reselect))
+		goto out;
+
+	curr_gw = gw_get_selected_gw_node(bat_priv);
+
+	next_gw = gw_get_best_gw_node(bat_priv);
+
+	if (curr_gw == next_gw)
+		goto out;
+
+	if (next_gw) {
+		sprintf(gw_addr, "%pM", next_gw->orig_node->orig);
+
+		router = orig_node_get_router(next_gw->orig_node);
+		if (!router) {
+			gw_deselect(bat_priv);
+			goto out;
+		}
+	}
+
+	if ((curr_gw) && (!next_gw)) {
+		bat_dbg(DBG_BATMAN, bat_priv,
+			"Removing selected gateway - no gateway in range\n");
+		throw_uevent(bat_priv, UEV_GW, UEV_DEL, NULL);
+	} else if ((!curr_gw) && (next_gw)) {
+		bat_dbg(DBG_BATMAN, bat_priv,
+			"Adding route to gateway %pM (gw_flags: %i, tq: %i)\n",
+			next_gw->orig_node->orig,
+			next_gw->orig_node->gw_flags,
+			router->tq_avg);
+		throw_uevent(bat_priv, UEV_GW, UEV_ADD, gw_addr);
+	} else {
+		bat_dbg(DBG_BATMAN, bat_priv,
+			"Changing route to gateway %pM "
+			"(gw_flags: %i, tq: %i)\n",
+			next_gw->orig_node->orig,
+			next_gw->orig_node->gw_flags,
+			router->tq_avg);
+		throw_uevent(bat_priv, UEV_GW, UEV_CHANGE, gw_addr);
+	}
+
+	gw_select(bat_priv, next_gw);
+
 out:
 	if (curr_gw)
 		gw_node_free_ref(curr_gw);
+	if (next_gw)
+		gw_node_free_ref(next_gw);
+	if (router)
+		neigh_node_free_ref(router);
 }
 
 void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node)
@@ -360,7 +396,7 @@
 	struct gw_node *gw_node, *curr_gw;
 	struct hlist_node *node, *node_tmp;
 	unsigned long timeout = 2 * PURGE_TIMEOUT * HZ;
-	char do_deselect = 0;
+	int do_deselect = 0;
 
 	curr_gw = gw_get_selected_gw_node(bat_priv);
 
@@ -479,14 +515,75 @@
 	return ret;
 }
 
-int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb)
+static bool is_type_dhcprequest(struct sk_buff *skb, int header_len)
+{
+	int ret = false;
+	unsigned char *p;
+	int pkt_len;
+
+	if (skb_linearize(skb) < 0)
+		goto out;
+
+	pkt_len = skb_headlen(skb);
+
+	if (pkt_len < header_len + DHCP_OPTIONS_OFFSET + 1)
+		goto out;
+
+	p = skb->data + header_len + DHCP_OPTIONS_OFFSET;
+	pkt_len -= header_len + DHCP_OPTIONS_OFFSET + 1;
+
+	/* Access the dhcp option lists. Each entry is made up of:
+	 * - octet 1: option type
+	 * - octet 2: option data len (only if type != 255 and 0)
+	 * - octet 3: option data */
+	while (*p != 255 && !ret) {
+		/* p now points to the first octect: option type */
+		/* p now points to the first octet: option type */
+			/* type 53 is the message type option.
+			 * Jump the len octect and go to the data octect */
+			 * Jump the len octet and go to the data octet */
+				goto out;
+			p += 2;
+
+			/* check if the message type is what we need */
+			if (*p == DHCP_REQUEST)
+				ret = true;
+			break;
+		} else if (*p == 0) {
+			/* option type 0 (padding), just go forward */
+			if (pkt_len < 1)
+				goto out;
+			pkt_len--;
+			p++;
+		} else {
+			/* This is any other option. So we get the length... */
+			if (pkt_len < 1)
+				goto out;
+			pkt_len--;
+			p++;
+
+			/* ...and then we jump over the data */
+			if (pkt_len < *p)
+				goto out;
+			pkt_len -= *p;
+			p += (*p);
+		}
+	}
+out:
+	return ret;
+}
+
+int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb,
+		 struct orig_node *old_gw)
 {
 	struct ethhdr *ethhdr;
 	struct iphdr *iphdr;
 	struct ipv6hdr *ipv6hdr;
 	struct udphdr *udphdr;
 	struct gw_node *curr_gw;
+	struct neigh_node *neigh_curr = NULL, *neigh_old = NULL;
 	unsigned int header_len = 0;
+	int ret = 1;
 
 	if (atomic_read(&bat_priv->gw_mode) == GW_MODE_OFF)
 		return 0;
@@ -554,7 +651,30 @@
 	if (!curr_gw)
 		return 0;
 
+	/* If old_gw != NULL then this packet is unicast.
+	 * So, at this point we have to check the message type: if it is a
+	 * DHCPREQUEST we have to decide whether to drop it or not */
+	if (old_gw && curr_gw->orig_node != old_gw) {
+		if (is_type_dhcprequest(skb, header_len)) {
+			/* If the dhcp packet has been sent to a different gw,
+			 * we have to evaluate whether the old gw is still
+			 * reliable enough */
+			neigh_curr = find_router(bat_priv, curr_gw->orig_node,
+						 NULL);
+			neigh_old = find_router(bat_priv, old_gw, NULL);
+			if (!neigh_curr || !neigh_old)
+				goto free_neigh;
+			if (neigh_curr->tq_avg - neigh_old->tq_avg <
+								GW_THRESHOLD)
+				ret = -1;
+		}
+	}
+free_neigh:
+	if (neigh_old)
+		neigh_node_free_ref(neigh_old);
+	if (neigh_curr)
+		neigh_node_free_ref(neigh_curr);
 	if (curr_gw)
 		gw_node_free_ref(curr_gw);
-	return 1;
+	return ret;
 }
diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h
index 1ce8c60..b9b983c 100644
--- a/net/batman-adv/gateway_client.h
+++ b/net/batman-adv/gateway_client.h
@@ -31,6 +31,7 @@
 void gw_node_delete(struct bat_priv *bat_priv, struct orig_node *orig_node);
 void gw_node_purge(struct bat_priv *bat_priv);
 int gw_client_seq_print_text(struct seq_file *seq, void *offset);
-int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb);
+int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb,
+		 struct orig_node *old_gw);
 
 #endif /* _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ */
diff --git a/net/batman-adv/gateway_common.c b/net/batman-adv/gateway_common.c
index e74307b..18661af 100644
--- a/net/batman-adv/gateway_common.c
+++ b/net/batman-adv/gateway_common.c
@@ -61,9 +61,9 @@
 /* returns the up and downspeeds in kbit, calculated from the class */
 void gw_bandwidth_to_kbit(uint8_t gw_srv_class, int *down, int *up)
 {
-	char sbit = (gw_srv_class & 0x80) >> 7;
-	char dpart = (gw_srv_class & 0x78) >> 3;
-	char upart = (gw_srv_class & 0x07);
+	int sbit = (gw_srv_class & 0x80) >> 7;
+	int dpart = (gw_srv_class & 0x78) >> 3;
+	int upart = (gw_srv_class & 0x07);
 
 	if (!gw_srv_class) {
 		*down = 0;
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index abb4901..db7aacf 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -152,12 +152,6 @@
 	batman_packet->ttl = TTL;
 
 	primary_if_update_addr(bat_priv);
-
-	/***
-	 * hacky trick to make sure that we send the TT information via
-	 * our new primary interface
-	 */
-	atomic_set(&bat_priv->tt_local_changed, 1);
 }
 
 static bool hardif_is_iface_up(const struct hard_iface *hard_iface)
@@ -340,7 +334,8 @@
 	batman_packet->flags = NO_FLAGS;
 	batman_packet->ttl = 2;
 	batman_packet->tq = TQ_MAX_VALUE;
-	batman_packet->num_tt = 0;
+	batman_packet->tt_num_changes = 0;
+	batman_packet->ttvn = 0;
 
 	hard_iface->if_num = bat_priv->num_ifaces;
 	bat_priv->num_ifaces++;
@@ -659,6 +654,14 @@
 	case BAT_VIS:
 		ret = recv_vis_packet(skb, hard_iface);
 		break;
+		/* Translation table query (request or response) */
+	case BAT_TT_QUERY:
+		ret = recv_tt_query(skb, hard_iface);
+		break;
+		/* Roaming advertisement */
+	case BAT_ROAM_ADV:
+		ret = recv_roam_adv(skb, hard_iface);
+		break;
 	default:
 		ret = NET_RX_DROP;
 	}
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index 2d6445e..e367e69 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -84,8 +84,10 @@
 
 	spin_lock_init(&bat_priv->forw_bat_list_lock);
 	spin_lock_init(&bat_priv->forw_bcast_list_lock);
-	spin_lock_init(&bat_priv->tt_lhash_lock);
-	spin_lock_init(&bat_priv->tt_ghash_lock);
+	spin_lock_init(&bat_priv->tt_changes_list_lock);
+	spin_lock_init(&bat_priv->tt_req_list_lock);
+	spin_lock_init(&bat_priv->tt_roam_list_lock);
+	spin_lock_init(&bat_priv->tt_buff_lock);
 	spin_lock_init(&bat_priv->gw_list_lock);
 	spin_lock_init(&bat_priv->vis_hash_lock);
 	spin_lock_init(&bat_priv->vis_list_lock);
@@ -96,14 +98,14 @@
 	INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
 	INIT_HLIST_HEAD(&bat_priv->gw_list);
 	INIT_HLIST_HEAD(&bat_priv->softif_neigh_vids);
+	INIT_LIST_HEAD(&bat_priv->tt_changes_list);
+	INIT_LIST_HEAD(&bat_priv->tt_req_list);
+	INIT_LIST_HEAD(&bat_priv->tt_roam_list);
 
 	if (originator_init(bat_priv) < 1)
 		goto err;
 
-	if (tt_local_init(bat_priv) < 1)
-		goto err;
-
-	if (tt_global_init(bat_priv) < 1)
+	if (tt_init(bat_priv) < 1)
 		goto err;
 
 	tt_local_add(soft_iface, soft_iface->dev_addr);
@@ -111,6 +113,7 @@
 	if (vis_init(bat_priv) < 1)
 		goto err;
 
+	atomic_set(&bat_priv->gw_reselect, 0);
 	atomic_set(&bat_priv->mesh_state, MESH_ACTIVE);
 	goto end;
 
@@ -137,8 +140,7 @@
 	gw_node_purge(bat_priv);
 	originator_free(bat_priv);
 
-	tt_local_free(bat_priv);
-	tt_global_free(bat_priv);
+	tt_free(bat_priv);
 
 	softif_neigh_purge(bat_priv);
 
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index ed488cb..4f293b5 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -42,15 +42,23 @@
  * -> TODO: check influence on TQ_LOCAL_WINDOW_SIZE */
 #define PURGE_TIMEOUT 200
 #define TT_LOCAL_TIMEOUT 3600 /* in seconds */
-
+#define TT_CLIENT_ROAM_TIMEOUT 600
 /* sliding packet range of received originator messages in squence numbers
  * (should be a multiple of our word size) */
 #define TQ_LOCAL_WINDOW_SIZE 64
+#define TT_REQUEST_TIMEOUT 3 /* seconds we have to keep pending tt_req */
+
 #define TQ_GLOBAL_WINDOW_SIZE 5
 #define TQ_LOCAL_BIDRECT_SEND_MINIMUM 1
 #define TQ_LOCAL_BIDRECT_RECV_MINIMUM 1
 #define TQ_TOTAL_BIDRECT_LIMIT 1
 
+#define TT_OGM_APPEND_MAX 3 /* number of OGMs sent with the last tt diff */
+
+#define ROAMING_MAX_TIME 20 /* Time in which a client can roam at most
+			     * ROAMING_MAX_COUNT times */
+#define ROAMING_MAX_COUNT 5
+
 #define NO_FLAGS 0
 
 #define NUM_WORDS (TQ_LOCAL_WINDOW_SIZE / WORD_BIT_SIZE)
@@ -83,6 +91,18 @@
 #define BCAST_QUEUE_LEN		256
 #define BATMAN_QUEUE_LEN	256
 
+enum uev_action {
+	UEV_ADD = 0,
+	UEV_DEL,
+	UEV_CHANGE
+};
+
+enum uev_type {
+	UEV_GW = 0
+};
+
+#define GW_THRESHOLD	50
+
 /*
  * Debug Messages
  */
@@ -96,7 +116,8 @@
 enum dbg_level {
 	DBG_BATMAN = 1 << 0,
 	DBG_ROUTES = 1 << 1, /* route added / changed / deleted */
-	DBG_ALL    = 3
+	DBG_TT	   = 1 << 2, /* translation table operations */
+	DBG_ALL    = 7
 };
 
 
@@ -151,7 +172,7 @@
 	while (0)
 #else /* !CONFIG_BATMAN_ADV_DEBUG */
 __printf(3, 4)
-static inline void bat_dbg(char type __always_unused,
+static inline void bat_dbg(int type __always_unused,
 			   struct bat_priv *bat_priv __always_unused,
 			   const char *fmt __always_unused, ...)
 {
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index a6c35d4..338b3c5 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -37,6 +37,14 @@
 	queue_delayed_work(bat_event_workqueue, &bat_priv->orig_work, 1 * HZ);
 }
 
+/* returns 1 if they are the same originator */
+static int compare_orig(const struct hlist_node *node, const void *data2)
+{
+	const void *data1 = container_of(node, struct orig_node, hash_entry);
+
+	return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
+}
+
 int originator_init(struct bat_priv *bat_priv)
 {
 	if (bat_priv->orig_hash)
@@ -137,6 +145,7 @@
 	tt_global_del_orig(orig_node->bat_priv, orig_node,
 			    "originator timed out");
 
+	kfree(orig_node->tt_buff);
 	kfree(orig_node->bcast_own);
 	kfree(orig_node->bcast_own_sum);
 	kfree(orig_node);
@@ -205,14 +214,18 @@
 	spin_lock_init(&orig_node->ogm_cnt_lock);
 	spin_lock_init(&orig_node->bcast_seqno_lock);
 	spin_lock_init(&orig_node->neigh_list_lock);
+	spin_lock_init(&orig_node->tt_buff_lock);
 
 	/* extra reference for return */
 	atomic_set(&orig_node->refcount, 2);
 
+	orig_node->tt_poss_change = false;
 	orig_node->bat_priv = bat_priv;
 	memcpy(orig_node->orig, addr, ETH_ALEN);
 	orig_node->router = NULL;
 	orig_node->tt_buff = NULL;
+	orig_node->tt_buff_len = 0;
+	atomic_set(&orig_node->tt_size, 0);
 	orig_node->bcast_seqno_reset = jiffies - 1
 					- msecs_to_jiffies(RESET_PROTECTION_MS);
 	orig_node->batman_seqno_reset = jiffies - 1
@@ -322,9 +335,7 @@
 		if (purge_orig_neighbors(bat_priv, orig_node,
 							&best_neigh_node)) {
 			update_routes(bat_priv, orig_node,
-				      best_neigh_node,
-				      orig_node->tt_buff,
-				      orig_node->tt_buff_len);
+				      best_neigh_node);
 		}
 	}
 
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
index 8e307af..cfc1f60 100644
--- a/net/batman-adv/originator.h
+++ b/net/batman-adv/originator.h
@@ -40,14 +40,6 @@
 int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num);
 
 
-/* returns 1 if they are the same originator */
-static inline int compare_orig(const struct hlist_node *node, const void *data2)
-{
-	const void *data1 = container_of(node, struct orig_node, hash_entry);
-
-	return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
-}
-
 /* hashfunction to choose an entry in a hash table of given size */
 /* hash algorithm from http://en.wikipedia.org/wiki/Hash_table */
 static inline int choose_orig(const void *data, int32_t size)
diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h
index 9f77086..c5f081d 100644
--- a/net/batman-adv/packet.h
+++ b/net/batman-adv/packet.h
@@ -30,11 +30,13 @@
 	BAT_UNICAST      = 0x03,
 	BAT_BCAST        = 0x04,
 	BAT_VIS          = 0x05,
-	BAT_UNICAST_FRAG = 0x06
+	BAT_UNICAST_FRAG = 0x06,
+	BAT_TT_QUERY     = 0x07,
+	BAT_ROAM_ADV     = 0x08
 };
 
 /* this file is included by batctl which needs these defines */
-#define COMPAT_VERSION 12
+#define COMPAT_VERSION 14
 
 enum batman_flags {
 	PRIMARIES_FIRST_HOP = 1 << 4,
@@ -63,18 +65,38 @@
 	UNI_FRAG_LARGETAIL = 1 << 1
 };
 
+/* TT_QUERY subtypes */
+#define TT_QUERY_TYPE_MASK 0x3
+
+enum tt_query_packettype {
+	TT_REQUEST    = 0,
+	TT_RESPONSE   = 1
+};
+
+/* TT_QUERY flags */
+enum tt_query_flags {
+	TT_FULL_TABLE = 1 << 2
+};
+
+/* TT_CHANGE flags */
+enum tt_change_flags {
+	TT_CHANGE_DEL  = 0x01,
+	TT_CLIENT_ROAM = 0x02
+};
+
 struct batman_packet {
 	uint8_t  packet_type;
 	uint8_t  version;  /* batman version field */
+	uint8_t  ttl;
 	uint8_t  flags;    /* 0x40: DIRECTLINK flag, 0x20 VIS_SERVER flag... */
-	uint8_t  tq;
 	uint32_t seqno;
 	uint8_t  orig[6];
 	uint8_t  prev_sender[6];
-	uint8_t  ttl;
-	uint8_t  num_tt;
 	uint8_t  gw_flags;  /* flags related to gateway class */
-	uint8_t  align;
+	uint8_t  tq;
+	uint8_t  tt_num_changes;
+	uint8_t  ttvn; /* translation table version number */
+	uint16_t tt_crc;
 } __packed;
 
 #define BAT_PACKET_LEN sizeof(struct batman_packet)
@@ -82,12 +104,13 @@
 struct icmp_packet {
 	uint8_t  packet_type;
 	uint8_t  version;  /* batman version field */
-	uint8_t  msg_type; /* see ICMP message types above */
 	uint8_t  ttl;
+	uint8_t  msg_type; /* see ICMP message types above */
 	uint8_t  dst[6];
 	uint8_t  orig[6];
 	uint16_t seqno;
 	uint8_t  uid;
+	uint8_t  reserved;
 } __packed;
 
 #define BAT_RR_LEN 16
@@ -97,8 +120,8 @@
 struct icmp_packet_rr {
 	uint8_t  packet_type;
 	uint8_t  version;  /* batman version field */
-	uint8_t  msg_type; /* see ICMP message types above */
 	uint8_t  ttl;
+	uint8_t  msg_type; /* see ICMP message types above */
 	uint8_t  dst[6];
 	uint8_t  orig[6];
 	uint16_t seqno;
@@ -110,16 +133,19 @@
 struct unicast_packet {
 	uint8_t  packet_type;
 	uint8_t  version;  /* batman version field */
-	uint8_t  dest[6];
 	uint8_t  ttl;
+	uint8_t  ttvn; /* destination translation table version number */
+	uint8_t  dest[6];
 } __packed;
 
 struct unicast_frag_packet {
 	uint8_t  packet_type;
 	uint8_t  version;  /* batman version field */
-	uint8_t  dest[6];
 	uint8_t  ttl;
+	uint8_t  ttvn; /* destination translation table version number */
+	uint8_t  dest[6];
 	uint8_t  flags;
+	uint8_t  align;
 	uint8_t  orig[6];
 	uint16_t seqno;
 } __packed;
@@ -127,21 +153,61 @@
 struct bcast_packet {
 	uint8_t  packet_type;
 	uint8_t  version;  /* batman version field */
-	uint8_t  orig[6];
 	uint8_t  ttl;
+	uint8_t  reserved;
 	uint32_t seqno;
+	uint8_t  orig[6];
 } __packed;
 
 struct vis_packet {
 	uint8_t  packet_type;
 	uint8_t  version;        /* batman version field */
-	uint8_t  vis_type;	 /* which type of vis-participant sent this? */
-	uint8_t  entries;	 /* number of entries behind this struct */
-	uint32_t seqno;		 /* sequence number */
 	uint8_t  ttl;		 /* TTL */
+	uint8_t  vis_type;	 /* which type of vis-participant sent this? */
+	uint32_t seqno;		 /* sequence number */
+	uint8_t  entries;	 /* number of entries behind this struct */
+	uint8_t  reserved;
 	uint8_t  vis_orig[6];	 /* originator that announces its neighbors */
 	uint8_t  target_orig[6]; /* who should receive this packet */
 	uint8_t  sender_orig[6]; /* who sent or rebroadcasted this packet */
 } __packed;
 
+struct tt_query_packet {
+	uint8_t  packet_type;
+	uint8_t  version;  /* batman version field */
+	uint8_t  ttl;
+	/* the flag field is a combination of:
+	 * - TT_REQUEST or TT_RESPONSE
+	 * - TT_FULL_TABLE */
+	uint8_t  flags;
+	uint8_t  dst[ETH_ALEN];
+	uint8_t  src[ETH_ALEN];
+	/* the ttvn field is:
+	 * if TT_REQUEST: ttvn that triggered the
+	 *		  request
+	 * if TT_RESPONSE: new ttvn for the src
+	 *		   orig_node */
+	uint8_t  ttvn;
+	/* tt_data field is:
+	 * if TT_REQUEST: crc associated with the
+	 *		  ttvn
+	 * if TT_RESPONSE: table_size */
+	uint16_t tt_data;
+} __packed;
+
+struct roam_adv_packet {
+	uint8_t  packet_type;
+	uint8_t  version;
+	uint8_t  ttl;
+	uint8_t  reserved;
+	uint8_t  dst[ETH_ALEN];
+	uint8_t  src[ETH_ALEN];
+	uint8_t  client[ETH_ALEN];
+} __packed;
+
+struct tt_change {
+	uint8_t flags;
+	uint8_t addr[ETH_ALEN];
+} __packed;
+
 #endif /* _NET_BATMAN_ADV_PACKET_H_ */
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 934f1f2..0ce090c 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -64,27 +64,57 @@
 	}
 }
 
-static void update_TT(struct bat_priv *bat_priv, struct orig_node *orig_node,
-		      const unsigned char *tt_buff, int tt_buff_len)
+static void update_transtable(struct bat_priv *bat_priv,
+			      struct orig_node *orig_node,
+			      const unsigned char *tt_buff,
+			      uint8_t tt_num_changes, uint8_t ttvn,
+			      uint16_t tt_crc)
 {
-	if ((tt_buff_len != orig_node->tt_buff_len) ||
-	    ((tt_buff_len > 0) &&
-	     (orig_node->tt_buff_len > 0) &&
-	     (memcmp(orig_node->tt_buff, tt_buff, tt_buff_len) != 0))) {
+	uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
+	bool full_table = true;
 
-		if (orig_node->tt_buff_len > 0)
-			tt_global_del_orig(bat_priv, orig_node,
-					    "originator changed tt");
+	/* the ttvn increased by one -> we can apply the attached changes */
+	if (ttvn - orig_ttvn == 1) {
+		/* the OGM could not contain the changes because they were too
+		 * many to fit in one frame or because they have already been
+		 * sent TT_OGM_APPEND_MAX times. In this case send a tt
+		 * request */
+		if (!tt_num_changes) {
+			full_table = false;
+			goto request_table;
+		}
 
-		if ((tt_buff_len > 0) && (tt_buff))
-			tt_global_add_orig(bat_priv, orig_node,
-					    tt_buff, tt_buff_len);
+		tt_update_changes(bat_priv, orig_node, tt_num_changes, ttvn,
+				  (struct tt_change *)tt_buff);
+
+		/* Even if we received the crc in the OGM, we prefer
+		 * to recompute it to spot any possible inconsistency
+		 * in the global table */
+		orig_node->tt_crc = tt_global_crc(bat_priv, orig_node);
+		/* Roaming phase is over: tables are in sync again. I can
+		 * unset the flag */
+		orig_node->tt_poss_change = false;
+	} else {
+		/* if we missed more than one change or our tables are not
+		 * in sync anymore -> request fresh tt data */
+		if (ttvn != orig_ttvn || orig_node->tt_crc != tt_crc) {
+request_table:
+			bat_dbg(DBG_TT, bat_priv, "TT inconsistency for %pM. "
+				"Need to retrieve the correct information "
+				"(ttvn: %u last_ttvn: %u crc: %u last_crc: "
+				"%u num_changes: %u)\n", orig_node->orig, ttvn,
+				orig_ttvn, tt_crc, orig_node->tt_crc,
+				tt_num_changes);
+			send_tt_request(bat_priv, orig_node, ttvn, tt_crc,
+					full_table);
+			return;
+		}
 	}
 }
 
-static void update_route(struct bat_priv *bat_priv, struct orig_node *orig_node,
-			 struct neigh_node *neigh_node,
-			 const unsigned char *tt_buff, int tt_buff_len)
+static void update_route(struct bat_priv *bat_priv,
+			 struct orig_node *orig_node,
+			 struct neigh_node *neigh_node)
 {
 	struct neigh_node *curr_router;
 
@@ -92,11 +122,10 @@
 
 	/* route deleted */
 	if ((curr_router) && (!neigh_node)) {
-
 		bat_dbg(DBG_ROUTES, bat_priv, "Deleting route towards: %pM\n",
 			orig_node->orig);
 		tt_global_del_orig(bat_priv, orig_node,
-				    "originator timed out");
+				    "Deleted route towards originator");
 
 	/* route added */
 	} else if ((!curr_router) && (neigh_node)) {
@@ -104,9 +133,6 @@
 		bat_dbg(DBG_ROUTES, bat_priv,
 			"Adding route towards: %pM (via %pM)\n",
 			orig_node->orig, neigh_node->addr);
-		tt_global_add_orig(bat_priv, orig_node,
-				    tt_buff, tt_buff_len);
-
 	/* route changed */
 	} else if (neigh_node && curr_router) {
 		bat_dbg(DBG_ROUTES, bat_priv,
@@ -133,8 +159,7 @@
 }
 
 void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
-		   struct neigh_node *neigh_node, const unsigned char *tt_buff,
-		   int tt_buff_len)
+		   struct neigh_node *neigh_node)
 {
 	struct neigh_node *router = NULL;
 
@@ -144,11 +169,7 @@
 	router = orig_node_get_router(orig_node);
 
 	if (router != neigh_node)
-		update_route(bat_priv, orig_node, neigh_node,
-			     tt_buff, tt_buff_len);
-	/* may be just TT changed */
-	else
-		update_TT(bat_priv, orig_node, tt_buff, tt_buff_len);
+		update_route(bat_priv, orig_node, neigh_node);
 
 out:
 	if (router)
@@ -163,7 +184,7 @@
 	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
 	struct neigh_node *neigh_node = NULL, *tmp_neigh_node;
 	struct hlist_node *node;
-	unsigned char total_count;
+	uint8_t total_count;
 	uint8_t orig_eq_count, neigh_rq_count, tq_own;
 	int tq_asym_penalty, ret = 0;
 
@@ -360,14 +381,12 @@
 			const struct ethhdr *ethhdr,
 			const struct batman_packet *batman_packet,
 			struct hard_iface *if_incoming,
-			const unsigned char *tt_buff, int tt_buff_len,
-			char is_duplicate)
+			const unsigned char *tt_buff, int is_duplicate)
 {
 	struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
 	struct neigh_node *router = NULL;
 	struct orig_node *orig_node_tmp;
 	struct hlist_node *node;
-	int tmp_tt_buff_len;
 	uint8_t bcast_own_sum_orig, bcast_own_sum_neigh;
 
 	bat_dbg(DBG_BATMAN, bat_priv, "update_originator(): "
@@ -432,9 +451,6 @@
 
 	bonding_candidate_add(orig_node, neigh_node);
 
-	tmp_tt_buff_len = (tt_buff_len > batman_packet->num_tt * ETH_ALEN ?
-			    batman_packet->num_tt * ETH_ALEN : tt_buff_len);
-
 	/* if this neighbor already is our next hop there is nothing
 	 * to change */
 	router = orig_node_get_router(orig_node);
@@ -464,15 +480,19 @@
 			goto update_tt;
 	}
 
-	update_routes(bat_priv, orig_node, neigh_node,
-		      tt_buff, tmp_tt_buff_len);
-	goto update_gw;
+	update_routes(bat_priv, orig_node, neigh_node);
 
 update_tt:
-	update_routes(bat_priv, orig_node, router,
-		      tt_buff, tmp_tt_buff_len);
+	/* I have to check for transtable changes only if the OGM has been
+	 * sent through a primary interface */
+	if (((batman_packet->orig != ethhdr->h_source) &&
+				(batman_packet->ttl > 2)) ||
+				(batman_packet->flags & PRIMARIES_FIRST_HOP))
+		update_transtable(bat_priv, orig_node, tt_buff,
+				  batman_packet->tt_num_changes,
+				  batman_packet->ttvn,
+				  batman_packet->tt_crc);
 
-update_gw:
 	if (orig_node->gw_flags != batman_packet->gw_flags)
 		gw_node_update(bat_priv, orig_node, batman_packet->gw_flags);
 
@@ -528,7 +548,7 @@
  *  -1 the packet is old and has been received while the seqno window
  *     was protected. Caller should drop it.
  */
-static char count_real_packets(const struct ethhdr *ethhdr,
+static int count_real_packets(const struct ethhdr *ethhdr,
 			       const struct batman_packet *batman_packet,
 			       const struct hard_iface *if_incoming)
 {
@@ -536,7 +556,7 @@
 	struct orig_node *orig_node;
 	struct neigh_node *tmp_neigh_node;
 	struct hlist_node *node;
-	char is_duplicate = 0;
+	int is_duplicate = 0;
 	int32_t seq_diff;
 	int need_update = 0;
 	int set_mark, ret = -1;
@@ -594,7 +614,7 @@
 
 void receive_bat_packet(const struct ethhdr *ethhdr,
 			struct batman_packet *batman_packet,
-			const unsigned char *tt_buff, int tt_buff_len,
+			const unsigned char *tt_buff,
 			struct hard_iface *if_incoming)
 {
 	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
@@ -602,10 +622,10 @@
 	struct orig_node *orig_neigh_node, *orig_node;
 	struct neigh_node *router = NULL, *router_router = NULL;
 	struct neigh_node *orig_neigh_router = NULL;
-	char has_directlink_flag;
-	char is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0;
-	char is_broadcast = 0, is_bidirectional, is_single_hop_neigh;
-	char is_duplicate;
+	int has_directlink_flag;
+	int is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0;
+	int is_broadcast = 0, is_bidirectional, is_single_hop_neigh;
+	int is_duplicate;
 	uint32_t if_incoming_seqno;
 
 	/* Silently drop when the batman packet is actually not a
@@ -633,12 +653,14 @@
 
 	bat_dbg(DBG_BATMAN, bat_priv,
 		"Received BATMAN packet via NB: %pM, IF: %s [%pM] "
-		"(from OG: %pM, via prev OG: %pM, seqno %d, tq %d, "
-		"TTL %d, V %d, IDF %d)\n",
+		"(from OG: %pM, via prev OG: %pM, seqno %d, ttvn %u, "
+		"crc %u, changes %u, td %d, TTL %d, V %d, IDF %d)\n",
 		ethhdr->h_source, if_incoming->net_dev->name,
 		if_incoming->net_dev->dev_addr, batman_packet->orig,
 		batman_packet->prev_sender, batman_packet->seqno,
-		batman_packet->tq, batman_packet->ttl, batman_packet->version,
+		batman_packet->ttvn, batman_packet->tt_crc,
+		batman_packet->tt_num_changes, batman_packet->tq,
+		batman_packet->ttl, batman_packet->version,
 		has_directlink_flag);
 
 	rcu_read_lock();
@@ -790,14 +812,14 @@
 	     ((orig_node->last_real_seqno == batman_packet->seqno) &&
 	      (orig_node->last_ttl - 3 <= batman_packet->ttl))))
 		update_orig(bat_priv, orig_node, ethhdr, batman_packet,
-			    if_incoming, tt_buff, tt_buff_len, is_duplicate);
+			    if_incoming, tt_buff, is_duplicate);
 
 	/* is single hop (direct) neighbor */
 	if (is_single_hop_neigh) {
 
 		/* mark direct link on incoming interface */
 		schedule_forward_packet(orig_node, ethhdr, batman_packet,
-					1, tt_buff_len, if_incoming);
+					1, if_incoming);
 
 		bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: "
 			"rebroadcast neighbor packet with direct link flag\n");
@@ -820,7 +842,7 @@
 	bat_dbg(DBG_BATMAN, bat_priv,
 		"Forwarding packet: rebroadcast originator packet\n");
 	schedule_forward_packet(orig_node, ethhdr, batman_packet,
-				0, tt_buff_len, if_incoming);
+				0, if_incoming);
 
 out_neigh:
 	if ((orig_neigh_node) && (!is_single_hop_neigh))
@@ -1167,6 +1189,118 @@
 	return router;
 }
 
+int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if)
+{
+	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
+	struct tt_query_packet *tt_query;
+	struct ethhdr *ethhdr;
+
+	/* drop packet if it does not have the necessary minimum size */
+	if (unlikely(!pskb_may_pull(skb, sizeof(struct tt_query_packet))))
+		goto out;
+
+	/* I could need to modify it */
+	if (skb_cow(skb, sizeof(struct tt_query_packet)) < 0)
+		goto out;
+
+	ethhdr = (struct ethhdr *)skb_mac_header(skb);
+
+	/* packet with unicast indication but broadcast recipient */
+	if (is_broadcast_ether_addr(ethhdr->h_dest))
+		goto out;
+
+	/* packet with broadcast sender address */
+	if (is_broadcast_ether_addr(ethhdr->h_source))
+		goto out;
+
+	tt_query = (struct tt_query_packet *)skb->data;
+
+	tt_query->tt_data = ntohs(tt_query->tt_data);
+
+	switch (tt_query->flags & TT_QUERY_TYPE_MASK) {
+	case TT_REQUEST:
+		/* If we cannot provide an answer the tt_request is
+		 * forwarded */
+		if (!send_tt_response(bat_priv, tt_query)) {
+			bat_dbg(DBG_TT, bat_priv,
+				"Routing TT_REQUEST to %pM [%c]\n",
+				tt_query->dst,
+				(tt_query->flags & TT_FULL_TABLE ? 'F' : '.'));
+			tt_query->tt_data = htons(tt_query->tt_data);
+			return route_unicast_packet(skb, recv_if);
+		}
+		break;
+	case TT_RESPONSE:
+		/* packet needs to be linearised to access the TT changes */
+		if (skb_linearize(skb) < 0)
+			goto out;
+
+		if (is_my_mac(tt_query->dst))
+			handle_tt_response(bat_priv, tt_query);
+		else {
+			bat_dbg(DBG_TT, bat_priv,
+				"Routing TT_RESPONSE to %pM [%c]\n",
+				tt_query->dst,
+				(tt_query->flags & TT_FULL_TABLE ? 'F' : '.'));
+			tt_query->tt_data = htons(tt_query->tt_data);
+			return route_unicast_packet(skb, recv_if);
+		}
+		break;
+	}
+
+out:
+	/* returning NET_RX_DROP will make the caller function kfree the skb */
+	return NET_RX_DROP;
+}
+
+int recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if)
+{
+	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
+	struct roam_adv_packet *roam_adv_packet;
+	struct orig_node *orig_node;
+	struct ethhdr *ethhdr;
+
+	/* drop packet if it does not have the necessary minimum size */
+	if (unlikely(!pskb_may_pull(skb, sizeof(struct roam_adv_packet))))
+		goto out;
+
+	ethhdr = (struct ethhdr *)skb_mac_header(skb);
+
+	/* packet with unicast indication but broadcast recipient */
+	if (is_broadcast_ether_addr(ethhdr->h_dest))
+		goto out;
+
+	/* packet with broadcast sender address */
+	if (is_broadcast_ether_addr(ethhdr->h_source))
+		goto out;
+
+	roam_adv_packet = (struct roam_adv_packet *)skb->data;
+
+	if (!is_my_mac(roam_adv_packet->dst))
+		return route_unicast_packet(skb, recv_if);
+
+	orig_node = orig_hash_find(bat_priv, roam_adv_packet->src);
+	if (!orig_node)
+		goto out;
+
+	bat_dbg(DBG_TT, bat_priv, "Received ROAMING_ADV from %pM "
+		"(client %pM)\n", roam_adv_packet->src,
+		roam_adv_packet->client);
+
+	tt_global_add(bat_priv, orig_node, roam_adv_packet->client,
+		      atomic_read(&orig_node->last_ttvn) + 1, true);
+
+	/* Roaming phase starts: I have new information but the ttvn has not
+	 * been incremented yet. This flag will make me check all the incoming
+	 * packets for the correct destination. */
+	bat_priv->tt_poss_change = true;
+
+	orig_node_free_ref(orig_node);
+out:
+	/* returning NET_RX_DROP will make the caller function kfree the skb */
+	return NET_RX_DROP;
+}
+
 /* find a suitable router for this originator, and use
  * bonding if possible. increases the found neighbors
  * refcount.*/
@@ -1353,14 +1487,84 @@
 	return ret;
 }
 
+static int check_unicast_ttvn(struct bat_priv *bat_priv,
+			       struct sk_buff *skb) {
+	uint8_t curr_ttvn;
+	struct orig_node *orig_node;
+	struct ethhdr *ethhdr;
+	struct hard_iface *primary_if;
+	struct unicast_packet *unicast_packet;
+	bool tt_poss_change;
+
+	/* I could need to modify it */
+	if (skb_cow(skb, sizeof(struct unicast_packet)) < 0)
+		return 0;
+
+	unicast_packet = (struct unicast_packet *)skb->data;
+
+	if (is_my_mac(unicast_packet->dest)) {
+		tt_poss_change = bat_priv->tt_poss_change;
+		curr_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
+	} else {
+		orig_node = orig_hash_find(bat_priv, unicast_packet->dest);
+
+		if (!orig_node)
+			return 0;
+
+		curr_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
+		tt_poss_change = orig_node->tt_poss_change;
+		orig_node_free_ref(orig_node);
+	}
+
+	/* Check whether I have to reroute the packet */
+	if (seq_before(unicast_packet->ttvn, curr_ttvn) || tt_poss_change) {
+		/* Linearize the skb before accessing it */
+		if (skb_linearize(skb) < 0)
+			return 0;
+
+		ethhdr = (struct ethhdr *)(skb->data +
+			sizeof(struct unicast_packet));
+		orig_node = transtable_search(bat_priv, ethhdr->h_dest);
+
+		if (!orig_node) {
+			if (!is_my_client(bat_priv, ethhdr->h_dest))
+				return 0;
+			primary_if = primary_if_get_selected(bat_priv);
+			if (!primary_if)
+				return 0;
+			memcpy(unicast_packet->dest,
+			       primary_if->net_dev->dev_addr, ETH_ALEN);
+			hardif_free_ref(primary_if);
+		} else {
+			memcpy(unicast_packet->dest, orig_node->orig,
+			       ETH_ALEN);
+			curr_ttvn = (uint8_t)
+				atomic_read(&orig_node->last_ttvn);
+			orig_node_free_ref(orig_node);
+		}
+
+		bat_dbg(DBG_ROUTES, bat_priv, "TTVN mismatch (old_ttvn %u "
+			"new_ttvn %u)! Rerouting unicast packet (for %pM) to "
+			"%pM\n", unicast_packet->ttvn, curr_ttvn,
+			ethhdr->h_dest, unicast_packet->dest);
+
+		unicast_packet->ttvn = curr_ttvn;
+	}
+	return 1;
+}
+
 int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
 {
+	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
 	struct unicast_packet *unicast_packet;
 	int hdr_size = sizeof(*unicast_packet);
 
 	if (check_unicast_packet(skb, hdr_size) < 0)
 		return NET_RX_DROP;
 
+	if (!check_unicast_ttvn(bat_priv, skb))
+		return NET_RX_DROP;
+
 	unicast_packet = (struct unicast_packet *)skb->data;
 
 	/* packet for me */
@@ -1383,6 +1587,9 @@
 	if (check_unicast_packet(skb, hdr_size) < 0)
 		return NET_RX_DROP;
 
+	if (!check_unicast_ttvn(bat_priv, skb))
+		return NET_RX_DROP;
+
 	unicast_packet = (struct unicast_frag_packet *)skb->data;
 
 	/* packet for me */
diff --git a/net/batman-adv/routing.h b/net/batman-adv/routing.h
index 0ce0392..fb14e95 100644
--- a/net/batman-adv/routing.h
+++ b/net/batman-adv/routing.h
@@ -25,11 +25,10 @@
 void slide_own_bcast_window(struct hard_iface *hard_iface);
 void receive_bat_packet(const struct ethhdr *ethhdr,
 			struct batman_packet *batman_packet,
-			const unsigned char *tt_buff, int tt_buff_len,
+			const unsigned char *tt_buff,
 			struct hard_iface *if_incoming);
 void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
-		   struct neigh_node *neigh_node, const unsigned char *tt_buff,
-		   int tt_buff_len);
+		   struct neigh_node *neigh_node);
 int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
 int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if);
 int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
@@ -37,6 +36,8 @@
 int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
 int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if);
 int recv_bat_packet(struct sk_buff *skb, struct hard_iface *recv_if);
+int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if);
+int recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if);
 struct neigh_node *find_router(struct bat_priv *bat_priv,
 			       struct orig_node *orig_node,
 			       const struct hard_iface *recv_if);
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index a1b8c31..7a2f082 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -120,7 +120,7 @@
 	/* adjust all flags and log packets */
 	while (aggregated_packet(buff_pos,
 				 forw_packet->packet_len,
-				 batman_packet->num_tt)) {
+				 batman_packet->tt_num_changes)) {
 
 		/* we might have aggregated direct link packets with an
 		 * ordinary base packet */
@@ -135,17 +135,17 @@
 							    "Forwarding"));
 		bat_dbg(DBG_BATMAN, bat_priv,
 			"%s %spacket (originator %pM, seqno %d, TQ %d, TTL %d,"
-			" IDF %s) on interface %s [%pM]\n",
+			" IDF %s, ttvn %d) on interface %s [%pM]\n",
 			fwd_str, (packet_num > 0 ? "aggregated " : ""),
 			batman_packet->orig, ntohl(batman_packet->seqno),
 			batman_packet->tq, batman_packet->ttl,
 			(batman_packet->flags & DIRECTLINK ?
 			 "on" : "off"),
-			hard_iface->net_dev->name,
+			batman_packet->ttvn, hard_iface->net_dev->name,
 			hard_iface->net_dev->dev_addr);
 
 		buff_pos += sizeof(*batman_packet) +
-			(batman_packet->num_tt * ETH_ALEN);
+			tt_len(batman_packet->tt_num_changes);
 		packet_num++;
 		batman_packet = (struct batman_packet *)
 			(forw_packet->skb->data + buff_pos);
@@ -165,7 +165,7 @@
 	struct bat_priv *bat_priv;
 	struct batman_packet *batman_packet =
 		(struct batman_packet *)(forw_packet->skb->data);
-	unsigned char directlink = (batman_packet->flags & DIRECTLINK ? 1 : 0);
+	int directlink = (batman_packet->flags & DIRECTLINK ? 1 : 0);
 
 	if (!forw_packet->if_incoming) {
 		pr_err("Error - can't forward packet: incoming iface not "
@@ -213,25 +213,18 @@
 	rcu_read_unlock();
 }
 
-static void rebuild_batman_packet(struct bat_priv *bat_priv,
-				  struct hard_iface *hard_iface)
+static void realloc_packet_buffer(struct hard_iface *hard_iface,
+				int new_len)
 {
-	int new_len;
 	unsigned char *new_buff;
 	struct batman_packet *batman_packet;
 
-	new_len = sizeof(*batman_packet) + (bat_priv->num_local_tt * ETH_ALEN);
 	new_buff = kmalloc(new_len, GFP_ATOMIC);
 
 	/* keep old buffer if kmalloc should fail */
 	if (new_buff) {
 		memcpy(new_buff, hard_iface->packet_buff,
 		       sizeof(*batman_packet));
-		batman_packet = (struct batman_packet *)new_buff;
-
-		batman_packet->num_tt = tt_local_fill_buffer(bat_priv,
-					new_buff + sizeof(*batman_packet),
-					new_len - sizeof(*batman_packet));
 
 		kfree(hard_iface->packet_buff);
 		hard_iface->packet_buff = new_buff;
@@ -239,6 +232,46 @@
 	}
 }
 
+/* when calling this function (hard_iface == primary_if) has to be true */
+static void prepare_packet_buffer(struct bat_priv *bat_priv,
+				  struct hard_iface *hard_iface)
+{
+	int new_len;
+	struct batman_packet *batman_packet;
+
+	new_len = BAT_PACKET_LEN +
+		  tt_len((uint8_t)atomic_read(&bat_priv->tt_local_changes));
+
+	/* if we have too many changes for one packet don't send any
+	 * and wait for the tt table request which will be fragmented */
+	if (new_len > hard_iface->soft_iface->mtu)
+		new_len = BAT_PACKET_LEN;
+
+	realloc_packet_buffer(hard_iface, new_len);
+	batman_packet = (struct batman_packet *)hard_iface->packet_buff;
+
+	atomic_set(&bat_priv->tt_crc, tt_local_crc(bat_priv));
+
+	/* reset the sending counter */
+	atomic_set(&bat_priv->tt_ogm_append_cnt, TT_OGM_APPEND_MAX);
+
+	batman_packet->tt_num_changes = tt_changes_fill_buffer(bat_priv,
+				hard_iface->packet_buff + BAT_PACKET_LEN,
+				hard_iface->packet_len - BAT_PACKET_LEN);
+
+}
+
+static void reset_packet_buffer(struct bat_priv *bat_priv,
+	struct hard_iface *hard_iface)
+{
+	struct batman_packet *batman_packet;
+
+	realloc_packet_buffer(hard_iface, BAT_PACKET_LEN);
+
+	batman_packet = (struct batman_packet *)hard_iface->packet_buff;
+	batman_packet->tt_num_changes = 0;
+}
+
 void schedule_own_packet(struct hard_iface *hard_iface)
 {
 	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
@@ -264,14 +297,23 @@
 	if (hard_iface->if_status == IF_TO_BE_ACTIVATED)
 		hard_iface->if_status = IF_ACTIVE;
 
-	/* if local tt has changed and interface is a primary interface */
-	if ((atomic_read(&bat_priv->tt_local_changed)) &&
-	    (hard_iface == primary_if))
-		rebuild_batman_packet(bat_priv, hard_iface);
+	if (hard_iface == primary_if) {
+		/* if at least one change happened */
+		if (atomic_read(&bat_priv->tt_local_changes) > 0) {
+			prepare_packet_buffer(bat_priv, hard_iface);
+			/* Increment the TTVN only once per OGM interval */
+			atomic_inc(&bat_priv->ttvn);
+			bat_priv->tt_poss_change = false;
+		}
+
+		/* if the changes have been sent enough times */
+		if (!atomic_dec_not_zero(&bat_priv->tt_ogm_append_cnt))
+			reset_packet_buffer(bat_priv, hard_iface);
+	}
 
 	/**
 	 * NOTE: packet_buff might just have been re-allocated in
-	 * rebuild_batman_packet()
+	 * prepare_packet_buffer() or in reset_packet_buffer()
 	 */
 	batman_packet = (struct batman_packet *)hard_iface->packet_buff;
 
@@ -279,6 +321,9 @@
 	batman_packet->seqno =
 		htonl((uint32_t)atomic_read(&hard_iface->seqno));
 
+	batman_packet->ttvn = atomic_read(&bat_priv->ttvn);
+	batman_packet->tt_crc = htons((uint16_t)atomic_read(&bat_priv->tt_crc));
+
 	if (vis_server == VIS_TYPE_SERVER_SYNC)
 		batman_packet->flags |= VIS_SERVER;
 	else
@@ -307,13 +352,14 @@
 void schedule_forward_packet(struct orig_node *orig_node,
 			     const struct ethhdr *ethhdr,
 			     struct batman_packet *batman_packet,
-			     uint8_t directlink, int tt_buff_len,
+			     int directlink,
 			     struct hard_iface *if_incoming)
 {
 	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
 	struct neigh_node *router;
-	unsigned char in_tq, in_ttl, tq_avg = 0;
+	uint8_t in_tq, in_ttl, tq_avg = 0;
 	unsigned long send_time;
+	uint8_t tt_num_changes;
 
 	if (batman_packet->ttl <= 1) {
 		bat_dbg(DBG_BATMAN, bat_priv, "ttl exceeded\n");
@@ -324,6 +370,7 @@
 
 	in_tq = batman_packet->tq;
 	in_ttl = batman_packet->ttl;
+	tt_num_changes = batman_packet->tt_num_changes;
 
 	batman_packet->ttl--;
 	memcpy(batman_packet->prev_sender, ethhdr->h_source, ETH_ALEN);
@@ -356,6 +403,7 @@
 		batman_packet->ttl);
 
 	batman_packet->seqno = htonl(batman_packet->seqno);
+	batman_packet->tt_crc = htons(batman_packet->tt_crc);
 
 	/* switch of primaries first hop flag when forwarding */
 	batman_packet->flags &= ~PRIMARIES_FIRST_HOP;
@@ -367,7 +415,7 @@
 	send_time = forward_send_time();
 	add_bat_packet_to_list(bat_priv,
 			       (unsigned char *)batman_packet,
-			       sizeof(*batman_packet) + tt_buff_len,
+			       sizeof(*batman_packet) + tt_len(tt_num_changes),
 			       if_incoming, 0, send_time);
 }
 
diff --git a/net/batman-adv/send.h b/net/batman-adv/send.h
index eceab87..633224a 100644
--- a/net/batman-adv/send.h
+++ b/net/batman-adv/send.h
@@ -28,7 +28,7 @@
 void schedule_forward_packet(struct orig_node *orig_node,
 			     const struct ethhdr *ethhdr,
 			     struct batman_packet *batman_packet,
-			     uint8_t directlink, int tt_buff_len,
+			     int directlink,
 			     struct hard_iface *if_outgoing);
 int add_bcast_packet_to_list(struct bat_priv *bat_priv,
 			     const struct sk_buff *skb);
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index b8d3f248..2dcdbb7 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -30,6 +30,7 @@
 #include "gateway_common.h"
 #include "gateway_client.h"
 #include "bat_sysfs.h"
+#include "originator.h"
 #include <linux/slab.h>
 #include <linux/ethtool.h>
 #include <linux/etherdevice.h>
@@ -380,7 +381,7 @@
 	struct softif_neigh *softif_neigh, *curr_softif_neigh;
 	struct softif_neigh_vid *softif_neigh_vid;
 	struct hlist_node *node, *node_tmp, *node_tmp2;
-	char do_deselect;
+	int do_deselect;
 
 	rcu_read_lock();
 	hlist_for_each_entry_rcu(softif_neigh_vid, node,
@@ -534,7 +535,7 @@
 	/* only modify transtable if it has been initialised before */
 	if (atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE) {
 		tt_local_remove(bat_priv, dev->dev_addr,
-				 "mac address changed");
+				"mac address changed", false);
 		tt_local_add(dev, addr->sa_data);
 	}
 
@@ -553,7 +554,7 @@
 	return 0;
 }
 
-int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
+static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
 {
 	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
 	struct bat_priv *bat_priv = netdev_priv(soft_iface);
@@ -561,6 +562,7 @@
 	struct bcast_packet *bcast_packet;
 	struct vlan_ethhdr *vhdr;
 	struct softif_neigh *curr_softif_neigh = NULL;
+	struct orig_node *orig_node = NULL;
 	int data_len = skb->len, ret;
 	short vid = -1;
 	bool do_bcast = false;
@@ -592,11 +594,13 @@
 	if (curr_softif_neigh)
 		goto dropped;
 
-	/* TODO: check this for locks */
+	/* Register the client MAC in the transtable */
 	tt_local_add(soft_iface, ethhdr->h_source);
 
-	if (is_multicast_ether_addr(ethhdr->h_dest)) {
-		ret = gw_is_target(bat_priv, skb);
+	orig_node = transtable_search(bat_priv, ethhdr->h_dest);
+	if (is_multicast_ether_addr(ethhdr->h_dest) ||
+				(orig_node && orig_node->gw_flags)) {
+		ret = gw_is_target(bat_priv, skb, orig_node);
 
 		if (ret < 0)
 			goto dropped;
@@ -656,6 +660,8 @@
 		softif_neigh_free_ref(curr_softif_neigh);
 	if (primary_if)
 		hardif_free_ref(primary_if);
+	if (orig_node)
+		orig_node_free_ref(orig_node);
 	return NETDEV_TX_OK;
 }
 
@@ -830,7 +836,13 @@
 
 	atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
 	atomic_set(&bat_priv->bcast_seqno, 1);
-	atomic_set(&bat_priv->tt_local_changed, 0);
+	atomic_set(&bat_priv->ttvn, 0);
+	atomic_set(&bat_priv->tt_local_changes, 0);
+	atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
+
+	bat_priv->tt_buff = NULL;
+	bat_priv->tt_buff_len = 0;
+	bat_priv->tt_poss_change = false;
 
 	bat_priv->primary_if = NULL;
 	bat_priv->num_ifaces = 0;
diff --git a/net/batman-adv/soft-interface.h b/net/batman-adv/soft-interface.h
index c24906d..001546f 100644
--- a/net/batman-adv/soft-interface.h
+++ b/net/batman-adv/soft-interface.h
@@ -25,7 +25,6 @@
 int my_skb_head_push(struct sk_buff *skb, unsigned int len);
 int softif_neigh_seq_print_text(struct seq_file *seq, void *offset);
 void softif_neigh_purge(struct bat_priv *bat_priv);
-int interface_tx(struct sk_buff *skb, struct net_device *soft_iface);
 void interface_rx(struct net_device *soft_iface,
 		  struct sk_buff *skb, struct hard_iface *recv_if,
 		  int hdr_size);
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 561f769..5f1fcd5 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -23,13 +23,17 @@
 #include "translation-table.h"
 #include "soft-interface.h"
 #include "hard-interface.h"
+#include "send.h"
 #include "hash.h"
 #include "originator.h"
+#include "routing.h"
 
-static void tt_local_purge(struct work_struct *work);
-static void _tt_global_del_orig(struct bat_priv *bat_priv,
-				 struct tt_global_entry *tt_global_entry,
-				 const char *message);
+#include <linux/crc16.h>
+
+static void _tt_global_del(struct bat_priv *bat_priv,
+			   struct tt_global_entry *tt_global_entry,
+			   const char *message);
+static void tt_purge(struct work_struct *work);
 
 /* returns 1 if they are the same mac addr */
 static int compare_ltt(const struct hlist_node *node, const void *data2)
@@ -49,10 +53,11 @@
 	return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
 }
 
-static void tt_local_start_timer(struct bat_priv *bat_priv)
+static void tt_start_timer(struct bat_priv *bat_priv)
 {
-	INIT_DELAYED_WORK(&bat_priv->tt_work, tt_local_purge);
-	queue_delayed_work(bat_event_workqueue, &bat_priv->tt_work, 10 * HZ);
+	INIT_DELAYED_WORK(&bat_priv->tt_work, tt_purge);
+	queue_delayed_work(bat_event_workqueue, &bat_priv->tt_work,
+			   msecs_to_jiffies(5000));
 }
 
 static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv,
@@ -75,6 +80,9 @@
 		if (!compare_eth(tt_local_entry, data))
 			continue;
 
+		if (!atomic_inc_not_zero(&tt_local_entry->refcount))
+			continue;
+
 		tt_local_entry_tmp = tt_local_entry;
 		break;
 	}
@@ -104,6 +112,9 @@
 		if (!compare_eth(tt_global_entry, data))
 			continue;
 
+		if (!atomic_inc_not_zero(&tt_global_entry->refcount))
+			continue;
+
 		tt_global_entry_tmp = tt_global_entry;
 		break;
 	}
@@ -112,7 +123,57 @@
 	return tt_global_entry_tmp;
 }
 
-int tt_local_init(struct bat_priv *bat_priv)
+static bool is_out_of_time(unsigned long starting_time, unsigned long timeout)
+{
+	unsigned long deadline;
+	deadline = starting_time + msecs_to_jiffies(timeout);
+
+	return time_after(jiffies, deadline);
+}
+
+static void tt_local_entry_free_ref(struct tt_local_entry *tt_local_entry)
+{
+	if (atomic_dec_and_test(&tt_local_entry->refcount))
+		kfree_rcu(tt_local_entry, rcu);
+}
+
+static void tt_global_entry_free_ref(struct tt_global_entry *tt_global_entry)
+{
+	if (atomic_dec_and_test(&tt_global_entry->refcount))
+		kfree_rcu(tt_global_entry, rcu);
+}
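
The two *_free_ref() helpers above, together with the atomic_inc_not_zero() calls added to the hash lookups, replace the old tt_lhash_lock/tt_ghash_lock spinlocks with per-entry reference counting: an entry is freed only when the last reference is dropped, and the free itself is deferred past an RCU grace period so lockless readers stay safe. A minimal sketch of the idiom, using a hypothetical struct foo that is not part of this patch:

	struct foo {
		atomic_t refcount;
		struct rcu_head rcu;
		struct hlist_node hash_entry;
	};

	static struct foo *foo_get(struct foo *f)
	{
		/* lookup path: take a reference only if the entry is not
		 * already on its way to being freed */
		if (!atomic_inc_not_zero(&f->refcount))
			return NULL;
		return f;
	}

	static void foo_free_ref(struct foo *f)
	{
		/* last reference gone: wait for concurrent RCU readers
		 * before the memory is released */
		if (atomic_dec_and_test(&f->refcount))
			kfree_rcu(f, rcu);
	}
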
+
+static void tt_local_event(struct bat_priv *bat_priv, uint8_t op,
+			   const uint8_t *addr, bool roaming)
+{
+	struct tt_change_node *tt_change_node;
+
+	tt_change_node = kmalloc(sizeof(*tt_change_node), GFP_ATOMIC);
+
+	if (!tt_change_node)
+		return;
+
+	tt_change_node->change.flags = op;
+	if (roaming)
+		tt_change_node->change.flags |= TT_CLIENT_ROAM;
+
+	memcpy(tt_change_node->change.addr, addr, ETH_ALEN);
+
+	spin_lock_bh(&bat_priv->tt_changes_list_lock);
+	/* track the change in the OGM interval list */
+	list_add_tail(&tt_change_node->list, &bat_priv->tt_changes_list);
+	atomic_inc(&bat_priv->tt_local_changes);
+	spin_unlock_bh(&bat_priv->tt_changes_list_lock);
+
+	atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
+}
+
+int tt_len(int changes_num)
+{
+	return changes_num * sizeof(struct tt_change);
+}
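
tt_local_event() and tt_len() are the bookkeeping core of the incremental scheme: every local add or delete is queued as a tt_change record on tt_changes_list, and tt_len() converts a record count into the number of bytes those records occupy once appended to an OGM. A small illustration of how a deletion caused by a roaming client would be encoded; the flag names come from packet.h in this series, and client_addr is a placeholder:

	/* one record for "client left because it roamed away"; on the wire
	 * this costs tt_len(1) == sizeof(struct tt_change) bytes */
	struct tt_change change = {
		.flags = TT_CHANGE_DEL | TT_CLIENT_ROAM,
	};

	memcpy(change.addr, client_addr, ETH_ALEN);
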
+
+static int tt_local_init(struct bat_priv *bat_priv)
 {
 	if (bat_priv->tt_local_hash)
 		return 1;
@@ -122,54 +183,35 @@
 	if (!bat_priv->tt_local_hash)
 		return 0;
 
-	atomic_set(&bat_priv->tt_local_changed, 0);
-	tt_local_start_timer(bat_priv);
-
 	return 1;
 }
 
 void tt_local_add(struct net_device *soft_iface, const uint8_t *addr)
 {
 	struct bat_priv *bat_priv = netdev_priv(soft_iface);
-	struct tt_local_entry *tt_local_entry;
-	struct tt_global_entry *tt_global_entry;
-	int required_bytes;
+	struct tt_local_entry *tt_local_entry = NULL;
+	struct tt_global_entry *tt_global_entry = NULL;
 
-	spin_lock_bh(&bat_priv->tt_lhash_lock);
 	tt_local_entry = tt_local_hash_find(bat_priv, addr);
-	spin_unlock_bh(&bat_priv->tt_lhash_lock);
 
 	if (tt_local_entry) {
 		tt_local_entry->last_seen = jiffies;
-		return;
+		goto out;
 	}
 
-	/* only announce as many hosts as possible in the batman-packet and
-	   space in batman_packet->num_tt That also should give a limit to
-	   MAC-flooding. */
-	required_bytes = (bat_priv->num_local_tt + 1) * ETH_ALEN;
-	required_bytes += BAT_PACKET_LEN;
-
-	if ((required_bytes > ETH_DATA_LEN) ||
-	    (atomic_read(&bat_priv->aggregated_ogms) &&
-	     required_bytes > MAX_AGGREGATION_BYTES) ||
-	    (bat_priv->num_local_tt + 1 > 255)) {
-		bat_dbg(DBG_ROUTES, bat_priv,
-			"Can't add new local tt entry (%pM): "
-			"number of local tt entries exceeds packet size\n",
-			addr);
-		return;
-	}
-
-	bat_dbg(DBG_ROUTES, bat_priv,
-		"Creating new local tt entry: %pM\n", addr);
-
 	tt_local_entry = kmalloc(sizeof(*tt_local_entry), GFP_ATOMIC);
 	if (!tt_local_entry)
-		return;
+		goto out;
+
+	tt_local_event(bat_priv, NO_FLAGS, addr, false);
+
+	bat_dbg(DBG_TT, bat_priv,
+		"Creating new local tt entry: %pM (ttvn: %d)\n", addr,
+		(uint8_t)atomic_read(&bat_priv->ttvn));
 
 	memcpy(tt_local_entry->addr, addr, ETH_ALEN);
 	tt_local_entry->last_seen = jiffies;
+	atomic_set(&tt_local_entry->refcount, 2);
 
 	/* the batman interface mac address should never be purged */
 	if (compare_eth(addr, soft_iface->dev_addr))
@@ -177,61 +219,75 @@
 	else
 		tt_local_entry->never_purge = 0;
 
-	spin_lock_bh(&bat_priv->tt_lhash_lock);
-
 	hash_add(bat_priv->tt_local_hash, compare_ltt, choose_orig,
 		 tt_local_entry, &tt_local_entry->hash_entry);
-	bat_priv->num_local_tt++;
-	atomic_set(&bat_priv->tt_local_changed, 1);
 
-	spin_unlock_bh(&bat_priv->tt_lhash_lock);
+	atomic_inc(&bat_priv->num_local_tt);
 
 	/* remove address from global hash if present */
-	spin_lock_bh(&bat_priv->tt_ghash_lock);
-
 	tt_global_entry = tt_global_hash_find(bat_priv, addr);
 
+	/* Check whether this is a roaming event */
+	if (tt_global_entry) {
+		/* This node is probably going to update its tt table */
+		tt_global_entry->orig_node->tt_poss_change = true;
+		_tt_global_del(bat_priv, tt_global_entry,
+			       "local tt received");
+		send_roam_adv(bat_priv, tt_global_entry->addr,
+			      tt_global_entry->orig_node);
+	}
+out:
+	if (tt_local_entry)
+		tt_local_entry_free_ref(tt_local_entry);
 	if (tt_global_entry)
-		_tt_global_del_orig(bat_priv, tt_global_entry,
-				     "local tt received");
-
-	spin_unlock_bh(&bat_priv->tt_ghash_lock);
+		tt_global_entry_free_ref(tt_global_entry);
 }
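
The roaming check appended to tt_local_add() above is the sender side of the new ROAMING_ADV mechanism: if the client being learned locally is still listed in the global table, it must have just moved here from the originator recorded in that entry. The assumed end-to-end sequence when client C moves from node A to node B, with the receive side handled in routing.c outside this excerpt, looks roughly like this:

	/*
	 * 1. B sees traffic from C           -> tt_local_add(C)
	 * 2. C is still global at B (via A)  -> A->tt_poss_change = true,
	 *    the stale global entry is dropped and send_roam_adv(C, A)
	 *    notifies A immediately instead of waiting for the next OGM
	 * 3. A handles the ROAMING_ADV       -> C leaves A's local table
	 *    with the roaming flag set, so A's next OGM advertises
	 *    TT_CHANGE_DEL | TT_CLIENT_ROAM for C under a new ttvn
	 */
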
 
-int tt_local_fill_buffer(struct bat_priv *bat_priv,
-			  unsigned char *buff, int buff_len)
+int tt_changes_fill_buffer(struct bat_priv *bat_priv,
+			   unsigned char *buff, int buff_len)
 {
-	struct hashtable_t *hash = bat_priv->tt_local_hash;
-	struct tt_local_entry *tt_local_entry;
-	struct hlist_node *node;
-	struct hlist_head *head;
-	int i, count = 0;
+	int count = 0, tot_changes = 0;
+	struct tt_change_node *entry, *safe;
 
-	spin_lock_bh(&bat_priv->tt_lhash_lock);
+	if (buff_len > 0)
+		tot_changes = buff_len / tt_len(1);
 
-	for (i = 0; i < hash->size; i++) {
-		head = &hash->table[i];
+	spin_lock_bh(&bat_priv->tt_changes_list_lock);
+	atomic_set(&bat_priv->tt_local_changes, 0);
 
-		rcu_read_lock();
-		hlist_for_each_entry_rcu(tt_local_entry, node,
-					 head, hash_entry) {
-			if (buff_len < (count + 1) * ETH_ALEN)
-				break;
-
-			memcpy(buff + (count * ETH_ALEN), tt_local_entry->addr,
-			       ETH_ALEN);
-
+	list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
+			list) {
+		if (count < tot_changes) {
+			memcpy(buff + tt_len(count),
+			       &entry->change, sizeof(struct tt_change));
 			count++;
 		}
-		rcu_read_unlock();
+		list_del(&entry->list);
+		kfree(entry);
 	}
+	spin_unlock_bh(&bat_priv->tt_changes_list_lock);
 
-	/* if we did not get all new local tts see you next time  ;-) */
-	if (count == bat_priv->num_local_tt)
-		atomic_set(&bat_priv->tt_local_changed, 0);
+	/* Keep the buffer for possible tt_request */
+	spin_lock_bh(&bat_priv->tt_buff_lock);
+	kfree(bat_priv->tt_buff);
+	bat_priv->tt_buff_len = 0;
+	bat_priv->tt_buff = NULL;
+	/* We check whether this new OGM has no changes due to size
+	 * problems */
+	if (buff_len > 0) {
+		/**
+		 * if kmalloc() fails we will reply with the full table
+		 * instead of providing the diff
+		 */
+		bat_priv->tt_buff = kmalloc(buff_len, GFP_ATOMIC);
+		if (bat_priv->tt_buff) {
+			memcpy(bat_priv->tt_buff, buff, buff_len);
+			bat_priv->tt_buff_len = buff_len;
+		}
+	}
+	spin_unlock_bh(&bat_priv->tt_buff_lock);
 
-	spin_unlock_bh(&bat_priv->tt_lhash_lock);
-	return count;
+	return tot_changes;
 }
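
Besides draining the change list into the caller's buffer, tt_changes_fill_buffer() keeps a copy of the serialized diff in bat_priv->tt_buff so that a later TT_REQUEST for the same ttvn can be answered without walking the whole local hash. A hedged sketch of how the OGM scheduling code elsewhere in this series is expected to size and fill that buffer; the local variable names are placeholders:

	int changes = atomic_read(&bat_priv->tt_local_changes);
	int len = tt_len(changes);	/* changes * sizeof(struct tt_change) */
	int tt_num_changes = 0;
	unsigned char *buff = kmalloc(len, GFP_ATOMIC);

	if (buff)
		/* number of records that fit into the len-byte buffer */
		tt_num_changes = tt_changes_fill_buffer(bat_priv, buff, len);
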
 
 int tt_local_seq_print_text(struct seq_file *seq, void *offset)
@@ -263,10 +319,8 @@
 	}
 
 	seq_printf(seq, "Locally retrieved addresses (from %s) "
-		   "announced via TT:\n",
-		   net_dev->name);
-
-	spin_lock_bh(&bat_priv->tt_lhash_lock);
+		   "announced via TT (TTVN: %u):\n",
+		   net_dev->name, (uint8_t)atomic_read(&bat_priv->ttvn));
 
 	buf_size = 1;
 	/* Estimate length for: " * xx:xx:xx:xx:xx:xx\n" */
@@ -281,7 +335,6 @@
 
 	buff = kmalloc(buf_size, GFP_ATOMIC);
 	if (!buff) {
-		spin_unlock_bh(&bat_priv->tt_lhash_lock);
 		ret = -ENOMEM;
 		goto out;
 	}
@@ -301,8 +354,6 @@
 		rcu_read_unlock();
 	}
 
-	spin_unlock_bh(&bat_priv->tt_lhash_lock);
-
 	seq_printf(seq, "%s", buff);
 	kfree(buff);
 out:
@@ -311,92 +362,108 @@
 	return ret;
 }
 
-static void _tt_local_del(struct hlist_node *node, void *arg)
-{
-	struct bat_priv *bat_priv = arg;
-	void *data = container_of(node, struct tt_local_entry, hash_entry);
-
-	kfree(data);
-	bat_priv->num_local_tt--;
-	atomic_set(&bat_priv->tt_local_changed, 1);
-}
-
 static void tt_local_del(struct bat_priv *bat_priv,
 			 struct tt_local_entry *tt_local_entry,
 			 const char *message)
 {
-	bat_dbg(DBG_ROUTES, bat_priv, "Deleting local tt entry (%pM): %s\n",
+	bat_dbg(DBG_TT, bat_priv, "Deleting local tt entry (%pM): %s\n",
 		tt_local_entry->addr, message);
 
+	atomic_dec(&bat_priv->num_local_tt);
+
 	hash_remove(bat_priv->tt_local_hash, compare_ltt, choose_orig,
 		    tt_local_entry->addr);
-	_tt_local_del(&tt_local_entry->hash_entry, bat_priv);
+
+	tt_local_entry_free_ref(tt_local_entry);
 }
 
-void tt_local_remove(struct bat_priv *bat_priv,
-		     const uint8_t *addr, const char *message)
+void tt_local_remove(struct bat_priv *bat_priv, const uint8_t *addr,
+		     const char *message, bool roaming)
 {
-	struct tt_local_entry *tt_local_entry;
-
-	spin_lock_bh(&bat_priv->tt_lhash_lock);
+	struct tt_local_entry *tt_local_entry = NULL;
 
 	tt_local_entry = tt_local_hash_find(bat_priv, addr);
 
-	if (tt_local_entry)
-		tt_local_del(bat_priv, tt_local_entry, message);
+	if (!tt_local_entry)
+		goto out;
 
-	spin_unlock_bh(&bat_priv->tt_lhash_lock);
+	tt_local_event(bat_priv, TT_CHANGE_DEL, tt_local_entry->addr, roaming);
+	tt_local_del(bat_priv, tt_local_entry, message);
+out:
+	if (tt_local_entry)
+		tt_local_entry_free_ref(tt_local_entry);
 }
 
-static void tt_local_purge(struct work_struct *work)
+static void tt_local_purge(struct bat_priv *bat_priv)
 {
-	struct delayed_work *delayed_work =
-		container_of(work, struct delayed_work, work);
-	struct bat_priv *bat_priv =
-		container_of(delayed_work, struct bat_priv, tt_work);
 	struct hashtable_t *hash = bat_priv->tt_local_hash;
 	struct tt_local_entry *tt_local_entry;
 	struct hlist_node *node, *node_tmp;
 	struct hlist_head *head;
-	unsigned long timeout;
+	spinlock_t *list_lock; /* protects write access to the hash lists */
 	int i;
 
-	spin_lock_bh(&bat_priv->tt_lhash_lock);
-
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
+		list_lock = &hash->list_locks[i];
 
+		spin_lock_bh(list_lock);
 		hlist_for_each_entry_safe(tt_local_entry, node, node_tmp,
 					  head, hash_entry) {
 			if (tt_local_entry->never_purge)
 				continue;
 
-			timeout = tt_local_entry->last_seen;
-			timeout += TT_LOCAL_TIMEOUT * HZ;
-
-			if (time_before(jiffies, timeout))
+			if (!is_out_of_time(tt_local_entry->last_seen,
+					    TT_LOCAL_TIMEOUT * 1000))
 				continue;
 
-			tt_local_del(bat_priv, tt_local_entry,
-				      "address timed out");
+			tt_local_event(bat_priv, TT_CHANGE_DEL,
+				       tt_local_entry->addr, false);
+			atomic_dec(&bat_priv->num_local_tt);
+			bat_dbg(DBG_TT, bat_priv, "Deleting local "
+				"tt entry (%pM): timed out\n",
+				tt_local_entry->addr);
+			hlist_del_rcu(node);
+			tt_local_entry_free_ref(tt_local_entry);
 		}
+		spin_unlock_bh(list_lock);
 	}
 
-	spin_unlock_bh(&bat_priv->tt_lhash_lock);
-	tt_local_start_timer(bat_priv);
 }
 
-void tt_local_free(struct bat_priv *bat_priv)
+static void tt_local_table_free(struct bat_priv *bat_priv)
 {
+	struct hashtable_t *hash;
+	spinlock_t *list_lock; /* protects write access to the hash lists */
+	struct tt_local_entry *tt_local_entry;
+	struct hlist_node *node, *node_tmp;
+	struct hlist_head *head;
+	int i;
+
 	if (!bat_priv->tt_local_hash)
 		return;
 
-	cancel_delayed_work_sync(&bat_priv->tt_work);
-	hash_delete(bat_priv->tt_local_hash, _tt_local_del, bat_priv);
+	hash = bat_priv->tt_local_hash;
+
+	for (i = 0; i < hash->size; i++) {
+		head = &hash->table[i];
+		list_lock = &hash->list_locks[i];
+
+		spin_lock_bh(list_lock);
+		hlist_for_each_entry_safe(tt_local_entry, node, node_tmp,
+					  head, hash_entry) {
+			hlist_del_rcu(node);
+			tt_local_entry_free_ref(tt_local_entry);
+		}
+		spin_unlock_bh(list_lock);
+	}
+
+	hash_destroy(hash);
+
 	bat_priv->tt_local_hash = NULL;
 }
 
-int tt_global_init(struct bat_priv *bat_priv)
+static int tt_global_init(struct bat_priv *bat_priv)
 {
 	if (bat_priv->tt_global_hash)
 		return 1;
@@ -409,73 +476,78 @@
 	return 1;
 }
 
-void tt_global_add_orig(struct bat_priv *bat_priv,
-			 struct orig_node *orig_node,
-			 const unsigned char *tt_buff, int tt_buff_len)
+static void tt_changes_list_free(struct bat_priv *bat_priv)
+{
+	struct tt_change_node *entry, *safe;
+
+	spin_lock_bh(&bat_priv->tt_changes_list_lock);
+
+	list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
+				 list) {
+		list_del(&entry->list);
+		kfree(entry);
+	}
+
+	atomic_set(&bat_priv->tt_local_changes, 0);
+	spin_unlock_bh(&bat_priv->tt_changes_list_lock);
+}
+
+/* caller must hold orig_node refcount */
+int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
+		  const unsigned char *tt_addr, uint8_t ttvn, bool roaming)
 {
 	struct tt_global_entry *tt_global_entry;
-	struct tt_local_entry *tt_local_entry;
-	int tt_buff_count = 0;
-	const unsigned char *tt_ptr;
+	struct orig_node *orig_node_tmp;
+	int ret = 0;
 
-	while ((tt_buff_count + 1) * ETH_ALEN <= tt_buff_len) {
-		spin_lock_bh(&bat_priv->tt_ghash_lock);
+	tt_global_entry = tt_global_hash_find(bat_priv, tt_addr);
 
-		tt_ptr = tt_buff + (tt_buff_count * ETH_ALEN);
-		tt_global_entry = tt_global_hash_find(bat_priv, tt_ptr);
+	if (!tt_global_entry) {
+		tt_global_entry =
+			kmalloc(sizeof(*tt_global_entry),
+				GFP_ATOMIC);
+		if (!tt_global_entry)
+			goto out;
 
-		if (!tt_global_entry) {
-			spin_unlock_bh(&bat_priv->tt_ghash_lock);
-
-			tt_global_entry = kmalloc(sizeof(*tt_global_entry),
-						  GFP_ATOMIC);
-
-			if (!tt_global_entry)
-				break;
-
-			memcpy(tt_global_entry->addr, tt_ptr, ETH_ALEN);
-
-			bat_dbg(DBG_ROUTES, bat_priv,
-				"Creating new global tt entry: "
-				"%pM (via %pM)\n",
-				tt_global_entry->addr, orig_node->orig);
-
-			spin_lock_bh(&bat_priv->tt_ghash_lock);
-			hash_add(bat_priv->tt_global_hash, compare_gtt,
-				 choose_orig, tt_global_entry,
-				 &tt_global_entry->hash_entry);
-
-		}
-
+		memcpy(tt_global_entry->addr, tt_addr, ETH_ALEN);
+		/* Assign the new orig_node */
+		atomic_inc(&orig_node->refcount);
 		tt_global_entry->orig_node = orig_node;
-		spin_unlock_bh(&bat_priv->tt_ghash_lock);
+		tt_global_entry->ttvn = ttvn;
+		tt_global_entry->flags = NO_FLAGS;
+		tt_global_entry->roam_at = 0;
+		atomic_set(&tt_global_entry->refcount, 2);
 
-		/* remove address from local hash if present */
-		spin_lock_bh(&bat_priv->tt_lhash_lock);
-
-		tt_ptr = tt_buff + (tt_buff_count * ETH_ALEN);
-		tt_local_entry = tt_local_hash_find(bat_priv, tt_ptr);
-
-		if (tt_local_entry)
-			tt_local_del(bat_priv, tt_local_entry,
-				      "global tt received");
-
-		spin_unlock_bh(&bat_priv->tt_lhash_lock);
-
-		tt_buff_count++;
-	}
-
-	/* initialize, and overwrite if malloc succeeds */
-	orig_node->tt_buff = NULL;
-	orig_node->tt_buff_len = 0;
-
-	if (tt_buff_len > 0) {
-		orig_node->tt_buff = kmalloc(tt_buff_len, GFP_ATOMIC);
-		if (orig_node->tt_buff) {
-			memcpy(orig_node->tt_buff, tt_buff, tt_buff_len);
-			orig_node->tt_buff_len = tt_buff_len;
+		hash_add(bat_priv->tt_global_hash, compare_gtt,
+			 choose_orig, tt_global_entry,
+			 &tt_global_entry->hash_entry);
+		atomic_inc(&orig_node->tt_size);
+	} else {
+		if (tt_global_entry->orig_node != orig_node) {
+			atomic_dec(&tt_global_entry->orig_node->tt_size);
+			orig_node_tmp = tt_global_entry->orig_node;
+			atomic_inc(&orig_node->refcount);
+			tt_global_entry->orig_node = orig_node;
+			orig_node_free_ref(orig_node_tmp);
+			atomic_inc(&orig_node->tt_size);
 		}
+		tt_global_entry->ttvn = ttvn;
+		tt_global_entry->flags = NO_FLAGS;
+		tt_global_entry->roam_at = 0;
 	}
+
+	bat_dbg(DBG_TT, bat_priv,
+		"Creating new global tt entry: %pM (via %pM)\n",
+		tt_global_entry->addr, orig_node->orig);
+
+	/* remove address from local hash if present */
+	tt_local_remove(bat_priv, tt_global_entry->addr,
+			"global tt received", roaming);
+	ret = 1;
+out:
+	if (tt_global_entry)
+		tt_global_entry_free_ref(tt_global_entry);
+	return ret;
 }
 
 int tt_global_seq_print_text(struct seq_file *seq, void *offset)
@@ -509,26 +581,27 @@
 	seq_printf(seq,
 		   "Globally announced TT entries received via the mesh %s\n",
 		   net_dev->name);
-
-	spin_lock_bh(&bat_priv->tt_ghash_lock);
+	seq_printf(seq, "       %-13s %s       %-15s %s\n",
+		   "Client", "(TTVN)", "Originator", "(Curr TTVN)");
 
 	buf_size = 1;
-	/* Estimate length for: " * xx:xx:xx:xx:xx:xx via xx:xx:xx:xx:xx:xx\n"*/
+	/* Estimate length for: " * xx:xx:xx:xx:xx:xx (ttvn) via
+	 * xx:xx:xx:xx:xx:xx (cur_ttvn)\n"*/
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
 
 		rcu_read_lock();
 		__hlist_for_each_rcu(node, head)
-			buf_size += 43;
+			buf_size += 59;
 		rcu_read_unlock();
 	}
 
 	buff = kmalloc(buf_size, GFP_ATOMIC);
 	if (!buff) {
-		spin_unlock_bh(&bat_priv->tt_ghash_lock);
 		ret = -ENOMEM;
 		goto out;
 	}
+
 	buff[0] = '\0';
 	pos = 0;
 
@@ -538,16 +611,18 @@
 		rcu_read_lock();
 		hlist_for_each_entry_rcu(tt_global_entry, node,
 					 head, hash_entry) {
-			pos += snprintf(buff + pos, 44,
-					" * %pM via %pM\n",
+			pos += snprintf(buff + pos, 61,
+					" * %pM  (%3u) via %pM     (%3u)\n",
 					tt_global_entry->addr,
-					tt_global_entry->orig_node->orig);
+					tt_global_entry->ttvn,
+					tt_global_entry->orig_node->orig,
+					(uint8_t) atomic_read(
+						&tt_global_entry->orig_node->
+						last_ttvn));
 		}
 		rcu_read_unlock();
 	}
 
-	spin_unlock_bh(&bat_priv->tt_ghash_lock);
-
 	seq_printf(seq, "%s", buff);
 	kfree(buff);
 out:
@@ -556,64 +631,145 @@
 	return ret;
 }
 
-static void _tt_global_del_orig(struct bat_priv *bat_priv,
-				 struct tt_global_entry *tt_global_entry,
-				 const char *message)
+static void _tt_global_del(struct bat_priv *bat_priv,
+			   struct tt_global_entry *tt_global_entry,
+			   const char *message)
 {
-	bat_dbg(DBG_ROUTES, bat_priv,
+	if (!tt_global_entry)
+		goto out;
+
+	bat_dbg(DBG_TT, bat_priv,
 		"Deleting global tt entry %pM (via %pM): %s\n",
 		tt_global_entry->addr, tt_global_entry->orig_node->orig,
 		message);
 
+	atomic_dec(&tt_global_entry->orig_node->tt_size);
+
 	hash_remove(bat_priv->tt_global_hash, compare_gtt, choose_orig,
 		    tt_global_entry->addr);
-	kfree(tt_global_entry);
+out:
+	if (tt_global_entry)
+		tt_global_entry_free_ref(tt_global_entry);
+}
+
+void tt_global_del(struct bat_priv *bat_priv,
+		   struct orig_node *orig_node, const unsigned char *addr,
+		   const char *message, bool roaming)
+{
+	struct tt_global_entry *tt_global_entry = NULL;
+
+	tt_global_entry = tt_global_hash_find(bat_priv, addr);
+	if (!tt_global_entry)
+		goto out;
+
+	if (tt_global_entry->orig_node == orig_node) {
+		if (roaming) {
+			tt_global_entry->flags |= TT_CLIENT_ROAM;
+			tt_global_entry->roam_at = jiffies;
+			goto out;
+		}
+		_tt_global_del(bat_priv, tt_global_entry, message);
+	}
+out:
+	if (tt_global_entry)
+		tt_global_entry_free_ref(tt_global_entry);
 }
 
 void tt_global_del_orig(struct bat_priv *bat_priv,
-			 struct orig_node *orig_node, const char *message)
+			struct orig_node *orig_node, const char *message)
 {
 	struct tt_global_entry *tt_global_entry;
-	int tt_buff_count = 0;
-	unsigned char *tt_ptr;
+	int i;
+	struct hashtable_t *hash = bat_priv->tt_global_hash;
+	struct hlist_node *node, *safe;
+	struct hlist_head *head;
+	spinlock_t *list_lock; /* protects write access to the hash lists */
 
-	if (orig_node->tt_buff_len == 0)
-		return;
+	for (i = 0; i < hash->size; i++) {
+		head = &hash->table[i];
+		list_lock = &hash->list_locks[i];
 
-	spin_lock_bh(&bat_priv->tt_ghash_lock);
+		spin_lock_bh(list_lock);
+		hlist_for_each_entry_safe(tt_global_entry, node, safe,
+					 head, hash_entry) {
+			if (tt_global_entry->orig_node == orig_node) {
+				bat_dbg(DBG_TT, bat_priv,
+					"Deleting global tt entry %pM "
+					"(via %pM): originator time out\n",
+					tt_global_entry->addr,
+					tt_global_entry->orig_node->orig);
+				hlist_del_rcu(node);
+				tt_global_entry_free_ref(tt_global_entry);
+			}
+		}
+		spin_unlock_bh(list_lock);
+	}
+	atomic_set(&orig_node->tt_size, 0);
+}
 
-	while ((tt_buff_count + 1) * ETH_ALEN <= orig_node->tt_buff_len) {
-		tt_ptr = orig_node->tt_buff + (tt_buff_count * ETH_ALEN);
-		tt_global_entry = tt_global_hash_find(bat_priv, tt_ptr);
+static void tt_global_roam_purge(struct bat_priv *bat_priv)
+{
+	struct hashtable_t *hash = bat_priv->tt_global_hash;
+	struct tt_global_entry *tt_global_entry;
+	struct hlist_node *node, *node_tmp;
+	struct hlist_head *head;
+	spinlock_t *list_lock; /* protects write access to the hash lists */
+	int i;
 
-		if ((tt_global_entry) &&
-		    (tt_global_entry->orig_node == orig_node))
-			_tt_global_del_orig(bat_priv, tt_global_entry,
-					     message);
+	for (i = 0; i < hash->size; i++) {
+		head = &hash->table[i];
+		list_lock = &hash->list_locks[i];
 
-		tt_buff_count++;
+		spin_lock_bh(list_lock);
+		hlist_for_each_entry_safe(tt_global_entry, node, node_tmp,
+					  head, hash_entry) {
+			if (!(tt_global_entry->flags & TT_CLIENT_ROAM))
+				continue;
+			if (!is_out_of_time(tt_global_entry->roam_at,
+					    TT_CLIENT_ROAM_TIMEOUT * 1000))
+				continue;
+
+			bat_dbg(DBG_TT, bat_priv, "Deleting global "
+				"tt entry (%pM): Roaming timeout\n",
+				tt_global_entry->addr);
+			atomic_dec(&tt_global_entry->orig_node->tt_size);
+			hlist_del_rcu(node);
+			tt_global_entry_free_ref(tt_global_entry);
+		}
+		spin_unlock_bh(list_lock);
 	}
 
-	spin_unlock_bh(&bat_priv->tt_ghash_lock);
-
-	orig_node->tt_buff_len = 0;
-	kfree(orig_node->tt_buff);
-	orig_node->tt_buff = NULL;
 }
 
-static void tt_global_del(struct hlist_node *node, void *arg)
+static void tt_global_table_free(struct bat_priv *bat_priv)
 {
-	void *data = container_of(node, struct tt_global_entry, hash_entry);
+	struct hashtable_t *hash;
+	spinlock_t *list_lock; /* protects write access to the hash lists */
+	struct tt_global_entry *tt_global_entry;
+	struct hlist_node *node, *node_tmp;
+	struct hlist_head *head;
+	int i;
 
-	kfree(data);
-}
-
-void tt_global_free(struct bat_priv *bat_priv)
-{
 	if (!bat_priv->tt_global_hash)
 		return;
 
-	hash_delete(bat_priv->tt_global_hash, tt_global_del, NULL);
+	hash = bat_priv->tt_global_hash;
+
+	for (i = 0; i < hash->size; i++) {
+		head = &hash->table[i];
+		list_lock = &hash->list_locks[i];
+
+		spin_lock_bh(list_lock);
+		hlist_for_each_entry_safe(tt_global_entry, node, node_tmp,
+					  head, hash_entry) {
+			hlist_del_rcu(node);
+			tt_global_entry_free_ref(tt_global_entry);
+		}
+		spin_unlock_bh(list_lock);
+	}
+
+	hash_destroy(hash);
+
 	bat_priv->tt_global_hash = NULL;
 }
 
@@ -623,18 +779,846 @@
 	struct tt_global_entry *tt_global_entry;
 	struct orig_node *orig_node = NULL;
 
-	spin_lock_bh(&bat_priv->tt_ghash_lock);
 	tt_global_entry = tt_global_hash_find(bat_priv, addr);
 
 	if (!tt_global_entry)
 		goto out;
 
 	if (!atomic_inc_not_zero(&tt_global_entry->orig_node->refcount))
-		goto out;
+		goto free_tt;
 
 	orig_node = tt_global_entry->orig_node;
 
+free_tt:
+	tt_global_entry_free_ref(tt_global_entry);
 out:
-	spin_unlock_bh(&bat_priv->tt_ghash_lock);
 	return orig_node;
 }
+
+/* Calculates the checksum of the local table of a given orig_node */
+uint16_t tt_global_crc(struct bat_priv *bat_priv, struct orig_node *orig_node)
+{
+	uint16_t total = 0, total_one;
+	struct hashtable_t *hash = bat_priv->tt_global_hash;
+	struct tt_global_entry *tt_global_entry;
+	struct hlist_node *node;
+	struct hlist_head *head;
+	int i, j;
+
+	for (i = 0; i < hash->size; i++) {
+		head = &hash->table[i];
+
+		rcu_read_lock();
+		hlist_for_each_entry_rcu(tt_global_entry, node,
+					 head, hash_entry) {
+			if (compare_eth(tt_global_entry->orig_node,
+					orig_node)) {
+				/* Roaming clients are in the global table for
+				 * consistency only. They don't have to be
+				 * taken into account while computing the
+				 * global crc */
+				if (tt_global_entry->flags & TT_CLIENT_ROAM)
+					continue;
+				total_one = 0;
+				for (j = 0; j < ETH_ALEN; j++)
+					total_one = crc16_byte(total_one,
+						tt_global_entry->addr[j]);
+				total ^= total_one;
+			}
+		}
+		rcu_read_unlock();
+	}
+
+	return total;
+}
+
+/* Calculates the checksum of the local table */
+uint16_t tt_local_crc(struct bat_priv *bat_priv)
+{
+	uint16_t total = 0, total_one;
+	struct hashtable_t *hash = bat_priv->tt_local_hash;
+	struct tt_local_entry *tt_local_entry;
+	struct hlist_node *node;
+	struct hlist_head *head;
+	int i, j;
+
+	for (i = 0; i < hash->size; i++) {
+		head = &hash->table[i];
+
+		rcu_read_lock();
+		hlist_for_each_entry_rcu(tt_local_entry, node,
+					 head, hash_entry) {
+			total_one = 0;
+			for (j = 0; j < ETH_ALEN; j++)
+				total_one = crc16_byte(total_one,
+						   tt_local_entry->addr[j]);
+			total ^= total_one;
+		}
+		rcu_read_unlock();
+	}
+
+	return total;
+}
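
Both checksum helpers fold a per-entry CRC16 into the running total with XOR instead of feeding every address through one CRC stream: the tables are hash tables, so iteration order is arbitrary, and XOR makes the result independent of that order while still changing whenever a client appears or disappears. A stand-alone sketch of the same computation over a plain array of MAC addresses (crc16_byte() comes from <linux/crc16.h>, which this patch already includes):

	static uint16_t tt_set_crc(const uint8_t (*addrs)[ETH_ALEN], int n)
	{
		uint16_t total = 0, total_one;
		int i, j;

		for (i = 0; i < n; i++) {
			total_one = 0;
			for (j = 0; j < ETH_ALEN; j++)
				total_one = crc16_byte(total_one, addrs[i][j]);
			/* XOR keeps the result order-independent */
			total ^= total_one;
		}

		return total;
	}
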
+
+static void tt_req_list_free(struct bat_priv *bat_priv)
+{
+	struct tt_req_node *node, *safe;
+
+	spin_lock_bh(&bat_priv->tt_req_list_lock);
+
+	list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
+		list_del(&node->list);
+		kfree(node);
+	}
+
+	spin_unlock_bh(&bat_priv->tt_req_list_lock);
+}
+
+void tt_save_orig_buffer(struct bat_priv *bat_priv, struct orig_node *orig_node,
+			 const unsigned char *tt_buff, uint8_t tt_num_changes)
+{
+	uint16_t tt_buff_len = tt_len(tt_num_changes);
+
+	/* Replace the old buffer only if I received something in the
+	 * last OGM (the OGM could carry no changes) */
+	spin_lock_bh(&orig_node->tt_buff_lock);
+	if (tt_buff_len > 0) {
+		kfree(orig_node->tt_buff);
+		orig_node->tt_buff_len = 0;
+		orig_node->tt_buff = kmalloc(tt_buff_len, GFP_ATOMIC);
+		if (orig_node->tt_buff) {
+			memcpy(orig_node->tt_buff, tt_buff, tt_buff_len);
+			orig_node->tt_buff_len = tt_buff_len;
+		}
+	}
+	spin_unlock_bh(&orig_node->tt_buff_lock);
+}
+
+static void tt_req_purge(struct bat_priv *bat_priv)
+{
+	struct tt_req_node *node, *safe;
+
+	spin_lock_bh(&bat_priv->tt_req_list_lock);
+	list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
+		if (is_out_of_time(node->issued_at,
+		    TT_REQUEST_TIMEOUT * 1000)) {
+			list_del(&node->list);
+			kfree(node);
+		}
+	}
+	spin_unlock_bh(&bat_priv->tt_req_list_lock);
+}
+
+/* returns the pointer to the new tt_req_node struct if no request
+ * has already been issued for this orig_node, NULL otherwise */
+static struct tt_req_node *new_tt_req_node(struct bat_priv *bat_priv,
+					  struct orig_node *orig_node)
+{
+	struct tt_req_node *tt_req_node_tmp, *tt_req_node = NULL;
+
+	spin_lock_bh(&bat_priv->tt_req_list_lock);
+	list_for_each_entry(tt_req_node_tmp, &bat_priv->tt_req_list, list) {
+		if (compare_eth(tt_req_node_tmp, orig_node) &&
+		    !is_out_of_time(tt_req_node_tmp->issued_at,
+				    TT_REQUEST_TIMEOUT * 1000))
+			goto unlock;
+	}
+
+	tt_req_node = kmalloc(sizeof(*tt_req_node), GFP_ATOMIC);
+	if (!tt_req_node)
+		goto unlock;
+
+	memcpy(tt_req_node->addr, orig_node->orig, ETH_ALEN);
+	tt_req_node->issued_at = jiffies;
+
+	list_add(&tt_req_node->list, &bat_priv->tt_req_list);
+unlock:
+	spin_unlock_bh(&bat_priv->tt_req_list_lock);
+	return tt_req_node;
+}
+
+static int tt_global_valid_entry(const void *entry_ptr, const void *data_ptr)
+{
+	const struct tt_global_entry *tt_global_entry = entry_ptr;
+	const struct orig_node *orig_node = data_ptr;
+
+	if (tt_global_entry->flags & TT_CLIENT_ROAM)
+		return 0;
+
+	return (tt_global_entry->orig_node == orig_node);
+}
+
+static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
+					      struct hashtable_t *hash,
+					      struct hard_iface *primary_if,
+					      int (*valid_cb)(const void *,
+							      const void *),
+					      void *cb_data)
+{
+	struct tt_local_entry *tt_local_entry;
+	struct tt_query_packet *tt_response;
+	struct tt_change *tt_change;
+	struct hlist_node *node;
+	struct hlist_head *head;
+	struct sk_buff *skb = NULL;
+	uint16_t tt_tot, tt_count;
+	ssize_t tt_query_size = sizeof(struct tt_query_packet);
+	int i;
+
+	if (tt_query_size + tt_len > primary_if->soft_iface->mtu) {
+		tt_len = primary_if->soft_iface->mtu - tt_query_size;
+		tt_len -= tt_len % sizeof(struct tt_change);
+	}
+	tt_tot = tt_len / sizeof(struct tt_change);
+
+	skb = dev_alloc_skb(tt_query_size + tt_len + ETH_HLEN);
+	if (!skb)
+		goto out;
+
+	skb_reserve(skb, ETH_HLEN);
+	tt_response = (struct tt_query_packet *)skb_put(skb,
+						     tt_query_size + tt_len);
+	tt_response->ttvn = ttvn;
+	tt_response->tt_data = htons(tt_tot);
+
+	tt_change = (struct tt_change *)(skb->data + tt_query_size);
+	tt_count = 0;
+
+	rcu_read_lock();
+	for (i = 0; i < hash->size; i++) {
+		head = &hash->table[i];
+
+		hlist_for_each_entry_rcu(tt_local_entry, node,
+					 head, hash_entry) {
+			if (tt_count == tt_tot)
+				break;
+
+			if ((valid_cb) && (!valid_cb(tt_local_entry, cb_data)))
+				continue;
+
+			memcpy(tt_change->addr, tt_local_entry->addr, ETH_ALEN);
+			tt_change->flags = NO_FLAGS;
+
+			tt_count++;
+			tt_change++;
+		}
+	}
+	rcu_read_unlock();
+
+out:
+	return skb;
+}
+
+int send_tt_request(struct bat_priv *bat_priv, struct orig_node *dst_orig_node,
+		    uint8_t ttvn, uint16_t tt_crc, bool full_table)
+{
+	struct sk_buff *skb = NULL;
+	struct tt_query_packet *tt_request;
+	struct neigh_node *neigh_node = NULL;
+	struct hard_iface *primary_if;
+	struct tt_req_node *tt_req_node = NULL;
+	int ret = 1;
+
+	primary_if = primary_if_get_selected(bat_priv);
+	if (!primary_if)
+		goto out;
+
+	/* The new tt_req will be issued only if I'm not waiting for a
+	 * reply from the same orig_node yet */
+	tt_req_node = new_tt_req_node(bat_priv, dst_orig_node);
+	if (!tt_req_node)
+		goto out;
+
+	skb = dev_alloc_skb(sizeof(struct tt_query_packet) + ETH_HLEN);
+	if (!skb)
+		goto out;
+
+	skb_reserve(skb, ETH_HLEN);
+
+	tt_request = (struct tt_query_packet *)skb_put(skb,
+				sizeof(struct tt_query_packet));
+
+	tt_request->packet_type = BAT_TT_QUERY;
+	tt_request->version = COMPAT_VERSION;
+	memcpy(tt_request->src, primary_if->net_dev->dev_addr, ETH_ALEN);
+	memcpy(tt_request->dst, dst_orig_node->orig, ETH_ALEN);
+	tt_request->ttl = TTL;
+	tt_request->ttvn = ttvn;
+	tt_request->tt_data = tt_crc;
+	tt_request->flags = TT_REQUEST;
+
+	if (full_table)
+		tt_request->flags |= TT_FULL_TABLE;
+
+	neigh_node = orig_node_get_router(dst_orig_node);
+	if (!neigh_node)
+		goto out;
+
+	bat_dbg(DBG_TT, bat_priv, "Sending TT_REQUEST to %pM via %pM "
+		"[%c]\n", dst_orig_node->orig, neigh_node->addr,
+		(full_table ? 'F' : '.'));
+
+	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
+	ret = 0;
+
+out:
+	if (neigh_node)
+		neigh_node_free_ref(neigh_node);
+	if (primary_if)
+		hardif_free_ref(primary_if);
+	if (ret)
+		kfree_skb(skb);
+	if (ret && tt_req_node) {
+		spin_lock_bh(&bat_priv->tt_req_list_lock);
+		list_del(&tt_req_node->list);
+		spin_unlock_bh(&bat_priv->tt_req_list_lock);
+		kfree(tt_req_node);
+	}
+	return ret;
+}
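
send_tt_request() only builds and routes the query; the decision to send one belongs to the OGM processing code, which is modified elsewhere in this series. A hedged sketch of that trigger, with the variable names and the one-version threshold assumed rather than quoted from routing.c:

	/* re-sync when the advertised table state no longer matches what
	 * we recorded for this originator */
	uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);

	if (advertised_ttvn != orig_ttvn ||
	    advertised_crc != orig_node->tt_crc) {
		/* ask for the full table when more than one version was
		 * missed; a single step can be patched from the diff */
		bool full_table = ((uint8_t)(advertised_ttvn - orig_ttvn) > 1);

		send_tt_request(bat_priv, orig_node, advertised_ttvn,
				advertised_crc, full_table);
	}
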
+
+static bool send_other_tt_response(struct bat_priv *bat_priv,
+				   struct tt_query_packet *tt_request)
+{
+	struct orig_node *req_dst_orig_node = NULL, *res_dst_orig_node = NULL;
+	struct neigh_node *neigh_node = NULL;
+	struct hard_iface *primary_if = NULL;
+	uint8_t orig_ttvn, req_ttvn, ttvn;
+	int ret = false;
+	unsigned char *tt_buff;
+	bool full_table;
+	uint16_t tt_len, tt_tot;
+	struct sk_buff *skb = NULL;
+	struct tt_query_packet *tt_response;
+
+	bat_dbg(DBG_TT, bat_priv,
+		"Received TT_REQUEST from %pM for "
+		"ttvn: %u (%pM) [%c]\n", tt_request->src,
+		tt_request->ttvn, tt_request->dst,
+		(tt_request->flags & TT_FULL_TABLE ? 'F' : '.'));
+
+	/* Let's get the orig node of the REAL destination */
+	req_dst_orig_node = get_orig_node(bat_priv, tt_request->dst);
+	if (!req_dst_orig_node)
+		goto out;
+
+	res_dst_orig_node = get_orig_node(bat_priv, tt_request->src);
+	if (!res_dst_orig_node)
+		goto out;
+
+	neigh_node = orig_node_get_router(res_dst_orig_node);
+	if (!neigh_node)
+		goto out;
+
+	primary_if = primary_if_get_selected(bat_priv);
+	if (!primary_if)
+		goto out;
+
+	orig_ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
+	req_ttvn = tt_request->ttvn;
+
+	/* I don't have the requested data */
+	if (orig_ttvn != req_ttvn ||
+	    tt_request->tt_data != req_dst_orig_node->tt_crc)
+		goto out;
+
+	/* If it has explicitly been requested the full table */
+	if (tt_request->flags & TT_FULL_TABLE ||
+	    !req_dst_orig_node->tt_buff)
+		full_table = true;
+	else
+		full_table = false;
+
+	/* In this version, fragmentation is not implemented, so
+	 * I'll send only one packet with as many TT entries as I can */
+	if (!full_table) {
+		spin_lock_bh(&req_dst_orig_node->tt_buff_lock);
+		tt_len = req_dst_orig_node->tt_buff_len;
+		tt_tot = tt_len / sizeof(struct tt_change);
+
+		skb = dev_alloc_skb(sizeof(struct tt_query_packet) +
+				    tt_len + ETH_HLEN);
+		if (!skb)
+			goto unlock;
+
+		skb_reserve(skb, ETH_HLEN);
+		tt_response = (struct tt_query_packet *)skb_put(skb,
+				sizeof(struct tt_query_packet) + tt_len);
+		tt_response->ttvn = req_ttvn;
+		tt_response->tt_data = htons(tt_tot);
+
+		tt_buff = skb->data + sizeof(struct tt_query_packet);
+		/* Copy the last orig_node's OGM buffer */
+		memcpy(tt_buff, req_dst_orig_node->tt_buff,
+		       req_dst_orig_node->tt_buff_len);
+
+		spin_unlock_bh(&req_dst_orig_node->tt_buff_lock);
+	} else {
+		tt_len = (uint16_t)atomic_read(&req_dst_orig_node->tt_size) *
+						sizeof(struct tt_change);
+		ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
+
+		skb = tt_response_fill_table(tt_len, ttvn,
+					     bat_priv->tt_global_hash,
+					     primary_if, tt_global_valid_entry,
+					     req_dst_orig_node);
+		if (!skb)
+			goto out;
+
+		tt_response = (struct tt_query_packet *)skb->data;
+	}
+
+	tt_response->packet_type = BAT_TT_QUERY;
+	tt_response->version = COMPAT_VERSION;
+	tt_response->ttl = TTL;
+	memcpy(tt_response->src, req_dst_orig_node->orig, ETH_ALEN);
+	memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
+	tt_response->flags = TT_RESPONSE;
+
+	if (full_table)
+		tt_response->flags |= TT_FULL_TABLE;
+
+	bat_dbg(DBG_TT, bat_priv,
+		"Sending TT_RESPONSE %pM via %pM for %pM (ttvn: %u)\n",
+		res_dst_orig_node->orig, neigh_node->addr,
+		req_dst_orig_node->orig, req_ttvn);
+
+	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
+	ret = true;
+	goto out;
+
+unlock:
+	spin_unlock_bh(&req_dst_orig_node->tt_buff_lock);
+
+out:
+	if (res_dst_orig_node)
+		orig_node_free_ref(res_dst_orig_node);
+	if (req_dst_orig_node)
+		orig_node_free_ref(req_dst_orig_node);
+	if (neigh_node)
+		neigh_node_free_ref(neigh_node);
+	if (primary_if)
+		hardif_free_ref(primary_if);
+	if (!ret)
+		kfree_skb(skb);
+	return ret;
+}
+
+static bool send_my_tt_response(struct bat_priv *bat_priv,
+				struct tt_query_packet *tt_request)
+{
+	struct orig_node *orig_node = NULL;
+	struct neigh_node *neigh_node = NULL;
+	struct hard_iface *primary_if = NULL;
+	uint8_t my_ttvn, req_ttvn, ttvn;
+	int ret = false;
+	unsigned char *tt_buff;
+	bool full_table;
+	uint16_t tt_len, tt_tot;
+	struct sk_buff *skb = NULL;
+	struct tt_query_packet *tt_response;
+
+	bat_dbg(DBG_TT, bat_priv,
+		"Received TT_REQUEST from %pM for "
+		"ttvn: %u (me) [%c]\n", tt_request->src,
+		tt_request->ttvn,
+		(tt_request->flags & TT_FULL_TABLE ? 'F' : '.'));
+
+
+	my_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
+	req_ttvn = tt_request->ttvn;
+
+	orig_node = get_orig_node(bat_priv, tt_request->src);
+	if (!orig_node)
+		goto out;
+
+	neigh_node = orig_node_get_router(orig_node);
+	if (!neigh_node)
+		goto out;
+
+	primary_if = primary_if_get_selected(bat_priv);
+	if (!primary_if)
+		goto out;
+
+	/* If the full table has been explicitly requested or the gap
+	 * is too big send the whole local translation table */
+	if (tt_request->flags & TT_FULL_TABLE || my_ttvn != req_ttvn ||
+	    !bat_priv->tt_buff)
+		full_table = true;
+	else
+		full_table = false;
+
+	/* In this version, fragmentation is not implemented, so
+	 * I'll send only one packet with as many TT entries as I can */
+	if (!full_table) {
+		spin_lock_bh(&bat_priv->tt_buff_lock);
+		tt_len = bat_priv->tt_buff_len;
+		tt_tot = tt_len / sizeof(struct tt_change);
+
+		skb = dev_alloc_skb(sizeof(struct tt_query_packet) +
+				    tt_len + ETH_HLEN);
+		if (!skb)
+			goto unlock;
+
+		skb_reserve(skb, ETH_HLEN);
+		tt_response = (struct tt_query_packet *)skb_put(skb,
+				sizeof(struct tt_query_packet) + tt_len);
+		tt_response->ttvn = req_ttvn;
+		tt_response->tt_data = htons(tt_tot);
+
+		tt_buff = skb->data + sizeof(struct tt_query_packet);
+		memcpy(tt_buff, bat_priv->tt_buff,
+		       bat_priv->tt_buff_len);
+		spin_unlock_bh(&bat_priv->tt_buff_lock);
+	} else {
+		tt_len = (uint16_t)atomic_read(&bat_priv->num_local_tt) *
+						sizeof(struct tt_change);
+		ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
+
+		skb = tt_response_fill_table(tt_len, ttvn,
+					     bat_priv->tt_local_hash,
+					     primary_if, NULL, NULL);
+		if (!skb)
+			goto out;
+
+		tt_response = (struct tt_query_packet *)skb->data;
+	}
+
+	tt_response->packet_type = BAT_TT_QUERY;
+	tt_response->version = COMPAT_VERSION;
+	tt_response->ttl = TTL;
+	memcpy(tt_response->src, primary_if->net_dev->dev_addr, ETH_ALEN);
+	memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
+	tt_response->flags = TT_RESPONSE;
+
+	if (full_table)
+		tt_response->flags |= TT_FULL_TABLE;
+
+	bat_dbg(DBG_TT, bat_priv,
+		"Sending TT_RESPONSE to %pM via %pM [%c]\n",
+		orig_node->orig, neigh_node->addr,
+		(tt_response->flags & TT_FULL_TABLE ? 'F' : '.'));
+
+	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
+	ret = true;
+	goto out;
+
+unlock:
+	spin_unlock_bh(&bat_priv->tt_buff_lock);
+out:
+	if (orig_node)
+		orig_node_free_ref(orig_node);
+	if (neigh_node)
+		neigh_node_free_ref(neigh_node);
+	if (primary_if)
+		hardif_free_ref(primary_if);
+	if (!ret)
+		kfree_skb(skb);
+	/* This packet was for me, so it doesn't need to be re-routed */
+	return true;
+}
+
+bool send_tt_response(struct bat_priv *bat_priv,
+		      struct tt_query_packet *tt_request)
+{
+	if (is_my_mac(tt_request->dst))
+		return send_my_tt_response(bat_priv, tt_request);
+	else
+		return send_other_tt_response(bat_priv, tt_request);
+}
+
+static void _tt_update_changes(struct bat_priv *bat_priv,
+			       struct orig_node *orig_node,
+			       struct tt_change *tt_change,
+			       uint16_t tt_num_changes, uint8_t ttvn)
+{
+	int i;
+
+	for (i = 0; i < tt_num_changes; i++) {
+		if ((tt_change + i)->flags & TT_CHANGE_DEL)
+			tt_global_del(bat_priv, orig_node,
+				      (tt_change + i)->addr,
+				      "tt removed by changes",
+				      (tt_change + i)->flags & TT_CLIENT_ROAM);
+		else
+			if (!tt_global_add(bat_priv, orig_node,
+					   (tt_change + i)->addr, ttvn, false))
+				/* If storing a global_entry fails, we
+				 * stop the update procedure without
+				 * committing the ttvn change. This
+				 * avoids sending corrupted data in a
+				 * later tt_request
+				 */
+				return;
+	}
+}
+
+static void tt_fill_gtable(struct bat_priv *bat_priv,
+			   struct tt_query_packet *tt_response)
+{
+	struct orig_node *orig_node = NULL;
+
+	orig_node = orig_hash_find(bat_priv, tt_response->src);
+	if (!orig_node)
+		goto out;
+
+	/* Purge the old table first.. */
+	tt_global_del_orig(bat_priv, orig_node, "Received full table");
+
+	_tt_update_changes(bat_priv, orig_node,
+			   (struct tt_change *)(tt_response + 1),
+			   tt_response->tt_data, tt_response->ttvn);
+
+	spin_lock_bh(&orig_node->tt_buff_lock);
+	kfree(orig_node->tt_buff);
+	orig_node->tt_buff_len = 0;
+	orig_node->tt_buff = NULL;
+	spin_unlock_bh(&orig_node->tt_buff_lock);
+
+	atomic_set(&orig_node->last_ttvn, tt_response->ttvn);
+
+out:
+	if (orig_node)
+		orig_node_free_ref(orig_node);
+}
+
+void tt_update_changes(struct bat_priv *bat_priv, struct orig_node *orig_node,
+		       uint16_t tt_num_changes, uint8_t ttvn,
+		       struct tt_change *tt_change)
+{
+	_tt_update_changes(bat_priv, orig_node, tt_change, tt_num_changes,
+			   ttvn);
+
+	tt_save_orig_buffer(bat_priv, orig_node, (unsigned char *)tt_change,
+			    tt_num_changes);
+	atomic_set(&orig_node->last_ttvn, ttvn);
+}
+
+bool is_my_client(struct bat_priv *bat_priv, const uint8_t *addr)
+{
+	struct tt_local_entry *tt_local_entry = NULL;
+	bool ret = false;
+
+	tt_local_entry = tt_local_hash_find(bat_priv, addr);
+	if (!tt_local_entry)
+		goto out;
+	ret = true;
+out:
+	if (tt_local_entry)
+		tt_local_entry_free_ref(tt_local_entry);
+	return ret;
+}
+
+void handle_tt_response(struct bat_priv *bat_priv,
+			struct tt_query_packet *tt_response)
+{
+	struct tt_req_node *node, *safe;
+	struct orig_node *orig_node = NULL;
+
+	bat_dbg(DBG_TT, bat_priv, "Received TT_RESPONSE from %pM for "
+		"ttvn %d t_size: %d [%c]\n",
+		tt_response->src, tt_response->ttvn,
+		tt_response->tt_data,
+		(tt_response->flags & TT_FULL_TABLE ? 'F' : '.'));
+
+	orig_node = orig_hash_find(bat_priv, tt_response->src);
+	if (!orig_node)
+		goto out;
+
+	if (tt_response->flags & TT_FULL_TABLE)
+		tt_fill_gtable(bat_priv, tt_response);
+	else
+		tt_update_changes(bat_priv, orig_node, tt_response->tt_data,
+				  tt_response->ttvn,
+				  (struct tt_change *)(tt_response + 1));
+
+	/* Delete the tt_req_node from pending tt_requests list */
+	spin_lock_bh(&bat_priv->tt_req_list_lock);
+	list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
+		if (!compare_eth(node->addr, tt_response->src))
+			continue;
+		list_del(&node->list);
+		kfree(node);
+	}
+	spin_unlock_bh(&bat_priv->tt_req_list_lock);
+
+	/* Recalculate the CRC for this orig_node and store it */
+	orig_node->tt_crc = tt_global_crc(bat_priv, orig_node);
+	/* Roaming phase is over: tables are in sync again. I can
+	 * unset the flag */
+	orig_node->tt_poss_change = false;
+out:
+	if (orig_node)
+		orig_node_free_ref(orig_node);
+}
+
+int tt_init(struct bat_priv *bat_priv)
+{
+	if (!tt_local_init(bat_priv))
+		return 0;
+
+	if (!tt_global_init(bat_priv))
+		return 0;
+
+	tt_start_timer(bat_priv);
+
+	return 1;
+}
+
+static void tt_roam_list_free(struct bat_priv *bat_priv)
+{
+	struct tt_roam_node *node, *safe;
+
+	spin_lock_bh(&bat_priv->tt_roam_list_lock);
+
+	list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) {
+		list_del(&node->list);
+		kfree(node);
+	}
+
+	spin_unlock_bh(&bat_priv->tt_roam_list_lock);
+}
+
+static void tt_roam_purge(struct bat_priv *bat_priv)
+{
+	struct tt_roam_node *node, *safe;
+
+	spin_lock_bh(&bat_priv->tt_roam_list_lock);
+	list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) {
+		if (!is_out_of_time(node->first_time,
+				    ROAMING_MAX_TIME * 1000))
+			continue;
+
+		list_del(&node->list);
+		kfree(node);
+	}
+	spin_unlock_bh(&bat_priv->tt_roam_list_lock);
+}
+
+/* This function checks whether the client already reached the
+ * maximum number of possible roaming phases. In this case the ROAMING_ADV
+ * will not be sent.
+ *
+ * returns true if the ROAMING_ADV can be sent, false otherwise */
+static bool tt_check_roam_count(struct bat_priv *bat_priv,
+				uint8_t *client)
+{
+	struct tt_roam_node *tt_roam_node;
+	bool ret = false;
+
+	spin_lock_bh(&bat_priv->tt_roam_list_lock);
+	/* The ROAMING_ADV will be sent only if this client has not
+	 * already used up its roaming budget in the current window */
+	list_for_each_entry(tt_roam_node, &bat_priv->tt_roam_list, list) {
+		if (!compare_eth(tt_roam_node->addr, client))
+			continue;
+
+		if (is_out_of_time(tt_roam_node->first_time,
+				   ROAMING_MAX_TIME * 1000))
+			continue;
+
+		if (!atomic_dec_not_zero(&tt_roam_node->counter))
+			/* Sorry, you roamed too many times! */
+			goto unlock;
+		ret = true;
+		break;
+	}
+
+	if (!ret) {
+		tt_roam_node = kmalloc(sizeof(*tt_roam_node), GFP_ATOMIC);
+		if (!tt_roam_node)
+			goto unlock;
+
+		tt_roam_node->first_time = jiffies;
+		atomic_set(&tt_roam_node->counter, ROAMING_MAX_COUNT - 1);
+		memcpy(tt_roam_node->addr, client, ETH_ALEN);
+
+		list_add(&tt_roam_node->list, &bat_priv->tt_roam_list);
+		ret = true;
+	}
+
+unlock:
+	spin_unlock_bh(&bat_priv->tt_roam_list_lock);
+	return ret;
+}
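
tt_check_roam_count() is a rate limiter: the counter is seeded with ROAMING_MAX_COUNT - 1 because the event that creates the node already counts as one, and every later roaming event inside the ROAMING_MAX_TIME window consumes one unit. As a worked example with assumed defaults of ROAMING_MAX_COUNT == 5 and ROAMING_MAX_TIME == 20 seconds (the actual constants live in main.h and are not quoted here), a single client can trigger at most five ROAMING_ADV transmissions from this node in any 20-second window; further roaming events in that window are suppressed and left to the regular ttvn/CRC resynchronisation.
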
+
+void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
+		   struct orig_node *orig_node)
+{
+	struct neigh_node *neigh_node = NULL;
+	struct sk_buff *skb = NULL;
+	struct roam_adv_packet *roam_adv_packet;
+	int ret = 1;
+	struct hard_iface *primary_if;
+
+	/* before going on we have to check whether the client has
+	 * already roamed to us too many times */
+	if (!tt_check_roam_count(bat_priv, client))
+		goto out;
+
+	skb = dev_alloc_skb(sizeof(struct roam_adv_packet) + ETH_HLEN);
+	if (!skb)
+		goto out;
+
+	skb_reserve(skb, ETH_HLEN);
+
+	roam_adv_packet = (struct roam_adv_packet *)skb_put(skb,
+					sizeof(struct roam_adv_packet));
+
+	roam_adv_packet->packet_type = BAT_ROAM_ADV;
+	roam_adv_packet->version = COMPAT_VERSION;
+	roam_adv_packet->ttl = TTL;
+	primary_if = primary_if_get_selected(bat_priv);
+	if (!primary_if)
+		goto out;
+	memcpy(roam_adv_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN);
+	hardif_free_ref(primary_if);
+	memcpy(roam_adv_packet->dst, orig_node->orig, ETH_ALEN);
+	memcpy(roam_adv_packet->client, client, ETH_ALEN);
+
+	neigh_node = orig_node_get_router(orig_node);
+	if (!neigh_node)
+		goto out;
+
+	bat_dbg(DBG_TT, bat_priv,
+		"Sending ROAMING_ADV to %pM (client %pM) via %pM\n",
+		orig_node->orig, client, neigh_node->addr);
+
+	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
+	ret = 0;
+
+out:
+	if (neigh_node)
+		neigh_node_free_ref(neigh_node);
+	if (ret)
+		kfree_skb(skb);
+	return;
+}
+
+static void tt_purge(struct work_struct *work)
+{
+	struct delayed_work *delayed_work =
+		container_of(work, struct delayed_work, work);
+	struct bat_priv *bat_priv =
+		container_of(delayed_work, struct bat_priv, tt_work);
+
+	tt_local_purge(bat_priv);
+	tt_global_roam_purge(bat_priv);
+	tt_req_purge(bat_priv);
+	tt_roam_purge(bat_priv);
+
+	tt_start_timer(bat_priv);
+}
+
+void tt_free(struct bat_priv *bat_priv)
+{
+	cancel_delayed_work_sync(&bat_priv->tt_work);
+
+	tt_local_table_free(bat_priv);
+	tt_global_table_free(bat_priv);
+	tt_req_list_free(bat_priv);
+	tt_changes_list_free(bat_priv);
+	tt_roam_list_free(bat_priv);
+
+	kfree(bat_priv->tt_buff);
+}
diff --git a/net/batman-adv/translation-table.h b/net/batman-adv/translation-table.h
index 0f2b990..1cd2d39 100644
--- a/net/batman-adv/translation-table.h
+++ b/net/batman-adv/translation-table.h
@@ -22,23 +22,45 @@
 #ifndef _NET_BATMAN_ADV_TRANSLATION_TABLE_H_
 #define _NET_BATMAN_ADV_TRANSLATION_TABLE_H_
 
-int tt_local_init(struct bat_priv *bat_priv);
+int tt_len(int changes_num);
+int tt_changes_fill_buffer(struct bat_priv *bat_priv,
+			   unsigned char *buff, int buff_len);
+int tt_init(struct bat_priv *bat_priv);
 void tt_local_add(struct net_device *soft_iface, const uint8_t *addr);
 void tt_local_remove(struct bat_priv *bat_priv,
-		     const uint8_t *addr, const char *message);
-int tt_local_fill_buffer(struct bat_priv *bat_priv,
-			  unsigned char *buff, int buff_len);
+		     const uint8_t *addr, const char *message, bool roaming);
 int tt_local_seq_print_text(struct seq_file *seq, void *offset);
-void tt_local_free(struct bat_priv *bat_priv);
-int tt_global_init(struct bat_priv *bat_priv);
 void tt_global_add_orig(struct bat_priv *bat_priv,
 			struct orig_node *orig_node,
 			const unsigned char *tt_buff, int tt_buff_len);
+int tt_global_add(struct bat_priv *bat_priv,
+		  struct orig_node *orig_node, const unsigned char *addr,
+		  uint8_t ttvn, bool roaming);
 int tt_global_seq_print_text(struct seq_file *seq, void *offset);
 void tt_global_del_orig(struct bat_priv *bat_priv,
-			 struct orig_node *orig_node, const char *message);
-void tt_global_free(struct bat_priv *bat_priv);
+			struct orig_node *orig_node, const char *message);
+void tt_global_del(struct bat_priv *bat_priv,
+		   struct orig_node *orig_node, const unsigned char *addr,
+		   const char *message, bool roaming);
 struct orig_node *transtable_search(struct bat_priv *bat_priv,
 				    const uint8_t *addr);
+void tt_save_orig_buffer(struct bat_priv *bat_priv, struct orig_node *orig_node,
+			 const unsigned char *tt_buff, uint8_t tt_num_changes);
+uint16_t tt_local_crc(struct bat_priv *bat_priv);
+uint16_t tt_global_crc(struct bat_priv *bat_priv, struct orig_node *orig_node);
+void tt_free(struct bat_priv *bat_priv);
+int send_tt_request(struct bat_priv *bat_priv,
+		    struct orig_node *dst_orig_node, uint8_t hvn,
+		    uint16_t tt_crc, bool full_table);
+bool send_tt_response(struct bat_priv *bat_priv,
+		      struct tt_query_packet *tt_request);
+void tt_update_changes(struct bat_priv *bat_priv, struct orig_node *orig_node,
+		       uint16_t tt_num_changes, uint8_t ttvn,
+		       struct tt_change *tt_change);
+bool is_my_client(struct bat_priv *bat_priv, const uint8_t *addr);
+void handle_tt_response(struct bat_priv *bat_priv,
+			struct tt_query_packet *tt_response);
+void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
+		   struct orig_node *orig_node);
 
 #endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 65b3222..85cf122 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -75,8 +75,18 @@
 	unsigned long batman_seqno_reset;
 	uint8_t gw_flags;
 	uint8_t flags;
+	atomic_t last_ttvn; /* last seen translation table version number */
+	uint16_t tt_crc;
 	unsigned char *tt_buff;
 	int16_t tt_buff_len;
+	spinlock_t tt_buff_lock; /* protects tt_buff */
+	atomic_t tt_size;
+	/* The tt_poss_change flag is used to detect an ongoing roaming phase.
+	 * If true, then I sent a Roaming_adv to this orig_node and I have to
+	 * inspect every packet directed to it to check whether it is still
+	 * the true destination or not. This flag will be reset to false as
+	 * soon as I receive a new TTVN from this orig_node */
+	bool tt_poss_change;
 	uint32_t last_real_seqno;
 	uint8_t last_ttl;
 	unsigned long bcast_bits[NUM_WORDS];
@@ -94,6 +104,7 @@
 	spinlock_t ogm_cnt_lock;
 	/* bcast_seqno_lock protects bcast_bits, last_bcast_seqno */
 	spinlock_t bcast_seqno_lock;
+	spinlock_t tt_list_lock; /* protects tt_list */
 	atomic_t bond_candidates;
 	struct list_head bond_list;
 };
@@ -145,6 +156,15 @@
 	atomic_t bcast_seqno;
 	atomic_t bcast_queue_left;
 	atomic_t batman_queue_left;
+	atomic_t ttvn; /* translation table version number */
+	atomic_t tt_ogm_append_cnt;
+	atomic_t tt_local_changes; /* changes registered in an OGM interval */
+	/* The tt_poss_change flag is used to detect an ongoing roaming phase.
+	 * If true, then I received a Roaming_adv and I have to inspect every
+	 * packet directed to me to check whether I am still the true
+	 * destination or not. This flag will be reset to false as soon as I
+	 * increase my TTVN */
+	bool tt_poss_change;
 	char num_ifaces;
 	struct debug_log *debug_log;
 	struct kobject *mesh_obj;
@@ -153,26 +173,35 @@
 	struct hlist_head forw_bcast_list;
 	struct hlist_head gw_list;
 	struct hlist_head softif_neigh_vids;
+	struct list_head tt_changes_list; /* changes within an OGM interval */
 	struct list_head vis_send_list;
 	struct hashtable_t *orig_hash;
 	struct hashtable_t *tt_local_hash;
 	struct hashtable_t *tt_global_hash;
+	struct list_head tt_req_list; /* list of pending tt_requests */
+	struct list_head tt_roam_list;
 	struct hashtable_t *vis_hash;
 	spinlock_t forw_bat_list_lock; /* protects forw_bat_list */
 	spinlock_t forw_bcast_list_lock; /* protects  */
-	spinlock_t tt_lhash_lock; /* protects tt_local_hash */
-	spinlock_t tt_ghash_lock; /* protects tt_global_hash */
+	spinlock_t tt_changes_list_lock; /* protects tt_changes */
+	spinlock_t tt_req_list_lock; /* protects tt_req_list */
+	spinlock_t tt_roam_list_lock; /* protects tt_roam_list */
 	spinlock_t gw_list_lock; /* protects gw_list and curr_gw */
 	spinlock_t vis_hash_lock; /* protects vis_hash */
 	spinlock_t vis_list_lock; /* protects vis_info::recv_list */
 	spinlock_t softif_neigh_lock; /* protects soft-interface neigh list */
 	spinlock_t softif_neigh_vid_lock; /* protects soft-interface vid list */
-	int16_t num_local_tt;
-	atomic_t tt_local_changed;
+	atomic_t num_local_tt;
+	/* Checksum of the local table, recomputed before sending a new OGM */
+	atomic_t tt_crc;
+	unsigned char *tt_buff;
+	int16_t tt_buff_len;
+	spinlock_t tt_buff_lock; /* protects tt_buff */
 	struct delayed_work tt_work;
 	struct delayed_work orig_work;
 	struct delayed_work vis_work;
 	struct gw_node __rcu *curr_gw;  /* rcu protected pointer */
+	atomic_t gw_reselect;
 	struct hard_iface __rcu *primary_if;  /* rcu protected pointer */
 	struct vis_info *my_vis_info;
 };
@@ -196,13 +225,38 @@
 	uint8_t addr[ETH_ALEN];
 	unsigned long last_seen;
 	char never_purge;
+	atomic_t refcount;
+	struct rcu_head rcu;
 	struct hlist_node hash_entry;
 };
 
 struct tt_global_entry {
 	uint8_t addr[ETH_ALEN];
 	struct orig_node *orig_node;
-	struct hlist_node hash_entry;
+	uint8_t ttvn;
+	uint8_t flags; /* only TT_GLOBAL_ROAM is used */
+	unsigned long roam_at; /* time at which TT_GLOBAL_ROAM was set */
+	atomic_t refcount;
+	struct rcu_head rcu;
+	struct hlist_node hash_entry; /* entry in the global table */
+};
+
+struct tt_change_node {
+	struct list_head list;
+	struct tt_change change;
+};
+
+struct tt_req_node {
+	uint8_t addr[ETH_ALEN];
+	unsigned long issued_at;
+	struct list_head list;
+};
+
+struct tt_roam_node {
+	uint8_t addr[ETH_ALEN];
+	atomic_t counter;
+	unsigned long first_time;
+	struct list_head list;
 };
 
 /**
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
index 6eabf42..32b125f 100644
--- a/net/batman-adv/unicast.c
+++ b/net/batman-adv/unicast.c
@@ -325,6 +325,9 @@
 	unicast_packet->ttl = TTL;
 	/* copy the destination for faster routing */
 	memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);
+	/* set the destination tt version number */
+	unicast_packet->ttvn =
+		(uint8_t)atomic_read(&orig_node->last_ttvn);
 
 	if (atomic_read(&bat_priv->fragmentation) &&
 	    data_len + sizeof(*unicast_packet) >
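
Stamping the destination's last known ttvn into every unicast header gives forwarding and destination nodes a cheap staleness check: if a data packet carries a ttvn older than the current one for that originator, the client may have roamed since the sender made its routing decision. The receive-side counterpart lives in routing.c, outside this excerpt; a hedged sketch of what it is expected to do (names assumed, reference handling omitted):

	uint8_t curr_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
	struct orig_node *new_orig;

	if (unicast_packet->ttvn != curr_ttvn) {
		/* packet was built against an outdated table: look the
		 * inner Ethernet destination up again and patch the mesh
		 * destination before forwarding */
		new_orig = transtable_search(bat_priv, ethhdr->h_dest);
		if (new_orig) {
			memcpy(unicast_packet->dest, new_orig->orig, ETH_ALEN);
			unicast_packet->ttvn =
				(uint8_t)atomic_read(&new_orig->last_ttvn);
		}
	}
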
diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c
index 355c6e5..8a1b985 100644
--- a/net/batman-adv/vis.c
+++ b/net/batman-adv/vis.c
@@ -665,11 +665,12 @@
 
 	hash = bat_priv->tt_local_hash;
 
-	spin_lock_bh(&bat_priv->tt_lhash_lock);
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
 
-		hlist_for_each_entry(tt_local_entry, node, head, hash_entry) {
+		rcu_read_lock();
+		hlist_for_each_entry_rcu(tt_local_entry, node, head,
+					 hash_entry) {
 			entry = (struct vis_info_entry *)
 					skb_put(info->skb_packet,
 						sizeof(*entry));
@@ -678,14 +679,12 @@
 			entry->quality = 0; /* 0 means TT */
 			packet->entries++;
 
-			if (vis_packet_full(info)) {
-				spin_unlock_bh(&bat_priv->tt_lhash_lock);
-				return 0;
-			}
+			if (vis_packet_full(info))
+				goto unlock;
 		}
+		rcu_read_unlock();
 	}
 
-	spin_unlock_bh(&bat_priv->tt_lhash_lock);
 	return 0;
 
 unlock: