/*
 *	6LoWPAN next header compression
 *
 *
 *	Authors:
 *	Alexander Aring		<aar@pengutronix.de>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/netdevice.h>

#include <net/ipv6.h>

#include "nhc.h"

static struct rb_root rb_root = RB_ROOT;
static struct lowpan_nhc *lowpan_nexthdr_nhcs[NEXTHDR_MAX];
static DEFINE_SPINLOCK(lowpan_nhc_lock);

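/**
 * lowpan_nhc_insert - insert an nhc entry into the nhc id rb-tree
 * @nhc: nhc entry to insert
 *
 * Entries are ordered by memcmp() over the common prefix of their ids;
 * when the prefixes are equal, the shorter id sorts first. Must be called
 * with lowpan_nhc_lock held.
 *
 * Return: 0 on success, -EEXIST if an entry with the same id is already
 * registered.
 */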
static int lowpan_nhc_insert(struct lowpan_nhc *nhc)
{
	struct rb_node **new = &rb_root.rb_node, *parent = NULL;

	/* Figure out where to put new node */
	while (*new) {
		struct lowpan_nhc *this = rb_entry(*new, struct lowpan_nhc,
						   node);
		int result, len_dif, len;

		len_dif = nhc->idlen - this->idlen;

		if (nhc->idlen < this->idlen)
			len = nhc->idlen;
		else
			len = this->idlen;

		result = memcmp(nhc->id, this->id, len);
		if (!result)
			result = len_dif;

		parent = *new;
		if (result < 0)
			new = &((*new)->rb_left);
		else if (result > 0)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&nhc->node, parent, new);
	rb_insert_color(&nhc->node, &rb_root);

	return 0;
}

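/**
 * lowpan_nhc_remove - remove an nhc entry from the nhc id rb-tree
 * @nhc: nhc entry to remove
 *
 * Must be called with lowpan_nhc_lock held.
 */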
static void lowpan_nhc_remove(struct lowpan_nhc *nhc)
{
	rb_erase(&nhc->node, &rb_root);
}

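/**
 * lowpan_nhc_by_nhcid - look up an nhc entry by the NHC id in an skb
 * @skb: 6LoWPAN skb whose data starts with an NHC dispatch value
 *
 * The id bytes at the start of the skb are masked with each candidate's
 * idmask before comparison, so variable bits inside the dispatch value
 * are ignored.
 *
 * Return: the matching nhc entry, or NULL if the skb is too short or no
 * entry matches.
 */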
static struct lowpan_nhc *lowpan_nhc_by_nhcid(const struct sk_buff *skb)
{
	struct rb_node *node = rb_root.rb_node;
	const u8 *nhcid_skb_ptr = skb->data;

	while (node) {
		struct lowpan_nhc *nhc = rb_entry(node, struct lowpan_nhc,
						  node);
		u8 nhcid_skb_ptr_masked[LOWPAN_NHC_MAX_ID_LEN];
		int result, i;

		if (nhcid_skb_ptr + nhc->idlen > skb->data + skb->len)
			return NULL;

		/* copy the nhc id value from the skb and mask it afterwards */
		memcpy(nhcid_skb_ptr_masked, nhcid_skb_ptr, nhc->idlen);
		for (i = 0; i < nhc->idlen; i++)
			nhcid_skb_ptr_masked[i] &= nhc->idmask[i];

		result = memcmp(nhcid_skb_ptr_masked, nhc->id, nhc->idlen);
		if (result < 0)
			node = node->rb_left;
		else if (result > 0)
			node = node->rb_right;
		else
			return nhc;
	}

	return NULL;
}

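/**
 * lowpan_nhc_check_compression - check if the next header can be compressed
 * @skb: skb of the lowpan packet (unused in this check)
 * @hdr: IPv6 header carrying the next header value to check
 * @hc_ptr: pointer into the compressed header buffer (unused in this check)
 *
 * Return: 0 if an nhc with a compress callback is registered for
 * hdr->nexthdr, -ENOENT otherwise.
 */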
int lowpan_nhc_check_compression(struct sk_buff *skb,
				 const struct ipv6hdr *hdr, u8 **hc_ptr)
{
	struct lowpan_nhc *nhc;
	int ret = 0;

	spin_lock_bh(&lowpan_nhc_lock);

	nhc = lowpan_nexthdr_nhcs[hdr->nexthdr];
	if (!(nhc && nhc->compress))
		ret = -ENOENT;

	spin_unlock_bh(&lowpan_nhc_lock);

	return ret;
}

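/**
 * lowpan_nhc_do_compression - compress the next header of an skb
 * @skb: skb to compress
 * @hdr: IPv6 header carrying the next header value
 * @hc_ptr: pointer into the compressed header buffer, advanced by the
 *	nhc's compress callback
 *
 * On success the transport header is pulled from the skb afterwards.
 *
 * Return: 0 on success, a negative errno if no compress callback is
 * registered anymore or the callback fails.
 */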
int lowpan_nhc_do_compression(struct sk_buff *skb, const struct ipv6hdr *hdr,
			      u8 **hc_ptr)
{
	int ret;
	struct lowpan_nhc *nhc;

	spin_lock_bh(&lowpan_nhc_lock);

	nhc = lowpan_nexthdr_nhcs[hdr->nexthdr];
	/* Check whether the nhc module was removed while the lock was not
	 * held.
	 * TODO: This is a workaround; we should prevent unloading of nhc
	 * modules between lowpan_nhc_check_compression() and this point.
	 * Hitting this case always drops the lowpan packet, but it is very
	 * unlikely.
	 *
	 * A clean solution isn't easy because we have to decide in
	 * lowpan_nhc_check_compression() whether we compress or not, and
	 * because of the inline data already added to the skb we can't
	 * move this handling there.
	 */
	if (unlikely(!nhc || !nhc->compress)) {
		ret = -EINVAL;
		goto out;
	}

	/* In the case of RAW sockets the transport header is not set by
	 * the ip6 stack, so we must set it ourselves.
	 */
	if (skb->transport_header == skb->network_header)
		skb_set_transport_header(skb, sizeof(struct ipv6hdr));

	ret = nhc->compress(skb, hc_ptr);
	if (ret < 0)
		goto out;

	/* skip the transport header */
	skb_pull(skb, nhc->nexthdrlen);

out:
	spin_unlock_bh(&lowpan_nhc_lock);

	return ret;
}

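/**
 * lowpan_nhc_do_uncompression - uncompress the next header of an skb
 * @skb: skb whose data starts with a compressed NHC header
 * @dev: network device, used for warning messages
 * @hdr: IPv6 header whose nexthdr field is filled in on success
 *
 * Looks up the nhc by the NHC id at the start of the skb, runs its
 * uncompress callback and then resets the skb transport header.
 *
 * Return: 0 on success, -ENOENT for an unknown NHC id, -ENOTSUPP if the
 * nhc has no uncompress callback, or the callback's error code.
 */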
int lowpan_nhc_do_uncompression(struct sk_buff *skb,
				const struct net_device *dev,
				struct ipv6hdr *hdr)
{
	struct lowpan_nhc *nhc;
	int ret;

	spin_lock_bh(&lowpan_nhc_lock);

	nhc = lowpan_nhc_by_nhcid(skb);
	if (nhc) {
		if (nhc->uncompress) {
			ret = nhc->uncompress(skb, sizeof(struct ipv6hdr) +
					      nhc->nexthdrlen);
			if (ret < 0) {
				spin_unlock_bh(&lowpan_nhc_lock);
				return ret;
			}
		} else {
			spin_unlock_bh(&lowpan_nhc_lock);
			netdev_warn(dev, "received nhc id for %s which is not implemented.\n",
				    nhc->name);
			return -ENOTSUPP;
		}
	} else {
		spin_unlock_bh(&lowpan_nhc_lock);
		netdev_warn(dev, "received unknown nhc id which was not found.\n");
		return -ENOENT;
	}

	hdr->nexthdr = nhc->nexthdr;
	skb_reset_transport_header(skb);
	raw_dump_table(__func__, "raw transport header dump",
		       skb_transport_header(skb), nhc->nexthdrlen);

	spin_unlock_bh(&lowpan_nhc_lock);

	return 0;
}

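/**
 * lowpan_nhc_add - register a 6LoWPAN next header compression module
 * @nhc: nhc entry to register; idlen and idsetup must be set
 *
 * Runs the idsetup callback to fill in the id and idmask, then adds the
 * entry to the id rb-tree and the per-nexthdr table.
 *
 * Return: 0 on success, -EINVAL for a malformed entry, -EEXIST if the
 * nexthdr value or id is already registered.
 */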
int lowpan_nhc_add(struct lowpan_nhc *nhc)
{
	int ret;

	if (!nhc->idlen || !nhc->idsetup)
		return -EINVAL;

	WARN_ONCE(nhc->idlen > LOWPAN_NHC_MAX_ID_LEN,
		  "LOWPAN_NHC_MAX_ID_LEN should be updated to %zd.\n",
		  nhc->idlen);

	nhc->idsetup(nhc);

	spin_lock_bh(&lowpan_nhc_lock);

	if (lowpan_nexthdr_nhcs[nhc->nexthdr]) {
		ret = -EEXIST;
		goto out;
	}

	ret = lowpan_nhc_insert(nhc);
	if (ret < 0)
		goto out;

	lowpan_nexthdr_nhcs[nhc->nexthdr] = nhc;
out:
	spin_unlock_bh(&lowpan_nhc_lock);
	return ret;
}
EXPORT_SYMBOL(lowpan_nhc_add);

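/**
 * lowpan_nhc_del - unregister a 6LoWPAN next header compression module
 * @nhc: nhc entry to remove
 *
 * Removes the entry from the rb-tree and the per-nexthdr table, then
 * calls synchronize_net() to wait for in-flight packet processing that
 * may still reference the entry to finish.
 */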
void lowpan_nhc_del(struct lowpan_nhc *nhc)
{
	spin_lock_bh(&lowpan_nhc_lock);

	lowpan_nhc_remove(nhc);
	lowpan_nexthdr_nhcs[nhc->nexthdr] = NULL;

	spin_unlock_bh(&lowpan_nhc_lock);

	synchronize_net();
}
EXPORT_SYMBOL(lowpan_nhc_del);
239EXPORT_SYMBOL(lowpan_nhc_del);