/*
 * Copyright (c) 2016 Citrix Systems Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define XEN_NETIF_DEFINE_TOEPLITZ

#include "common.h"
#include <linux/vmalloc.h>
#include <linux/rculist.h>

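/* RCU callback used to free a hash cache entry once all readers have
 * dropped their references to it.
 */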
static void xenvif_del_hash(struct rcu_head *rcu)
{
	struct xenvif_hash_cache_entry *entry;

	entry = container_of(rcu, struct xenvif_hash_cache_entry, rcu);

	kfree(entry);
}

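/* Add a newly calculated hash value to the cache. If the cache has
 * grown beyond its size limit, evict the entry with the lowest
 * sequence number, i.e. the one least recently used. Tags already
 * present in the cache are not added again.
 */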
static void xenvif_add_hash(struct xenvif *vif, const u8 *tag,
			    unsigned int len, u32 val)
{
	struct xenvif_hash_cache_entry *new, *entry, *oldest;
	unsigned long flags;
	bool found;

	new = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!new)
		return;

	memcpy(new->tag, tag, len);
	new->len = len;
	new->val = val;

	spin_lock_irqsave(&vif->hash.cache.lock, flags);

	found = false;
	oldest = NULL;
	list_for_each_entry_rcu(entry, &vif->hash.cache.list, link) {
		/* Make sure we don't add duplicate entries */
		if (entry->len == len &&
		    memcmp(entry->tag, tag, len) == 0)
			found = true;
		if (!oldest || entry->seq < oldest->seq)
			oldest = entry;
	}

	if (!found) {
		new->seq = atomic_inc_return(&vif->hash.cache.seq);
		list_add_rcu(&new->link, &vif->hash.cache.list);

		if (++vif->hash.cache.count > xenvif_hash_cache_size) {
			list_del_rcu(&oldest->link);
			vif->hash.cache.count--;
			call_rcu(&oldest->rcu, xenvif_del_hash);
		}
	}

	spin_unlock_irqrestore(&vif->hash.cache.lock, flags);

	if (found)
		kfree(new);
}

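/* Calculate the Toeplitz hash of the given data with the current key
 * and, if caching is enabled, remember the result.
 * xen_netif_toeplitz_hash() is supplied by the Xen netif header;
 * defining XEN_NETIF_DEFINE_TOEPLITZ above pulls in its
 * implementation.
 */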
static u32 xenvif_new_hash(struct xenvif *vif, const u8 *data,
			   unsigned int len)
{
	u32 val;

	val = xen_netif_toeplitz_hash(vif->hash.key,
				      sizeof(vif->hash.key),
				      data, len);

	if (xenvif_hash_cache_size != 0)
		xenvif_add_hash(vif, data, len, val);

	return val;
}

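/* Discard all cached hash values, e.g. because the key has changed and
 * they have become stale.
 */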
static void xenvif_flush_hash(struct xenvif *vif)
{
	struct xenvif_hash_cache_entry *entry;
	unsigned long flags;

	if (xenvif_hash_cache_size == 0)
		return;

	spin_lock_irqsave(&vif->hash.cache.lock, flags);

	list_for_each_entry_rcu(entry, &vif->hash.cache.list, link) {
		list_del_rcu(&entry->link);
		vif->hash.cache.count--;
		call_rcu(&entry->rcu, xenvif_del_hash);
	}

	spin_unlock_irqrestore(&vif->hash.cache.lock, flags);
}

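/* Look up a hash value in the cache, falling back to calculating (and
 * caching) it on a miss. A hit refreshes the entry's sequence number
 * so that it counts as recently used.
 */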
static u32 xenvif_find_hash(struct xenvif *vif, const u8 *data,
			    unsigned int len)
{
	struct xenvif_hash_cache_entry *entry;
	u32 val;
	bool found;

	if (len >= XEN_NETBK_HASH_TAG_SIZE)
		return 0;

	if (xenvif_hash_cache_size == 0)
		return xenvif_new_hash(vif, data, len);

	rcu_read_lock();

	found = false;

	list_for_each_entry_rcu(entry, &vif->hash.cache.list, link) {
		if (entry->len == len &&
		    memcmp(entry->tag, data, len) == 0) {
			val = entry->val;
			entry->seq = atomic_inc_return(&vif->hash.cache.seq);
			found = true;
			break;
		}
	}

	rcu_read_unlock();

	if (!found)
		val = xenvif_new_hash(vif, data, len);

	return val;
}

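/* Set the hash of an skb that is about to be handed to the frontend,
 * according to the hash types the frontend has enabled. An L4 (TCP)
 * hash is preferred over an L3 (address-only) hash where both are
 * enabled and the packet has a TCP header.
 */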
void xenvif_set_skb_hash(struct xenvif *vif, struct sk_buff *skb)
{
	struct flow_keys flow;
	u32 hash = 0;
	enum pkt_hash_types type = PKT_HASH_TYPE_NONE;
	u32 flags = vif->hash.flags;
	bool has_tcp_hdr;

	/* Quick rejection test: If the network protocol doesn't
	 * correspond to any enabled hash type then there's no point
	 * in parsing the packet header.
	 */
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		if (flags & (XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP |
			     XEN_NETIF_CTRL_HASH_TYPE_IPV4))
			break;

		goto done;

	case htons(ETH_P_IPV6):
		if (flags & (XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP |
			     XEN_NETIF_CTRL_HASH_TYPE_IPV6))
			break;

		goto done;

	default:
		goto done;
	}

	memset(&flow, 0, sizeof(flow));
	if (!skb_flow_dissect_flow_keys(skb, &flow, 0))
		goto done;

	has_tcp_hdr = (flow.basic.ip_proto == IPPROTO_TCP) &&
		      !(flow.control.flags & FLOW_DIS_IS_FRAGMENT);

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		if (has_tcp_hdr &&
		    (flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP)) {
			u8 data[12];

			memcpy(&data[0], &flow.addrs.v4addrs.src, 4);
			memcpy(&data[4], &flow.addrs.v4addrs.dst, 4);
			memcpy(&data[8], &flow.ports.src, 2);
			memcpy(&data[10], &flow.ports.dst, 2);

			hash = xenvif_find_hash(vif, data, sizeof(data));
			type = PKT_HASH_TYPE_L4;
		} else if (flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4) {
			u8 data[8];

			memcpy(&data[0], &flow.addrs.v4addrs.src, 4);
			memcpy(&data[4], &flow.addrs.v4addrs.dst, 4);

			hash = xenvif_find_hash(vif, data, sizeof(data));
			type = PKT_HASH_TYPE_L3;
		}

		break;

	case htons(ETH_P_IPV6):
		if (has_tcp_hdr &&
		    (flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP)) {
			u8 data[36];

			memcpy(&data[0], &flow.addrs.v6addrs.src, 16);
			memcpy(&data[16], &flow.addrs.v6addrs.dst, 16);
			memcpy(&data[32], &flow.ports.src, 2);
			memcpy(&data[34], &flow.ports.dst, 2);

			hash = xenvif_find_hash(vif, data, sizeof(data));
			type = PKT_HASH_TYPE_L4;
		} else if (flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6) {
			u8 data[32];

			memcpy(&data[0], &flow.addrs.v6addrs.src, 16);
			memcpy(&data[16], &flow.addrs.v6addrs.dst, 16);

			hash = xenvif_find_hash(vif, data, sizeof(data));
			type = PKT_HASH_TYPE_L3;
		}

		break;
	}

done:
	if (type == PKT_HASH_TYPE_NONE)
		skb_clear_hash(skb);
	else
		__skb_set_sw_hash(skb, hash, type == PKT_HASH_TYPE_L4);
}

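/* Select the hash algorithm. Only NONE and TOEPLITZ are accepted. */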
u32 xenvif_set_hash_alg(struct xenvif *vif, u32 alg)
{
	switch (alg) {
	case XEN_NETIF_CTRL_HASH_ALGORITHM_NONE:
	case XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ:
		break;

	default:
		return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
	}

	vif->hash.alg = alg;

	return XEN_NETIF_CTRL_STATUS_SUCCESS;
}

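/* Report the hash types supported by the backend. */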
u32 xenvif_get_hash_flags(struct xenvif *vif, u32 *flags)
{
	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
		return XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED;

	*flags = XEN_NETIF_CTRL_HASH_TYPE_IPV4 |
		 XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP |
		 XEN_NETIF_CTRL_HASH_TYPE_IPV6 |
		 XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP;

	return XEN_NETIF_CTRL_STATUS_SUCCESS;
}

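/* Enable a set of hash types. An algorithm must have been selected
 * beforehand.
 */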
u32 xenvif_set_hash_flags(struct xenvif *vif, u32 flags)
{
	if (flags & ~(XEN_NETIF_CTRL_HASH_TYPE_IPV4 |
		      XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP |
		      XEN_NETIF_CTRL_HASH_TYPE_IPV6 |
		      XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP))
		return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;

	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
		return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;

	vif->hash.flags = flags;

	return XEN_NETIF_CTRL_STATUS_SUCCESS;
}

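/* Copy a new hash key from the frontend via the supplied grant
 * reference, zero-pad it to the maximum key size and flush any hash
 * values cached under the old key.
 */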
u32 xenvif_set_hash_key(struct xenvif *vif, u32 gref, u32 len)
{
	u8 *key = vif->hash.key;
	struct gnttab_copy copy_op = {
		.source.u.ref = gref,
		.source.domid = vif->domid,
		.dest.u.gmfn = virt_to_gfn(key),
		.dest.domid = DOMID_SELF,
		.dest.offset = xen_offset_in_page(key),
		.len = len,
		.flags = GNTCOPY_source_gref
	};

	if (len > XEN_NETBK_MAX_HASH_KEY_SIZE)
		return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;

	if (copy_op.len != 0) {
		gnttab_batch_copy(&copy_op, 1);

		if (copy_op.status != GNTST_okay)
			return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
	}

	/* Clear any remaining key octets */
	if (len < XEN_NETBK_MAX_HASH_KEY_SIZE)
		memset(key + len, 0, XEN_NETBK_MAX_HASH_KEY_SIZE - len);

	xenvif_flush_hash(vif);

	return XEN_NETIF_CTRL_STATUS_SUCCESS;
}

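/* Set the size of the hash-to-queue mapping table, initially mapping
 * everything to queue 0.
 */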
u32 xenvif_set_hash_mapping_size(struct xenvif *vif, u32 size)
{
	if (size > XEN_NETBK_MAX_HASH_MAPPING_SIZE)
		return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;

	vif->hash.size = size;
	memset(vif->hash.mapping, 0, sizeof(u32) * size);

	return XEN_NETIF_CTRL_STATUS_SUCCESS;
}

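/* Copy (part of) the hash-to-queue mapping table from the frontend via
 * the supplied grant reference, then validate the queue numbers it
 * contains.
 */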
u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len,
			    u32 off)
{
	u32 *mapping = &vif->hash.mapping[off];
	struct gnttab_copy copy_op = {
		.source.u.ref = gref,
		.source.domid = vif->domid,
		.dest.u.gmfn = virt_to_gfn(mapping),
		.dest.domid = DOMID_SELF,
		.dest.offset = xen_offset_in_page(mapping),
		.len = len * sizeof(u32),
		.flags = GNTCOPY_source_gref
	};

	if ((off + len < off) || (off + len > vif->hash.size) ||
	    copy_op.len > XEN_PAGE_SIZE)
		return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;

	if (copy_op.len != 0) {
		gnttab_batch_copy(&copy_op, 1);

		if (copy_op.status != GNTST_okay)
			return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
	}

	/* Validate the queue numbers that were just copied in. The copy
	 * must happen first, otherwise the check would only see the
	 * previous contents of the table. Note that mapping already
	 * points at slot off, so it is indexed from zero here; indexing
	 * it with off again would double-apply the offset.
	 */
	while (len-- != 0)
		if (mapping[len] >= vif->num_queues)
			return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;

	return XEN_NETIF_CTRL_STATUS_SUCCESS;
}

#ifdef CONFIG_DEBUG_FS
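/* Dump the current hash configuration (algorithm, flags, key and
 * mapping table) to a debugfs seq_file.
 */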
void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m)
{
	unsigned int i;

	switch (vif->hash.alg) {
	case XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ:
		seq_puts(m, "Hash Algorithm: TOEPLITZ\n");
		break;

	case XEN_NETIF_CTRL_HASH_ALGORITHM_NONE:
		seq_puts(m, "Hash Algorithm: NONE\n");
		/* FALLTHRU */
	default:
		return;
	}

	if (vif->hash.flags) {
		seq_puts(m, "\nHash Flags:\n");

		if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4)
			seq_puts(m, "- IPv4\n");
		if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP)
			seq_puts(m, "- IPv4 + TCP\n");
		if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6)
			seq_puts(m, "- IPv6\n");
		if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP)
			seq_puts(m, "- IPv6 + TCP\n");
	}

	seq_puts(m, "\nHash Key:\n");

	for (i = 0; i < XEN_NETBK_MAX_HASH_KEY_SIZE; ) {
		unsigned int j, n;

		n = 8;
		if (i + n >= XEN_NETBK_MAX_HASH_KEY_SIZE)
			n = XEN_NETBK_MAX_HASH_KEY_SIZE - i;

		seq_printf(m, "[%2u - %2u]: ", i, i + n - 1);

		for (j = 0; j < n; j++, i++)
			seq_printf(m, "%02x ", vif->hash.key[i]);

		seq_puts(m, "\n");
	}

	if (vif->hash.size != 0) {
		seq_puts(m, "\nHash Mapping:\n");

		for (i = 0; i < vif->hash.size; ) {
			unsigned int j, n;

			n = 8;
			if (i + n >= vif->hash.size)
				n = vif->hash.size - i;

			seq_printf(m, "[%4u - %4u]: ", i, i + n - 1);

			for (j = 0; j < n; j++, i++)
				seq_printf(m, "%4u ", vif->hash.mapping[i]);

			seq_puts(m, "\n");
		}
	}
}
#endif /* CONFIG_DEBUG_FS */

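/* Initialize the hash cache lock and list. */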
void xenvif_init_hash(struct xenvif *vif)
{
	if (xenvif_hash_cache_size == 0)
		return;

	spin_lock_init(&vif->hash.cache.lock);
	INIT_LIST_HEAD(&vif->hash.cache.list);
}

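/* Tear down hash state by discarding any cached values. */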
void xenvif_deinit_hash(struct xenvif *vif)
{
	xenvif_flush_hash(vif);
}