/*
 * gw.c - CAN frame Gateway/Router/Bridge with netlink interface
 *
 * Copyright (c) 2011 Volkswagen Group Electronic Research
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Volkswagen nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * Alternatively, provided that this notice is retained in full, this
 * software may be distributed under the terms of the GNU General
 * Public License ("GPL") version 2, in which case the provisions of the
 * GPL apply INSTEAD OF those given above.
 *
 * The provided data structures and external interfaces from this code
 * are not restricted to be used by modules with a GPL compatible license.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <linux/can.h>
#include <linux/can/core.h>
#include <linux/can/skb.h>
#include <linux/can/gw.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/sock.h>

#define CAN_GW_VERSION "20101209"
static __initconst const char banner[] =
        KERN_INFO "can: netlink gateway (rev " CAN_GW_VERSION ")\n";

MODULE_DESCRIPTION("PF_CAN netlink gateway");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
MODULE_ALIAS("can-gw");
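
/*
 * Gateway jobs are configured from user space through the PF_CAN
 * RTM_NEWROUTE/RTM_DELROUTE/RTM_GETROUTE rtnetlink messages handled below.
 * For illustration only (assuming the cangw tool from can-utils), a basic
 * CAN -> CAN routing job could be added with something like:
 *
 *   cangw -A -s can0 -d can1
 *
 * with further options selecting filters, frame modifications and
 * checksum updates as described by the attributes parsed in this file.
 */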

static HLIST_HEAD(cgw_list);
static struct notifier_block notifier;

static struct kmem_cache *cgw_cache __read_mostly;

/* structure that contains the (on-the-fly) CAN frame modifications */
struct cf_mod {
        struct {
                struct can_frame and;
                struct can_frame or;
                struct can_frame xor;
                struct can_frame set;
        } modframe;
        struct {
                u8 and;
                u8 or;
                u8 xor;
                u8 set;
        } modtype;
        void (*modfunc[MAX_MODFUNCTIONS])(struct can_frame *cf,
                                          struct cf_mod *mod);

        /* CAN frame checksum calculation after CAN frame modifications */
        struct {
                struct cgw_csum_xor xor;
                struct cgw_csum_crc8 crc8;
        } csum;
        struct {
                void (*xor)(struct can_frame *cf, struct cgw_csum_xor *xor);
                void (*crc8)(struct can_frame *cf, struct cgw_csum_crc8 *crc8);
        } csumfunc;
};
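
/*
 * Note: modfunc[] is filled contiguously by cgw_parse_attr() and executed
 * in can_can_gw_rcv() until the first NULL entry, so the configured
 * AND/OR/XOR/SET operations run in the order in which they were parsed.
 */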

/*
 * So far we just support CAN -> CAN routing and frame modifications.
 *
 * The internal can_can_gw structure contains data and attributes for
 * a CAN -> CAN gateway job.
 */
struct can_can_gw {
        struct can_filter filter;
        int src_idx;
        int dst_idx;
};

/* list entry for CAN gateway jobs */
struct cgw_job {
        struct hlist_node list;
        struct rcu_head rcu;
        u32 handled_frames;
        u32 dropped_frames;
        struct cf_mod mod;
        union {
                /* CAN frame data source */
                struct net_device *dev;
        } src;
        union {
                /* CAN frame data destination */
                struct net_device *dev;
        } dst;
        union {
                struct can_can_gw ccgw;
                /* tbc */
        };
        u8 gwtype;
        u16 flags;
};
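
/*
 * The anonymous union above holds the gwtype specific attributes and the
 * gwtype member selects which variant is valid. Only CGW_TYPE_CAN_CAN is
 * implemented so far (hence the "tbc" placeholder).
 */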

/* modification functions that are invoked in the hot path in can_can_gw_rcv */

#define MODFUNC(func, op) static void func(struct can_frame *cf, \
                                           struct cf_mod *mod) { op ; }

MODFUNC(mod_and_id, cf->can_id &= mod->modframe.and.can_id)
MODFUNC(mod_and_dlc, cf->can_dlc &= mod->modframe.and.can_dlc)
MODFUNC(mod_and_data, *(u64 *)cf->data &= *(u64 *)mod->modframe.and.data)
MODFUNC(mod_or_id, cf->can_id |= mod->modframe.or.can_id)
MODFUNC(mod_or_dlc, cf->can_dlc |= mod->modframe.or.can_dlc)
MODFUNC(mod_or_data, *(u64 *)cf->data |= *(u64 *)mod->modframe.or.data)
MODFUNC(mod_xor_id, cf->can_id ^= mod->modframe.xor.can_id)
MODFUNC(mod_xor_dlc, cf->can_dlc ^= mod->modframe.xor.can_dlc)
MODFUNC(mod_xor_data, *(u64 *)cf->data ^= *(u64 *)mod->modframe.xor.data)
MODFUNC(mod_set_id, cf->can_id = mod->modframe.set.can_id)
MODFUNC(mod_set_dlc, cf->can_dlc = mod->modframe.set.can_dlc)
MODFUNC(mod_set_data, *(u64 *)cf->data = *(u64 *)mod->modframe.set.data)
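
/*
 * For reference, MODFUNC(mod_and_id, ...) expands to:
 *
 *   static void mod_and_id(struct can_frame *cf, struct cf_mod *mod)
 *   { cf->can_id &= mod->modframe.and.can_id ; }
 *
 * i.e. each generated helper applies exactly one operation to one frame
 * element, which keeps per-element type checks out of the receive hot path.
 */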

static inline void canframecpy(struct can_frame *dst, struct can_frame *src)
{
        /*
         * Copy the struct members separately to ensure that no uninitialized
         * data is copied into the 3 byte hole of the struct. This is needed
         * to allow easy comparisons of the data in struct cf_mod.
         */

        dst->can_id = src->can_id;
        dst->can_dlc = src->can_dlc;
        *(u64 *)dst->data = *(u64 *)src->data;
}

static int cgw_chk_csum_parms(s8 fr, s8 to, s8 re)
{
        /*
         * absolute dlc values 0 .. 7 => 0 .. 7, e.g. data[0]
         * relative to received dlc -1 .. -8 :
         * e.g. for received dlc = 8
         * -1 => index = 7 (data[7])
         * -3 => index = 5 (data[5])
         * -8 => index = 0 (data[0])
         */

        if (fr > -9 && fr < 8 &&
            to > -9 && to < 8 &&
            re > -9 && re < 8)
                return 0;
        else
                return -EINVAL;
}

static inline int calc_idx(int idx, int rx_dlc)
{
        if (idx < 0)
                return rx_dlc + idx;
        else
                return idx;
}
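
/*
 * Example: with a received dlc of 5, calc_idx(-1, 5) selects data[4] and
 * calc_idx(2, 5) selects data[2] - negative indices address the frame data
 * relative to its end, non-negative indices are used as absolute positions.
 */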

static void cgw_csum_xor_rel(struct can_frame *cf, struct cgw_csum_xor *xor)
{
        int from = calc_idx(xor->from_idx, cf->can_dlc);
        int to = calc_idx(xor->to_idx, cf->can_dlc);
        int res = calc_idx(xor->result_idx, cf->can_dlc);
        u8 val = xor->init_xor_val;
        int i;

        if (from < 0 || to < 0 || res < 0)
                return;

        if (from <= to) {
                for (i = from; i <= to; i++)
                        val ^= cf->data[i];
        } else {
                for (i = from; i >= to; i--)
                        val ^= cf->data[i];
        }

        cf->data[res] = val;
}

static void cgw_csum_xor_pos(struct can_frame *cf, struct cgw_csum_xor *xor)
{
        u8 val = xor->init_xor_val;
        int i;

        for (i = xor->from_idx; i <= xor->to_idx; i++)
                val ^= cf->data[i];

        cf->data[xor->result_idx] = val;
}

static void cgw_csum_xor_neg(struct can_frame *cf, struct cgw_csum_xor *xor)
{
        u8 val = xor->init_xor_val;
        int i;

        for (i = xor->from_idx; i >= xor->to_idx; i--)
                val ^= cf->data[i];

        cf->data[xor->result_idx] = val;
}

static void cgw_csum_crc8_rel(struct can_frame *cf, struct cgw_csum_crc8 *crc8)
{
        int from = calc_idx(crc8->from_idx, cf->can_dlc);
        int to = calc_idx(crc8->to_idx, cf->can_dlc);
        int res = calc_idx(crc8->result_idx, cf->can_dlc);
        u8 crc = crc8->init_crc_val;
        int i;

        if (from < 0 || to < 0 || res < 0)
                return;

        if (from <= to) {
                for (i = from; i <= to; i++)
                        crc = crc8->crctab[crc^cf->data[i]];
        } else {
                for (i = from; i >= to; i--)
                        crc = crc8->crctab[crc^cf->data[i]];
        }

        switch (crc8->profile) {

        case CGW_CRC8PRF_1U8:
                crc = crc8->crctab[crc^crc8->profile_data[0]];
                break;

        case CGW_CRC8PRF_16U8:
                crc = crc8->crctab[crc^crc8->profile_data[cf->data[1] & 0xF]];
                break;

        case CGW_CRC8PRF_SFFID_XOR:
                crc = crc8->crctab[crc^(cf->can_id & 0xFF)^
                                   (cf->can_id >> 8 & 0xFF)];
                break;

        }

        cf->data[res] = crc^crc8->final_xor_val;
}

static void cgw_csum_crc8_pos(struct can_frame *cf, struct cgw_csum_crc8 *crc8)
{
        u8 crc = crc8->init_crc_val;
        int i;

        for (i = crc8->from_idx; i <= crc8->to_idx; i++)
                crc = crc8->crctab[crc^cf->data[i]];

        switch (crc8->profile) {

        case CGW_CRC8PRF_1U8:
                crc = crc8->crctab[crc^crc8->profile_data[0]];
                break;

        case CGW_CRC8PRF_16U8:
                crc = crc8->crctab[crc^crc8->profile_data[cf->data[1] & 0xF]];
                break;

        case CGW_CRC8PRF_SFFID_XOR:
                crc = crc8->crctab[crc^(cf->can_id & 0xFF)^
                                   (cf->can_id >> 8 & 0xFF)];
                break;
        }

        cf->data[crc8->result_idx] = crc^crc8->final_xor_val;
}

static void cgw_csum_crc8_neg(struct can_frame *cf, struct cgw_csum_crc8 *crc8)
{
        u8 crc = crc8->init_crc_val;
        int i;

        for (i = crc8->from_idx; i >= crc8->to_idx; i--)
                crc = crc8->crctab[crc^cf->data[i]];

        switch (crc8->profile) {

        case CGW_CRC8PRF_1U8:
                crc = crc8->crctab[crc^crc8->profile_data[0]];
                break;

        case CGW_CRC8PRF_16U8:
                crc = crc8->crctab[crc^crc8->profile_data[cf->data[1] & 0xF]];
                break;

        case CGW_CRC8PRF_SFFID_XOR:
                crc = crc8->crctab[crc^(cf->can_id & 0xFF)^
                                   (cf->can_id >> 8 & 0xFF)];
                break;
        }

        cf->data[crc8->result_idx] = crc^crc8->final_xor_val;
}
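
/*
 * The checksum helpers come in three flavours: the _pos/_neg variants are
 * used when from_idx/to_idx are both absolute (ascending or descending
 * ranges), while the _rel variant resolves negative, dlc-relative indices
 * for each received frame. The matching flavour is selected once in
 * cgw_parse_attr() to keep these decisions out of the receive hot path.
 */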

/* the receive & process & send function */
static void can_can_gw_rcv(struct sk_buff *skb, void *data)
{
        struct cgw_job *gwj = (struct cgw_job *)data;
        struct can_frame *cf;
        struct sk_buff *nskb;
        int modidx = 0;

        /* do not handle already routed frames - see comment below */
        if (skb_mac_header_was_set(skb))
                return;

        if (!(gwj->dst.dev->flags & IFF_UP)) {
                gwj->dropped_frames++;
                return;
        }

        /* is sending the skb back to the incoming interface not allowed? */
        if (!(gwj->flags & CGW_FLAGS_CAN_IIF_TX_OK) &&
            skb_headroom(skb) == sizeof(struct can_skb_priv) &&
            (((struct can_skb_priv *)(skb->head))->ifindex ==
             gwj->dst.dev->ifindex))
                return;

        /*
         * clone the given skb, which has not been done in can_rcv()
         *
         * When there is at least one modification function activated,
         * we need to copy the skb as we want to modify skb->data.
         */
        if (gwj->mod.modfunc[0])
                nskb = skb_copy(skb, GFP_ATOMIC);
        else
                nskb = skb_clone(skb, GFP_ATOMIC);

        if (!nskb) {
                gwj->dropped_frames++;
                return;
        }

        /*
         * Mark routed frames by setting some mac header length which is
         * not relevant for the CAN frames located in the skb->data section.
         *
         * As dev->header_ops is not set in CAN netdevices no one is ever
         * accessing the various header offsets in the CAN skbuffs anyway.
         * E.g. using the packet socket to read CAN frames still works.
         */
        skb_set_mac_header(nskb, 8);
        nskb->dev = gwj->dst.dev;

        /* pointer to modifiable CAN frame */
        cf = (struct can_frame *)nskb->data;

        /* perform preprocessed modification functions if there are any */
        while (modidx < MAX_MODFUNCTIONS && gwj->mod.modfunc[modidx])
                (*gwj->mod.modfunc[modidx++])(cf, &gwj->mod);

        /* check for checksum updates when the CAN frame has been modified */
        if (modidx) {
                if (gwj->mod.csumfunc.crc8)
                        (*gwj->mod.csumfunc.crc8)(cf, &gwj->mod.csum.crc8);

                if (gwj->mod.csumfunc.xor)
                        (*gwj->mod.csumfunc.xor)(cf, &gwj->mod.csum.xor);
        }

        /* clear the skb timestamp if not configured the other way */
        if (!(gwj->flags & CGW_FLAGS_CAN_SRC_TSTAMP))
                nskb->tstamp.tv64 = 0;

        /* send to netdevice */
        if (can_send(nskb, gwj->flags & CGW_FLAGS_CAN_ECHO))
                gwj->dropped_frames++;
        else
                gwj->handled_frames++;
}

static inline int cgw_register_filter(struct cgw_job *gwj)
{
        return can_rx_register(gwj->src.dev, gwj->ccgw.filter.can_id,
                               gwj->ccgw.filter.can_mask, can_can_gw_rcv,
                               gwj, "gw");
}

static inline void cgw_unregister_filter(struct cgw_job *gwj)
{
        can_rx_unregister(gwj->src.dev, gwj->ccgw.filter.can_id,
                          gwj->ccgw.filter.can_mask, can_can_gw_rcv, gwj);
}

static int cgw_notifier(struct notifier_block *nb,
                        unsigned long msg, void *data)
{
        struct net_device *dev = (struct net_device *)data;

        if (!net_eq(dev_net(dev), &init_net))
                return NOTIFY_DONE;
        if (dev->type != ARPHRD_CAN)
                return NOTIFY_DONE;

        if (msg == NETDEV_UNREGISTER) {

                struct cgw_job *gwj = NULL;
                struct hlist_node *n, *nx;

                ASSERT_RTNL();

                hlist_for_each_entry_safe(gwj, n, nx, &cgw_list, list) {

                        if (gwj->src.dev == dev || gwj->dst.dev == dev) {
                                hlist_del(&gwj->list);
                                cgw_unregister_filter(gwj);
                                kfree(gwj);
                        }
                }
        }

        return NOTIFY_DONE;
}

static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj, int type,
                       u32 pid, u32 seq, int flags)
{
        struct cgw_frame_mod mb;
        struct rtcanmsg *rtcan;
        struct nlmsghdr *nlh;

        nlh = nlmsg_put(skb, pid, seq, type, sizeof(*rtcan), flags);
        if (!nlh)
                return -EMSGSIZE;

        rtcan = nlmsg_data(nlh);
        rtcan->can_family = AF_CAN;
        rtcan->gwtype = gwj->gwtype;
        rtcan->flags = gwj->flags;

        /* add statistics if available */

        if (gwj->handled_frames) {
                if (nla_put_u32(skb, CGW_HANDLED, gwj->handled_frames) < 0)
                        goto cancel;
        }

        if (gwj->dropped_frames) {
                if (nla_put_u32(skb, CGW_DROPPED, gwj->dropped_frames) < 0)
                        goto cancel;
        }

        /* check for non-default settings of attributes */

        if (gwj->mod.modtype.and) {
                memcpy(&mb.cf, &gwj->mod.modframe.and, sizeof(mb.cf));
                mb.modtype = gwj->mod.modtype.and;
                if (nla_put(skb, CGW_MOD_AND, sizeof(mb), &mb) < 0)
                        goto cancel;
        }

        if (gwj->mod.modtype.or) {
                memcpy(&mb.cf, &gwj->mod.modframe.or, sizeof(mb.cf));
                mb.modtype = gwj->mod.modtype.or;
                if (nla_put(skb, CGW_MOD_OR, sizeof(mb), &mb) < 0)
                        goto cancel;
        }

        if (gwj->mod.modtype.xor) {
                memcpy(&mb.cf, &gwj->mod.modframe.xor, sizeof(mb.cf));
                mb.modtype = gwj->mod.modtype.xor;
                if (nla_put(skb, CGW_MOD_XOR, sizeof(mb), &mb) < 0)
                        goto cancel;
        }

        if (gwj->mod.modtype.set) {
                memcpy(&mb.cf, &gwj->mod.modframe.set, sizeof(mb.cf));
                mb.modtype = gwj->mod.modtype.set;
                if (nla_put(skb, CGW_MOD_SET, sizeof(mb), &mb) < 0)
                        goto cancel;
        }

        if (gwj->mod.csumfunc.crc8) {
                if (nla_put(skb, CGW_CS_CRC8, CGW_CS_CRC8_LEN,
                            &gwj->mod.csum.crc8) < 0)
                        goto cancel;
        }

        if (gwj->mod.csumfunc.xor) {
                if (nla_put(skb, CGW_CS_XOR, CGW_CS_XOR_LEN,
                            &gwj->mod.csum.xor) < 0)
                        goto cancel;
        }

        if (gwj->gwtype == CGW_TYPE_CAN_CAN) {

                if (gwj->ccgw.filter.can_id || gwj->ccgw.filter.can_mask) {
                        if (nla_put(skb, CGW_FILTER, sizeof(struct can_filter),
                                    &gwj->ccgw.filter) < 0)
                                goto cancel;
                }

                if (nla_put_u32(skb, CGW_SRC_IF, gwj->ccgw.src_idx) < 0)
                        goto cancel;

                if (nla_put_u32(skb, CGW_DST_IF, gwj->ccgw.dst_idx) < 0)
                        goto cancel;
        }

        return nlmsg_end(skb, nlh);

cancel:
        nlmsg_cancel(skb, nlh);
        return -EMSGSIZE;
}

/* Dump information about all CAN gateway jobs, in response to RTM_GETROUTE */
static int cgw_dump_jobs(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct cgw_job *gwj = NULL;
        struct hlist_node *n;
        int idx = 0;
        int s_idx = cb->args[0];

        rcu_read_lock();
        hlist_for_each_entry_rcu(gwj, n, &cgw_list, list) {
                if (idx < s_idx)
                        goto cont;

                if (cgw_put_job(skb, gwj, RTM_NEWROUTE, NETLINK_CB(cb->skb).portid,
                                cb->nlh->nlmsg_seq, NLM_F_MULTI) < 0)
                        break;
cont:
                idx++;
        }
        rcu_read_unlock();

        cb->args[0] = idx;

        return skb->len;
}

static const struct nla_policy cgw_policy[CGW_MAX+1] = {
        [CGW_MOD_AND]   = { .len = sizeof(struct cgw_frame_mod) },
        [CGW_MOD_OR]    = { .len = sizeof(struct cgw_frame_mod) },
        [CGW_MOD_XOR]   = { .len = sizeof(struct cgw_frame_mod) },
        [CGW_MOD_SET]   = { .len = sizeof(struct cgw_frame_mod) },
        [CGW_CS_XOR]    = { .len = sizeof(struct cgw_csum_xor) },
        [CGW_CS_CRC8]   = { .len = sizeof(struct cgw_csum_crc8) },
        [CGW_SRC_IF]    = { .type = NLA_U32 },
        [CGW_DST_IF]    = { .type = NLA_U32 },
        [CGW_FILTER]    = { .len = sizeof(struct can_filter) },
};

/* check for common and gwtype specific attributes */
static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod,
                          u8 gwtype, void *gwtypeattr)
{
        struct nlattr *tb[CGW_MAX+1];
        struct cgw_frame_mod mb;
        int modidx = 0;
        int err = 0;

        /* initialize modification & checksum data space */
        memset(mod, 0, sizeof(*mod));

        err = nlmsg_parse(nlh, sizeof(struct rtcanmsg), tb, CGW_MAX,
                          cgw_policy);
        if (err < 0)
                return err;

        /* check for AND/OR/XOR/SET modifications */

        if (tb[CGW_MOD_AND]) {
                nla_memcpy(&mb, tb[CGW_MOD_AND], CGW_MODATTR_LEN);

                canframecpy(&mod->modframe.and, &mb.cf);
                mod->modtype.and = mb.modtype;

                if (mb.modtype & CGW_MOD_ID)
                        mod->modfunc[modidx++] = mod_and_id;

                if (mb.modtype & CGW_MOD_DLC)
                        mod->modfunc[modidx++] = mod_and_dlc;

                if (mb.modtype & CGW_MOD_DATA)
                        mod->modfunc[modidx++] = mod_and_data;
        }

        if (tb[CGW_MOD_OR]) {
                nla_memcpy(&mb, tb[CGW_MOD_OR], CGW_MODATTR_LEN);

                canframecpy(&mod->modframe.or, &mb.cf);
                mod->modtype.or = mb.modtype;

                if (mb.modtype & CGW_MOD_ID)
                        mod->modfunc[modidx++] = mod_or_id;

                if (mb.modtype & CGW_MOD_DLC)
                        mod->modfunc[modidx++] = mod_or_dlc;

                if (mb.modtype & CGW_MOD_DATA)
                        mod->modfunc[modidx++] = mod_or_data;
        }

        if (tb[CGW_MOD_XOR]) {
                nla_memcpy(&mb, tb[CGW_MOD_XOR], CGW_MODATTR_LEN);

                canframecpy(&mod->modframe.xor, &mb.cf);
                mod->modtype.xor = mb.modtype;

                if (mb.modtype & CGW_MOD_ID)
                        mod->modfunc[modidx++] = mod_xor_id;

                if (mb.modtype & CGW_MOD_DLC)
                        mod->modfunc[modidx++] = mod_xor_dlc;

                if (mb.modtype & CGW_MOD_DATA)
                        mod->modfunc[modidx++] = mod_xor_data;
        }

        if (tb[CGW_MOD_SET]) {
                nla_memcpy(&mb, tb[CGW_MOD_SET], CGW_MODATTR_LEN);

                canframecpy(&mod->modframe.set, &mb.cf);
                mod->modtype.set = mb.modtype;

                if (mb.modtype & CGW_MOD_ID)
                        mod->modfunc[modidx++] = mod_set_id;

                if (mb.modtype & CGW_MOD_DLC)
                        mod->modfunc[modidx++] = mod_set_dlc;

                if (mb.modtype & CGW_MOD_DATA)
                        mod->modfunc[modidx++] = mod_set_data;
        }

        /* check for checksum operations after CAN frame modifications */
        if (modidx) {

                if (tb[CGW_CS_CRC8]) {
                        struct cgw_csum_crc8 *c = nla_data(tb[CGW_CS_CRC8]);

                        err = cgw_chk_csum_parms(c->from_idx, c->to_idx,
                                                 c->result_idx);
                        if (err)
                                return err;

                        nla_memcpy(&mod->csum.crc8, tb[CGW_CS_CRC8],
                                   CGW_CS_CRC8_LEN);

                        /*
                         * select dedicated processing function to reduce
                         * runtime operations in receive hot path.
                         */
                        if (c->from_idx < 0 || c->to_idx < 0 ||
                            c->result_idx < 0)
                                mod->csumfunc.crc8 = cgw_csum_crc8_rel;
                        else if (c->from_idx <= c->to_idx)
                                mod->csumfunc.crc8 = cgw_csum_crc8_pos;
                        else
                                mod->csumfunc.crc8 = cgw_csum_crc8_neg;
                }

                if (tb[CGW_CS_XOR]) {
                        struct cgw_csum_xor *c = nla_data(tb[CGW_CS_XOR]);

                        err = cgw_chk_csum_parms(c->from_idx, c->to_idx,
                                                 c->result_idx);
                        if (err)
                                return err;

                        nla_memcpy(&mod->csum.xor, tb[CGW_CS_XOR],
                                   CGW_CS_XOR_LEN);

                        /*
                         * select dedicated processing function to reduce
                         * runtime operations in receive hot path.
                         */
                        if (c->from_idx < 0 || c->to_idx < 0 ||
                            c->result_idx < 0)
                                mod->csumfunc.xor = cgw_csum_xor_rel;
                        else if (c->from_idx <= c->to_idx)
                                mod->csumfunc.xor = cgw_csum_xor_pos;
                        else
                                mod->csumfunc.xor = cgw_csum_xor_neg;
                }
        }

        if (gwtype == CGW_TYPE_CAN_CAN) {

                /* check CGW_TYPE_CAN_CAN specific attributes */

                struct can_can_gw *ccgw = (struct can_can_gw *)gwtypeattr;
                memset(ccgw, 0, sizeof(*ccgw));

                /* check for can_filter in attributes */
                if (tb[CGW_FILTER])
                        nla_memcpy(&ccgw->filter, tb[CGW_FILTER],
                                   sizeof(struct can_filter));

                err = -ENODEV;

                /* specifying two interfaces is mandatory */
                if (!tb[CGW_SRC_IF] || !tb[CGW_DST_IF])
                        return err;

                ccgw->src_idx = nla_get_u32(tb[CGW_SRC_IF]);
                ccgw->dst_idx = nla_get_u32(tb[CGW_DST_IF]);

                /* both indices set to 0 for flushing all routing entries */
                if (!ccgw->src_idx && !ccgw->dst_idx)
                        return 0;

                /* only one index set to 0 is an error */
                if (!ccgw->src_idx || !ccgw->dst_idx)
                        return err;
        }

        /* add the checks for other gwtypes here */

        return 0;
}
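
/*
 * To summarize the expected netlink layout: a RTM_NEWROUTE/RTM_DELROUTE
 * request carries a struct rtcanmsg header followed by the mandatory
 * CGW_SRC_IF/CGW_DST_IF u32 attributes and optional CGW_FILTER,
 * CGW_MOD_{AND,OR,XOR,SET} and CGW_CS_{XOR,CRC8} attributes as validated
 * by cgw_policy[] above.
 */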

static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh,
                          void *arg)
{
        struct rtcanmsg *r;
        struct cgw_job *gwj;
        int err = 0;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        if (nlmsg_len(nlh) < sizeof(*r))
                return -EINVAL;

        r = nlmsg_data(nlh);
        if (r->can_family != AF_CAN)
                return -EPFNOSUPPORT;

        /* so far we only support CAN -> CAN routings */
        if (r->gwtype != CGW_TYPE_CAN_CAN)
                return -EINVAL;

        gwj = kmem_cache_alloc(cgw_cache, GFP_KERNEL);
        if (!gwj)
                return -ENOMEM;

        gwj->handled_frames = 0;
        gwj->dropped_frames = 0;
        gwj->flags = r->flags;
        gwj->gwtype = r->gwtype;

        err = cgw_parse_attr(nlh, &gwj->mod, CGW_TYPE_CAN_CAN, &gwj->ccgw);
        if (err < 0)
                goto out;

        err = -ENODEV;

        /* ifindex == 0 is not allowed for job creation */
        if (!gwj->ccgw.src_idx || !gwj->ccgw.dst_idx)
                goto out;

        gwj->src.dev = dev_get_by_index(&init_net, gwj->ccgw.src_idx);

        if (!gwj->src.dev)
                goto out;

        /* check for CAN netdev not using header_ops - see can_can_gw_rcv() */
        if (gwj->src.dev->type != ARPHRD_CAN || gwj->src.dev->header_ops)
                goto put_src_out;

        gwj->dst.dev = dev_get_by_index(&init_net, gwj->ccgw.dst_idx);

        if (!gwj->dst.dev)
                goto put_src_out;

        /* check for CAN netdev not using header_ops - see can_can_gw_rcv() */
        if (gwj->dst.dev->type != ARPHRD_CAN || gwj->dst.dev->header_ops)
                goto put_src_dst_out;

        ASSERT_RTNL();

        err = cgw_register_filter(gwj);
        if (!err)
                hlist_add_head_rcu(&gwj->list, &cgw_list);

put_src_dst_out:
        dev_put(gwj->dst.dev);
put_src_out:
        dev_put(gwj->src.dev);
out:
        if (err)
                kmem_cache_free(cgw_cache, gwj);

        return err;
}

static void cgw_remove_all_jobs(void)
{
        struct cgw_job *gwj = NULL;
        struct hlist_node *n, *nx;

        ASSERT_RTNL();

        hlist_for_each_entry_safe(gwj, n, nx, &cgw_list, list) {
                hlist_del(&gwj->list);
                cgw_unregister_filter(gwj);
                kfree(gwj);
        }
}

static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
        struct cgw_job *gwj = NULL;
        struct hlist_node *n, *nx;
        struct rtcanmsg *r;
        struct cf_mod mod;
        struct can_can_gw ccgw;
        int err = 0;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        if (nlmsg_len(nlh) < sizeof(*r))
                return -EINVAL;

        r = nlmsg_data(nlh);
        if (r->can_family != AF_CAN)
                return -EPFNOSUPPORT;

        /* so far we only support CAN -> CAN routings */
        if (r->gwtype != CGW_TYPE_CAN_CAN)
                return -EINVAL;

        err = cgw_parse_attr(nlh, &mod, CGW_TYPE_CAN_CAN, &ccgw);
        if (err < 0)
                return err;

        /* two interface indices both set to 0 => remove all entries */
        if (!ccgw.src_idx && !ccgw.dst_idx) {
                cgw_remove_all_jobs();
                return 0;
        }

        err = -EINVAL;

        ASSERT_RTNL();

        /* remove only the first matching entry */
        hlist_for_each_entry_safe(gwj, n, nx, &cgw_list, list) {

                if (gwj->flags != r->flags)
                        continue;

                if (memcmp(&gwj->mod, &mod, sizeof(mod)))
                        continue;

                /* r->gwtype == CGW_TYPE_CAN_CAN has already been checked above */
                if (memcmp(&gwj->ccgw, &ccgw, sizeof(ccgw)))
                        continue;

                hlist_del(&gwj->list);
                cgw_unregister_filter(gwj);
                kfree(gwj);
                err = 0;
                break;
        }

        return err;
}

static __init int cgw_module_init(void)
{
        printk(banner);

        cgw_cache = kmem_cache_create("can_gw", sizeof(struct cgw_job),
                                      0, 0, NULL);

        if (!cgw_cache)
                return -ENOMEM;

        /* set notifier */
        notifier.notifier_call = cgw_notifier;
        register_netdevice_notifier(&notifier);

        if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
                unregister_netdevice_notifier(&notifier);
                kmem_cache_destroy(cgw_cache);
                return -ENOBUFS;
        }

        /* Only the first call to __rtnl_register can fail */
        __rtnl_register(PF_CAN, RTM_NEWROUTE, cgw_create_job, NULL, NULL);
        __rtnl_register(PF_CAN, RTM_DELROUTE, cgw_remove_job, NULL, NULL);

        return 0;
}

static __exit void cgw_module_exit(void)
{
        rtnl_unregister_all(PF_CAN);

        unregister_netdevice_notifier(&notifier);

        rtnl_lock();
        cgw_remove_all_jobs();
        rtnl_unlock();

        rcu_barrier(); /* Wait for completion of call_rcu()'s */

        kmem_cache_destroy(cgw_cache);
}

module_init(cgw_module_init);
module_exit(cgw_module_exit);