/*
 * bcm.c - Broadcast Manager to filter/send (cyclic) CAN content
 *
 * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Volkswagen nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * Alternatively, provided that this notice is retained in full, this
 * software may be distributed under the terms of the GNU General
 * Public License ("GPL") version 2, in which case the provisions of the
 * GPL apply INSTEAD OF those given above.
 *
 * The provided data structures and external interfaces from this code
 * are not restricted to be used by modules with a GPL compatible license.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * Send feedback to <socketcan-users@lists.berlios.de>
 *
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/hrtimer.h>
#include <linux/list.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/uio.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <linux/can.h>
#include <linux/can/core.h>
#include <linux/can/bcm.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/net_namespace.h>

/* use of last_frames[index].can_dlc */
#define RX_RECV 0x40 /* received data for this element */
#define RX_THR  0x80 /* element has not been sent due to throttle feature */
#define BCM_CAN_DLC_MASK 0x0F /* clean private flags in can_dlc by masking */

/* get best masking value for can_rx_register() for a given single can_id */
#define REGMASK(id) ((id & CAN_EFF_FLAG) ? \
		     (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \
		     (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG))

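/*
 * Illustrative note (assumes the usual masked-compare filter matching done
 * by the CAN core in af_can.c): with the mask built by REGMASK() only frames
 * whose identifier bits, EFF flag and RTR flag all equal the registered
 * op->can_id are delivered to bcm_rx_handler(). E.g. an op registered for
 * the SFF id 0x123 does not see the EFF frame with id 0x123 or the
 * corresponding RTR frame.
 */
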
#define CAN_BCM_VERSION CAN_VERSION
static __initdata const char banner[] = KERN_INFO
	"can: broadcast manager protocol (rev " CAN_BCM_VERSION " t)\n";

MODULE_DESCRIPTION("PF_CAN broadcast manager protocol");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
MODULE_ALIAS("can-proto-2");

/* easy access to can_frame payload */
static inline u64 GET_U64(const struct can_frame *cp)
{
	return *(u64 *)cp->data;
}

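/*
 * Note: treating the 8 byte CAN payload as a single u64 lets the BCM do the
 * 'relevant bits' comparisons below (MUX mask matching in bcm_rx_handler(),
 * content filtering in bcm_rx_cmp_to_index()) with one masked 64 bit compare
 * instead of a per-byte loop.
 */
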
struct bcm_op {
	struct list_head list;
	int ifindex;
	canid_t can_id;
	int flags;
	unsigned long frames_abs, frames_filtered;
	struct timeval ival1, ival2;
	struct hrtimer timer, thrtimer;
	struct tasklet_struct tsklet, thrtsklet;
	ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg;
	int rx_ifindex;
	int count;
	int nframes;
	int currframe;
	struct can_frame *frames;
	struct can_frame *last_frames;
	struct can_frame sframe;
	struct can_frame last_sframe;
	struct sock *sk;
	struct net_device *rx_reg_dev;
};

static struct proc_dir_entry *proc_dir;

struct bcm_sock {
	struct sock sk;
	int bound;
	int ifindex;
	struct notifier_block notifier;
	struct list_head rx_ops;
	struct list_head tx_ops;
	unsigned long dropped_usr_msgs;
	struct proc_dir_entry *bcm_proc_read;
	char procname [9]; /* pointer printed in ASCII with \0 */
};

static inline struct bcm_sock *bcm_sk(const struct sock *sk)
{
	return (struct bcm_sock *)sk;
}

#define CFSIZ sizeof(struct can_frame)
#define OPSIZ sizeof(struct bcm_op)
#define MHSIZ sizeof(struct bcm_msg_head)

/*
 * procfs functions
 */
static char *bcm_proc_getifname(char *result, int ifindex)
{
	struct net_device *dev;

	if (!ifindex)
		return "any";

	rcu_read_lock();
	dev = dev_get_by_index_rcu(&init_net, ifindex);
	if (dev)
		strcpy(result, dev->name);
	else
		strcpy(result, "???");
	rcu_read_unlock();

	return result;
}

static int bcm_proc_show(struct seq_file *m, void *v)
{
	char ifname[IFNAMSIZ];
	struct sock *sk = (struct sock *)m->private;
	struct bcm_sock *bo = bcm_sk(sk);
	struct bcm_op *op;

	seq_printf(m, ">>> socket %p", sk->sk_socket);
	seq_printf(m, " / sk %p", sk);
	seq_printf(m, " / bo %p", bo);
	seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
	seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
	seq_printf(m, " <<<\n");

	list_for_each_entry(op, &bo->rx_ops, list) {

		unsigned long reduction;

		/* print only active entries & prevent division by zero */
		if (!op->frames_abs)
			continue;

		seq_printf(m, "rx_op: %03X %-5s ",
			   op->can_id, bcm_proc_getifname(ifname, op->ifindex));
		seq_printf(m, "[%d]%c ", op->nframes,
			   (op->flags & RX_CHECK_DLC)?'d':' ');
		if (op->kt_ival1.tv64)
			seq_printf(m, "timeo=%lld ",
				   (long long)
				   ktime_to_us(op->kt_ival1));

		if (op->kt_ival2.tv64)
			seq_printf(m, "thr=%lld ",
				   (long long)
				   ktime_to_us(op->kt_ival2));

		seq_printf(m, "# recv %ld (%ld) => reduction: ",
			   op->frames_filtered, op->frames_abs);

		reduction = 100 - (op->frames_filtered * 100) / op->frames_abs;

		seq_printf(m, "%s%ld%%\n",
			   (reduction == 100)?"near ":"", reduction);
	}

	list_for_each_entry(op, &bo->tx_ops, list) {

		seq_printf(m, "tx_op: %03X %s [%d] ",
			   op->can_id,
			   bcm_proc_getifname(ifname, op->ifindex),
			   op->nframes);

		if (op->kt_ival1.tv64)
			seq_printf(m, "t1=%lld ",
				   (long long) ktime_to_us(op->kt_ival1));

		if (op->kt_ival2.tv64)
			seq_printf(m, "t2=%lld ",
				   (long long) ktime_to_us(op->kt_ival2));

		seq_printf(m, "# sent %ld\n", op->frames_abs);
	}
	seq_putc(m, '\n');
	return 0;
}

static int bcm_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, bcm_proc_show, PDE(inode)->data);
}

static const struct file_operations bcm_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= bcm_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/*
 * bcm_can_tx - send the (next) CAN frame to the appropriate CAN interface
 *              of the given bcm tx op
 */
static void bcm_can_tx(struct bcm_op *op)
{
	struct sk_buff *skb;
	struct net_device *dev;
	struct can_frame *cf = &op->frames[op->currframe];

	/* no target device? => exit */
	if (!op->ifindex)
		return;

	dev = dev_get_by_index(&init_net, op->ifindex);
	if (!dev) {
		/* RFC: should this bcm_op remove itself here? */
		return;
	}

	skb = alloc_skb(CFSIZ, gfp_any());
	if (!skb)
		goto out;

	memcpy(skb_put(skb, CFSIZ), cf, CFSIZ);

	/* send with loopback */
	skb->dev = dev;
	skb->sk = op->sk;
	can_send(skb, 1);

	/* update statistics */
	op->currframe++;
	op->frames_abs++;

	/* reached last frame? */
	if (op->currframe >= op->nframes)
		op->currframe = 0;
 out:
	dev_put(dev);
}

/*
 * bcm_send_to_user - send a BCM message to the userspace
 *                    (consisting of bcm_msg_head + x CAN frames)
 */
static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
			     struct can_frame *frames, int has_timestamp)
{
	struct sk_buff *skb;
	struct can_frame *firstframe;
	struct sockaddr_can *addr;
	struct sock *sk = op->sk;
	int datalen = head->nframes * CFSIZ;
	int err;

	skb = alloc_skb(sizeof(*head) + datalen, gfp_any());
	if (!skb)
		return;

	memcpy(skb_put(skb, sizeof(*head)), head, sizeof(*head));

	if (head->nframes) {
		/* can_frames starting here */
		firstframe = (struct can_frame *)skb_tail_pointer(skb);

		memcpy(skb_put(skb, datalen), frames, datalen);

		/*
		 * the BCM uses the can_dlc-element of the can_frame
		 * structure for internal purposes. This is only
		 * relevant for updates that are generated by the
		 * BCM, where nframes is 1
		 */
		if (head->nframes == 1)
			firstframe->can_dlc &= BCM_CAN_DLC_MASK;
	}

	if (has_timestamp) {
		/* restore rx timestamp */
		skb->tstamp = op->rx_stamp;
	}

	/*
	 * Put the datagram to the queue so that bcm_recvmsg() can
	 * get it from there. We need to pass the interface index to
	 * bcm_recvmsg(). We pass a whole struct sockaddr_can in skb->cb
	 * containing the interface index.
	 */

	BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct sockaddr_can));
	addr = (struct sockaddr_can *)skb->cb;
	memset(addr, 0, sizeof(*addr));
	addr->can_family  = AF_CAN;
	addr->can_ifindex = op->rx_ifindex;

	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0) {
		struct bcm_sock *bo = bcm_sk(sk);

		kfree_skb(skb);
		/* don't care about overflows in this statistic */
		bo->dropped_usr_msgs++;
	}
}

static void bcm_tx_timeout_tsklet(unsigned long data)
{
	struct bcm_op *op = (struct bcm_op *)data;
	struct bcm_msg_head msg_head;

	if (op->kt_ival1.tv64 && (op->count > 0)) {

		op->count--;
		if (!op->count && (op->flags & TX_COUNTEVT)) {

			/* create notification to user */
			msg_head.opcode  = TX_EXPIRED;
			msg_head.flags   = op->flags;
			msg_head.count   = op->count;
			msg_head.ival1   = op->ival1;
			msg_head.ival2   = op->ival2;
			msg_head.can_id  = op->can_id;
			msg_head.nframes = 0;

			bcm_send_to_user(op, &msg_head, NULL, 0);
		}
	}

	if (op->kt_ival1.tv64 && (op->count > 0)) {

		/* send (next) frame */
		bcm_can_tx(op);
		hrtimer_start(&op->timer,
			      ktime_add(ktime_get(), op->kt_ival1),
			      HRTIMER_MODE_ABS);

	} else {
		if (op->kt_ival2.tv64) {

			/* send (next) frame */
			bcm_can_tx(op);
			hrtimer_start(&op->timer,
				      ktime_add(ktime_get(), op->kt_ival2),
				      HRTIMER_MODE_ABS);
		}
	}
}

/*
 * bcm_tx_timeout_handler - performs cyclic CAN frame transmissions
 */
static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
{
	struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);

	tasklet_schedule(&op->tsklet);

	return HRTIMER_NORESTART;
}

/*
 * bcm_rx_changed - create a RX_CHANGED notification due to changed content
 */
static void bcm_rx_changed(struct bcm_op *op, struct can_frame *data)
{
	struct bcm_msg_head head;

	/* update statistics */
	op->frames_filtered++;

	/* prevent statistics overflow */
	if (op->frames_filtered > ULONG_MAX/100)
		op->frames_filtered = op->frames_abs = 0;

	/* this element is not throttled anymore */
	data->can_dlc &= (BCM_CAN_DLC_MASK|RX_RECV);

	head.opcode  = RX_CHANGED;
	head.flags   = op->flags;
	head.count   = op->count;
	head.ival1   = op->ival1;
	head.ival2   = op->ival2;
	head.can_id  = op->can_id;
	head.nframes = 1;

	bcm_send_to_user(op, &head, data, 1);
}

/*
 * bcm_rx_update_and_send - process a detected relevant receive content change
 *                          1. update the last received data
 *                          2. send a notification to the user (if possible)
 */
static void bcm_rx_update_and_send(struct bcm_op *op,
				   struct can_frame *lastdata,
				   const struct can_frame *rxdata)
{
	memcpy(lastdata, rxdata, CFSIZ);

	/* mark as used and throttled by default */
	lastdata->can_dlc |= (RX_RECV|RX_THR);

	/* throttling mode inactive? */
	if (!op->kt_ival2.tv64) {
		/* send RX_CHANGED to the user immediately */
		bcm_rx_changed(op, lastdata);
		return;
	}

	/* with active throttling timer we are just done here */
	if (hrtimer_active(&op->thrtimer))
		return;

	/* first reception with enabled throttling mode */
	if (!op->kt_lastmsg.tv64)
		goto rx_changed_settime;

	/* got a second frame inside a potential throttle period? */
	if (ktime_us_delta(ktime_get(), op->kt_lastmsg) <
	    ktime_to_us(op->kt_ival2)) {
		/* do not send the saved data - only start throttle timer */
		hrtimer_start(&op->thrtimer,
			      ktime_add(op->kt_lastmsg, op->kt_ival2),
			      HRTIMER_MODE_ABS);
		return;
	}

	/* the gap was that big, that throttling was not needed here */
rx_changed_settime:
	bcm_rx_changed(op, lastdata);
	op->kt_lastmsg = ktime_get();
}

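/*
 * Throttling example (illustrative): with kt_ival2 set to 100 ms the first
 * content change is delivered immediately and kt_lastmsg is updated. A
 * further change arriving e.g. 30 ms later only marks the element RX_THR and
 * starts thrtimer, which fires at kt_lastmsg + 100 ms and pushes the blocked
 * update via bcm_rx_thr_flush(). The user therefore gets at most one
 * RX_CHANGED message per filter element and throttle interval.
 */
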
/*
 * bcm_rx_cmp_to_index - (bit)compares the currently received data to formerly
 *                       received data stored in op->last_frames[]
 */
static void bcm_rx_cmp_to_index(struct bcm_op *op, int index,
				const struct can_frame *rxdata)
{
	/*
	 * no one uses the MSBs of can_dlc for comparison,
	 * so we use it here to detect the first time of reception
	 */

	if (!(op->last_frames[index].can_dlc & RX_RECV)) {
		/* received data for the first time => send update to user */
		bcm_rx_update_and_send(op, &op->last_frames[index], rxdata);
		return;
	}

	/* do a real check in can_frame data section */

	if ((GET_U64(&op->frames[index]) & GET_U64(rxdata)) !=
	    (GET_U64(&op->frames[index]) & GET_U64(&op->last_frames[index]))) {
		bcm_rx_update_and_send(op, &op->last_frames[index], rxdata);
		return;
	}

	if (op->flags & RX_CHECK_DLC) {
		/* do a real check in can_frame dlc */
		if (rxdata->can_dlc != (op->last_frames[index].can_dlc &
					BCM_CAN_DLC_MASK)) {
			bcm_rx_update_and_send(op, &op->last_frames[index],
					       rxdata);
			return;
		}
	}
}

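/*
 * Comparison example (illustrative): op->frames[index].data acts as the
 * relevant-bits mask. With data[] = { 0xFF, 0x00, ... } only a change in the
 * first payload byte makes the masked u64 values above differ, so only such
 * a change triggers a new RX_CHANGED notification; changes in the unmasked
 * bytes are ignored.
 */
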
/*
 * bcm_rx_starttimer - enable timeout monitoring for CAN frame reception
 */
static void bcm_rx_starttimer(struct bcm_op *op)
{
	if (op->flags & RX_NO_AUTOTIMER)
		return;

	if (op->kt_ival1.tv64)
		hrtimer_start(&op->timer, op->kt_ival1, HRTIMER_MODE_REL);
}

static void bcm_rx_timeout_tsklet(unsigned long data)
{
	struct bcm_op *op = (struct bcm_op *)data;
	struct bcm_msg_head msg_head;

	/* create notification to user */
	msg_head.opcode  = RX_TIMEOUT;
	msg_head.flags   = op->flags;
	msg_head.count   = op->count;
	msg_head.ival1   = op->ival1;
	msg_head.ival2   = op->ival2;
	msg_head.can_id  = op->can_id;
	msg_head.nframes = 0;

	bcm_send_to_user(op, &msg_head, NULL, 0);
}

/*
 * bcm_rx_timeout_handler - when the (cyclic) CAN frame reception timed out
 */
static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
{
	struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);

	/* schedule before NET_RX_SOFTIRQ */
	tasklet_hi_schedule(&op->tsklet);

	/* no restart of the timer is done here! */

	/* if user wants to be informed, when cyclic CAN-Messages come back */
	if ((op->flags & RX_ANNOUNCE_RESUME) && op->last_frames) {
		/* clear received can_frames to indicate 'nothing received' */
		memset(op->last_frames, 0, op->nframes * CFSIZ);
	}

	return HRTIMER_NORESTART;
}

/*
 * bcm_rx_do_flush - helper for bcm_rx_thr_flush
 */
static inline int bcm_rx_do_flush(struct bcm_op *op, int update, int index)
{
	if ((op->last_frames) && (op->last_frames[index].can_dlc & RX_THR)) {
		if (update)
			bcm_rx_changed(op, &op->last_frames[index]);
		return 1;
	}
	return 0;
}

/*
 * bcm_rx_thr_flush - Check for throttled data and send it to the userspace
 *
 * update == 0 : just check if throttled data is available  (any irq context)
 * update == 1 : check and send throttled data to userspace (soft_irq context)
 */
static int bcm_rx_thr_flush(struct bcm_op *op, int update)
{
	int updated = 0;

	if (op->nframes > 1) {
		int i;

		/* for MUX filter we start at index 1 */
		for (i = 1; i < op->nframes; i++)
			updated += bcm_rx_do_flush(op, update, i);

	} else {
		/* for RX_FILTER_ID and simple filter */
		updated += bcm_rx_do_flush(op, update, 0);
	}

	return updated;
}

static void bcm_rx_thr_tsklet(unsigned long data)
{
	struct bcm_op *op = (struct bcm_op *)data;

	/* push the changed data to the userspace */
	bcm_rx_thr_flush(op, 1);
}

/*
 * bcm_rx_thr_handler - the time for blocked content updates is over now:
 *                      Check for throttled data and send it to the userspace
 */
static enum hrtimer_restart bcm_rx_thr_handler(struct hrtimer *hrtimer)
{
	struct bcm_op *op = container_of(hrtimer, struct bcm_op, thrtimer);

	tasklet_schedule(&op->thrtsklet);

	if (bcm_rx_thr_flush(op, 0)) {
		hrtimer_forward(hrtimer, ktime_get(), op->kt_ival2);
		return HRTIMER_RESTART;
	} else {
		/* rearm throttle handling */
		op->kt_lastmsg = ktime_set(0, 0);
		return HRTIMER_NORESTART;
	}
}

/*
 * bcm_rx_handler - handle a CAN frame reception
 */
static void bcm_rx_handler(struct sk_buff *skb, void *data)
{
	struct bcm_op *op = (struct bcm_op *)data;
	const struct can_frame *rxframe = (struct can_frame *)skb->data;
	int i;

	/* disable timeout */
	hrtimer_cancel(&op->timer);

	if (op->can_id != rxframe->can_id)
		return;

	/* save rx timestamp */
	op->rx_stamp = skb->tstamp;
	/* save originator for recvfrom() */
	op->rx_ifindex = skb->dev->ifindex;
	/* update statistics */
	op->frames_abs++;

	if (op->flags & RX_RTR_FRAME) {
		/* send reply for RTR-request (placed in op->frames[0]) */
		bcm_can_tx(op);
		return;
	}

	if (op->flags & RX_FILTER_ID) {
		/* the easiest case */
		bcm_rx_update_and_send(op, &op->last_frames[0], rxframe);
		goto rx_starttimer;
	}

	if (op->nframes == 1) {
		/* simple compare with index 0 */
		bcm_rx_cmp_to_index(op, 0, rxframe);
		goto rx_starttimer;
	}

	if (op->nframes > 1) {
		/*
		 * multiplex compare
		 *
		 * find the first multiplex mask that fits.
		 * Remark: The MUX-mask is stored in index 0
		 */

		for (i = 1; i < op->nframes; i++) {
			if ((GET_U64(&op->frames[0]) & GET_U64(rxframe)) ==
			    (GET_U64(&op->frames[0]) &
			     GET_U64(&op->frames[i]))) {
				bcm_rx_cmp_to_index(op, i, rxframe);
				break;
			}
		}
	}

rx_starttimer:
	bcm_rx_starttimer(op);
}

/*
 * helpers for bcm_op handling: find & delete bcm [rx|tx] op elements
 */
static struct bcm_op *bcm_find_op(struct list_head *ops, canid_t can_id,
				  int ifindex)
{
	struct bcm_op *op;

	list_for_each_entry(op, ops, list) {
		if ((op->can_id == can_id) && (op->ifindex == ifindex))
			return op;
	}

	return NULL;
}

static void bcm_remove_op(struct bcm_op *op)
{
	hrtimer_cancel(&op->timer);
	hrtimer_cancel(&op->thrtimer);

	if (op->tsklet.func)
		tasklet_kill(&op->tsklet);

	if (op->thrtsklet.func)
		tasklet_kill(&op->thrtsklet);

	if ((op->frames) && (op->frames != &op->sframe))
		kfree(op->frames);

	if ((op->last_frames) && (op->last_frames != &op->last_sframe))
		kfree(op->last_frames);

	kfree(op);

	return;
}

static void bcm_rx_unreg(struct net_device *dev, struct bcm_op *op)
{
	if (op->rx_reg_dev == dev) {
		can_rx_unregister(dev, op->can_id, REGMASK(op->can_id),
				  bcm_rx_handler, op);

		/* mark as removed subscription */
		op->rx_reg_dev = NULL;
	} else
		printk(KERN_ERR "can-bcm: bcm_rx_unreg: registered device "
		       "mismatch %p %p\n", op->rx_reg_dev, dev);
}

/*
 * bcm_delete_rx_op - find and remove a rx op (returns number of removed ops)
 */
static int bcm_delete_rx_op(struct list_head *ops, canid_t can_id, int ifindex)
{
	struct bcm_op *op, *n;

	list_for_each_entry_safe(op, n, ops, list) {
		if ((op->can_id == can_id) && (op->ifindex == ifindex)) {

			/*
			 * Don't care if we're bound or not (due to netdev
			 * problems) can_rx_unregister() is always a safe
			 * thing to do here.
			 */
			if (op->ifindex) {
				/*
				 * Only remove subscriptions that had not
				 * been removed due to NETDEV_UNREGISTER
				 * in bcm_notifier()
				 */
				if (op->rx_reg_dev) {
					struct net_device *dev;

					dev = dev_get_by_index(&init_net,
							       op->ifindex);
					if (dev) {
						bcm_rx_unreg(dev, op);
						dev_put(dev);
					}
				}
			} else
				can_rx_unregister(NULL, op->can_id,
						  REGMASK(op->can_id),
						  bcm_rx_handler, op);

			list_del(&op->list);
			bcm_remove_op(op);
			return 1; /* done */
		}
	}

	return 0; /* not found */
}

/*
 * bcm_delete_tx_op - find and remove a tx op (returns number of removed ops)
 */
static int bcm_delete_tx_op(struct list_head *ops, canid_t can_id, int ifindex)
{
	struct bcm_op *op, *n;

	list_for_each_entry_safe(op, n, ops, list) {
		if ((op->can_id == can_id) && (op->ifindex == ifindex)) {
			list_del(&op->list);
			bcm_remove_op(op);
			return 1; /* done */
		}
	}

	return 0; /* not found */
}

/*
 * bcm_read_op - read out a bcm_op and send it to the user (for bcm_sendmsg)
 */
static int bcm_read_op(struct list_head *ops, struct bcm_msg_head *msg_head,
		       int ifindex)
{
	struct bcm_op *op = bcm_find_op(ops, msg_head->can_id, ifindex);

	if (!op)
		return -EINVAL;

	/* put current values into msg_head */
	msg_head->flags   = op->flags;
	msg_head->count   = op->count;
	msg_head->ival1   = op->ival1;
	msg_head->ival2   = op->ival2;
	msg_head->nframes = op->nframes;

	bcm_send_to_user(op, msg_head, op->frames, 0);

	return MHSIZ;
}

/*
 * bcm_tx_setup - create or update a bcm tx op (for bcm_sendmsg)
 */
static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
			int ifindex, struct sock *sk)
{
	struct bcm_sock *bo = bcm_sk(sk);
	struct bcm_op *op;
	int i, err;

	/* we need a real device to send frames */
	if (!ifindex)
		return -ENODEV;

	/* we need at least one can_frame */
	if (msg_head->nframes < 1)
		return -EINVAL;

	/* check the given can_id */
	op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex);

	if (op) {
		/* update existing BCM operation */

		/*
		 * Do we need more space for the can_frames than currently
		 * allocated? -> This is a _really_ unusual use-case and
		 * therefore (complexity / locking) it is not supported.
		 */
		if (msg_head->nframes > op->nframes)
			return -E2BIG;

		/* update can_frames content */
		for (i = 0; i < msg_head->nframes; i++) {
			err = memcpy_fromiovec((u8 *)&op->frames[i],
					       msg->msg_iov, CFSIZ);

			if (op->frames[i].can_dlc > 8)
				err = -EINVAL;

			if (err < 0)
				return err;

			if (msg_head->flags & TX_CP_CAN_ID) {
				/* copy can_id into frame */
				op->frames[i].can_id = msg_head->can_id;
			}
		}

	} else {
		/* insert new BCM operation for the given can_id */

		op = kzalloc(OPSIZ, GFP_KERNEL);
		if (!op)
			return -ENOMEM;

		op->can_id = msg_head->can_id;

		/* create array for can_frames and copy the data */
		if (msg_head->nframes > 1) {
			op->frames = kmalloc(msg_head->nframes * CFSIZ,
					     GFP_KERNEL);
			if (!op->frames) {
				kfree(op);
				return -ENOMEM;
			}
		} else
			op->frames = &op->sframe;

		for (i = 0; i < msg_head->nframes; i++) {
			err = memcpy_fromiovec((u8 *)&op->frames[i],
					       msg->msg_iov, CFSIZ);

			if (op->frames[i].can_dlc > 8)
				err = -EINVAL;

			if (err < 0) {
				if (op->frames != &op->sframe)
					kfree(op->frames);
				kfree(op);
				return err;
			}

			if (msg_head->flags & TX_CP_CAN_ID) {
				/* copy can_id into frame */
				op->frames[i].can_id = msg_head->can_id;
			}
		}

		/* tx_ops never compare with previous received messages */
		op->last_frames = NULL;

		/* bcm_can_tx / bcm_tx_timeout_handler needs this */
		op->sk = sk;
		op->ifindex = ifindex;

		/* initialize uninitialized (kzalloc) structure */
		hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		op->timer.function = bcm_tx_timeout_handler;

		/* initialize tasklet for tx countevent notification */
		tasklet_init(&op->tsklet, bcm_tx_timeout_tsklet,
			     (unsigned long) op);

		/* currently unused in tx_ops */
		hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);

		/* add this bcm_op to the list of the tx_ops */
		list_add(&op->list, &bo->tx_ops);

	} /* if ((op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex))) */

	if (op->nframes != msg_head->nframes) {
		op->nframes = msg_head->nframes;
		/* start multiple frame transmission with index 0 */
		op->currframe = 0;
	}

	/* check flags */

	op->flags = msg_head->flags;

	if (op->flags & TX_RESET_MULTI_IDX) {
		/* start multiple frame transmission with index 0 */
		op->currframe = 0;
	}

	if (op->flags & SETTIMER) {
		/* set timer values */
		op->count = msg_head->count;
		op->ival1 = msg_head->ival1;
		op->ival2 = msg_head->ival2;
		op->kt_ival1 = timeval_to_ktime(msg_head->ival1);
		op->kt_ival2 = timeval_to_ktime(msg_head->ival2);

		/* disable an active timer due to zero values? */
		if (!op->kt_ival1.tv64 && !op->kt_ival2.tv64)
			hrtimer_cancel(&op->timer);
	}

	if ((op->flags & STARTTIMER) &&
	    ((op->kt_ival1.tv64 && op->count) || op->kt_ival2.tv64)) {

		/* spec: send can_frame when starting timer */
		op->flags |= TX_ANNOUNCE;

		if (op->kt_ival1.tv64 && (op->count > 0)) {
			/* op->count-- is done in bcm_tx_timeout_handler */
			hrtimer_start(&op->timer, op->kt_ival1,
				      HRTIMER_MODE_REL);
		} else
			hrtimer_start(&op->timer, op->kt_ival2,
				      HRTIMER_MODE_REL);
	}

	if (op->flags & TX_ANNOUNCE)
		bcm_can_tx(op);

	return msg_head->nframes * CFSIZ + MHSIZ;
}

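/*
 * Usage sketch (illustrative userspace code, not part of this module; it
 * assumes a CAN_BCM socket that has been connect()ed to a CAN interface):
 * a TX_SETUP command is one struct bcm_msg_head directly followed by
 * msg_head.nframes can_frames in a single write():
 *
 *	struct {
 *		struct bcm_msg_head head;
 *		struct can_frame frame;
 *	} tx = {
 *		.head = {
 *			.opcode  = TX_SETUP,
 *			.flags   = SETTIMER | STARTTIMER | TX_CP_CAN_ID,
 *			.count   = 10,             // 10 times with ival1 ...
 *			.ival1   = { 0, 100000 },  // ... every 100 ms,
 *			.ival2   = { 1, 0 },       // then once per second
 *			.can_id  = 0x123,
 *			.nframes = 1,
 *		},
 *		.frame = { .can_dlc = 2, .data = { 0x12, 0x34 } },
 *	};
 *
 *	write(s, &tx, sizeof(tx));
 */
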
/*
 * bcm_rx_setup - create or update a bcm rx op (for bcm_sendmsg)
 */
static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
			int ifindex, struct sock *sk)
{
	struct bcm_sock *bo = bcm_sk(sk);
	struct bcm_op *op;
	int do_rx_register;
	int err = 0;

	if ((msg_head->flags & RX_FILTER_ID) || (!(msg_head->nframes))) {
		/* be robust against wrong usage ... */
		msg_head->flags |= RX_FILTER_ID;
		/* ignore trailing garbage */
		msg_head->nframes = 0;
	}

	if ((msg_head->flags & RX_RTR_FRAME) &&
	    ((msg_head->nframes != 1) ||
	     (!(msg_head->can_id & CAN_RTR_FLAG))))
		return -EINVAL;

	/* check the given can_id */
	op = bcm_find_op(&bo->rx_ops, msg_head->can_id, ifindex);
	if (op) {
		/* update existing BCM operation */

		/*
		 * Do we need more space for the can_frames than currently
		 * allocated? -> This is a _really_ unusual use-case and
		 * therefore (complexity / locking) it is not supported.
		 */
		if (msg_head->nframes > op->nframes)
			return -E2BIG;

		if (msg_head->nframes) {
			/* update can_frames content */
			err = memcpy_fromiovec((u8 *)op->frames,
					       msg->msg_iov,
					       msg_head->nframes * CFSIZ);
			if (err < 0)
				return err;

			/* clear last_frames to indicate 'nothing received' */
			memset(op->last_frames, 0, msg_head->nframes * CFSIZ);
		}

		op->nframes = msg_head->nframes;

		/* Only an update -> do not call can_rx_register() */
		do_rx_register = 0;

	} else {
		/* insert new BCM operation for the given can_id */
		op = kzalloc(OPSIZ, GFP_KERNEL);
		if (!op)
			return -ENOMEM;

		op->can_id = msg_head->can_id;
		op->nframes = msg_head->nframes;

		if (msg_head->nframes > 1) {
			/* create array for can_frames and copy the data */
			op->frames = kmalloc(msg_head->nframes * CFSIZ,
					     GFP_KERNEL);
			if (!op->frames) {
				kfree(op);
				return -ENOMEM;
			}

			/* create and init array for received can_frames */
			op->last_frames = kzalloc(msg_head->nframes * CFSIZ,
						  GFP_KERNEL);
			if (!op->last_frames) {
				kfree(op->frames);
				kfree(op);
				return -ENOMEM;
			}

		} else {
			op->frames = &op->sframe;
			op->last_frames = &op->last_sframe;
		}

		if (msg_head->nframes) {
			err = memcpy_fromiovec((u8 *)op->frames, msg->msg_iov,
					       msg_head->nframes * CFSIZ);
			if (err < 0) {
				if (op->frames != &op->sframe)
					kfree(op->frames);
				if (op->last_frames != &op->last_sframe)
					kfree(op->last_frames);
				kfree(op);
				return err;
			}
		}

		/* bcm_can_tx / bcm_tx_timeout_handler needs this */
		op->sk = sk;
		op->ifindex = ifindex;

		/* initialize uninitialized (kzalloc) structure */
		hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		op->timer.function = bcm_rx_timeout_handler;

		/* initialize tasklet for rx timeout notification */
		tasklet_init(&op->tsklet, bcm_rx_timeout_tsklet,
			     (unsigned long) op);

		hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		op->thrtimer.function = bcm_rx_thr_handler;

		/* initialize tasklet for rx throttle handling */
		tasklet_init(&op->thrtsklet, bcm_rx_thr_tsklet,
			     (unsigned long) op);

		/* add this bcm_op to the list of the rx_ops */
		list_add(&op->list, &bo->rx_ops);

		/* call can_rx_register() */
		do_rx_register = 1;

	} /* if ((op = bcm_find_op(&bo->rx_ops, msg_head->can_id, ifindex))) */

	/* check flags */
	op->flags = msg_head->flags;

	if (op->flags & RX_RTR_FRAME) {

		/* no timers in RTR-mode */
		hrtimer_cancel(&op->thrtimer);
		hrtimer_cancel(&op->timer);

		/*
		 * funny feature in RX(!)_SETUP only for RTR-mode:
		 * copy can_id into frame BUT without RTR-flag to
		 * prevent a full-load-loopback-test ... ;-]
		 */
		if ((op->flags & TX_CP_CAN_ID) ||
		    (op->frames[0].can_id == op->can_id))
			op->frames[0].can_id = op->can_id & ~CAN_RTR_FLAG;

	} else {
		if (op->flags & SETTIMER) {

			/* set timer value */
			op->ival1 = msg_head->ival1;
			op->ival2 = msg_head->ival2;
			op->kt_ival1 = timeval_to_ktime(msg_head->ival1);
			op->kt_ival2 = timeval_to_ktime(msg_head->ival2);

			/* disable an active timer due to zero value? */
			if (!op->kt_ival1.tv64)
				hrtimer_cancel(&op->timer);

			/*
			 * In any case cancel the throttle timer, flush
			 * potentially blocked msgs and reset throttle handling
			 */
			op->kt_lastmsg = ktime_set(0, 0);
			hrtimer_cancel(&op->thrtimer);
			bcm_rx_thr_flush(op, 1);
		}

		if ((op->flags & STARTTIMER) && op->kt_ival1.tv64)
			hrtimer_start(&op->timer, op->kt_ival1,
				      HRTIMER_MODE_REL);
	}

	/* now we can register for can_ids, if we added a new bcm_op */
	if (do_rx_register) {
		if (ifindex) {
			struct net_device *dev;

			dev = dev_get_by_index(&init_net, ifindex);
			if (dev) {
				err = can_rx_register(dev, op->can_id,
						      REGMASK(op->can_id),
						      bcm_rx_handler, op,
						      "bcm");

				op->rx_reg_dev = dev;
				dev_put(dev);
			}

		} else
			err = can_rx_register(NULL, op->can_id,
					      REGMASK(op->can_id),
					      bcm_rx_handler, op, "bcm");
		if (err) {
			/* this bcm rx op is broken -> remove it */
			list_del(&op->list);
			bcm_remove_op(op);
			return err;
		}
	}

	return msg_head->nframes * CFSIZ + MHSIZ;
}

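/*
 * Usage note (illustrative): for a multiplex RX_SETUP the user passes
 * nframes > 1 can_frames. frames[0].data is the MUX mask; each frames[i]
 * (i >= 1) holds the MUX value to be matched under that mask and, at the
 * same time, the relevant-bits mask used by bcm_rx_cmp_to_index() for that
 * entry (see bcm_rx_handler() above). ival1 selects the reception timeout
 * (RX_TIMEOUT), ival2 the throttle interval for RX_CHANGED updates.
 */
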
/*
 * bcm_tx_send - send a single CAN frame to the CAN interface (for bcm_sendmsg)
 */
static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk)
{
	struct sk_buff *skb;
	struct net_device *dev;
	int err;

	/* we need a real device to send frames */
	if (!ifindex)
		return -ENODEV;

	skb = alloc_skb(CFSIZ, GFP_KERNEL);

	if (!skb)
		return -ENOMEM;

	err = memcpy_fromiovec(skb_put(skb, CFSIZ), msg->msg_iov, CFSIZ);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	dev = dev_get_by_index(&init_net, ifindex);
	if (!dev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	skb->dev = dev;
	skb->sk  = sk;
	err = can_send(skb, 1); /* send with loopback */
	dev_put(dev);

	if (err)
		return err;

	return CFSIZ + MHSIZ;
}

/*
 * bcm_sendmsg - process BCM commands (opcodes) from the userspace
 */
static int bcm_sendmsg(struct kiocb *iocb, struct socket *sock,
		       struct msghdr *msg, size_t size)
{
	struct sock *sk = sock->sk;
	struct bcm_sock *bo = bcm_sk(sk);
	int ifindex = bo->ifindex; /* default ifindex for this bcm_op */
	struct bcm_msg_head msg_head;
	int ret; /* read bytes or error codes as return value */

	if (!bo->bound)
		return -ENOTCONN;

	/* check for valid message length from userspace */
	if (size < MHSIZ || (size - MHSIZ) % CFSIZ)
		return -EINVAL;

	/* check for alternative ifindex for this bcm_op */

	if (!ifindex && msg->msg_name) {
		/* no bound device as default => check msg_name */
		struct sockaddr_can *addr =
			(struct sockaddr_can *)msg->msg_name;

		if (addr->can_family != AF_CAN)
			return -EINVAL;

		/* ifindex from sendto() */
		ifindex = addr->can_ifindex;

		if (ifindex) {
			struct net_device *dev;

			dev = dev_get_by_index(&init_net, ifindex);
			if (!dev)
				return -ENODEV;

			if (dev->type != ARPHRD_CAN) {
				dev_put(dev);
				return -ENODEV;
			}

			dev_put(dev);
		}
	}

	/* read message head information */

	ret = memcpy_fromiovec((u8 *)&msg_head, msg->msg_iov, MHSIZ);
	if (ret < 0)
		return ret;

	lock_sock(sk);

	switch (msg_head.opcode) {

	case TX_SETUP:
		ret = bcm_tx_setup(&msg_head, msg, ifindex, sk);
		break;

	case RX_SETUP:
		ret = bcm_rx_setup(&msg_head, msg, ifindex, sk);
		break;

	case TX_DELETE:
		if (bcm_delete_tx_op(&bo->tx_ops, msg_head.can_id, ifindex))
			ret = MHSIZ;
		else
			ret = -EINVAL;
		break;

	case RX_DELETE:
		if (bcm_delete_rx_op(&bo->rx_ops, msg_head.can_id, ifindex))
			ret = MHSIZ;
		else
			ret = -EINVAL;
		break;

	case TX_READ:
		/* reuse msg_head for the reply to TX_READ */
		msg_head.opcode = TX_STATUS;
		ret = bcm_read_op(&bo->tx_ops, &msg_head, ifindex);
		break;

	case RX_READ:
		/* reuse msg_head for the reply to RX_READ */
		msg_head.opcode = RX_STATUS;
		ret = bcm_read_op(&bo->rx_ops, &msg_head, ifindex);
		break;

	case TX_SEND:
		/* we need exactly one can_frame behind the msg head */
		if ((msg_head.nframes != 1) || (size != CFSIZ + MHSIZ))
			ret = -EINVAL;
		else
			ret = bcm_tx_send(msg, ifindex, sk);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	release_sock(sk);

	return ret;
}

/*
 * notification handler for netdevice status changes
 */
static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
			void *data)
{
	struct net_device *dev = (struct net_device *)data;
	struct bcm_sock *bo = container_of(nb, struct bcm_sock, notifier);
	struct sock *sk = &bo->sk;
	struct bcm_op *op;
	int notify_enodev = 0;

	if (!net_eq(dev_net(dev), &init_net))
		return NOTIFY_DONE;

	if (dev->type != ARPHRD_CAN)
		return NOTIFY_DONE;

	switch (msg) {

	case NETDEV_UNREGISTER:
		lock_sock(sk);

		/* remove device specific receive entries */
		list_for_each_entry(op, &bo->rx_ops, list)
			if (op->rx_reg_dev == dev)
				bcm_rx_unreg(dev, op);

		/* remove device reference, if this is our bound device */
		if (bo->bound && bo->ifindex == dev->ifindex) {
			bo->bound   = 0;
			bo->ifindex = 0;
			notify_enodev = 1;
		}

		release_sock(sk);

		if (notify_enodev) {
			sk->sk_err = ENODEV;
			if (!sock_flag(sk, SOCK_DEAD))
				sk->sk_error_report(sk);
		}
		break;

	case NETDEV_DOWN:
		if (bo->bound && bo->ifindex == dev->ifindex) {
			sk->sk_err = ENETDOWN;
			if (!sock_flag(sk, SOCK_DEAD))
				sk->sk_error_report(sk);
		}
	}

	return NOTIFY_DONE;
}

/*
 * initial settings for all BCM sockets to be set at socket creation time
 */
static int bcm_init(struct sock *sk)
{
	struct bcm_sock *bo = bcm_sk(sk);

	bo->bound            = 0;
	bo->ifindex          = 0;
	bo->dropped_usr_msgs = 0;
	bo->bcm_proc_read    = NULL;

	INIT_LIST_HEAD(&bo->tx_ops);
	INIT_LIST_HEAD(&bo->rx_ops);

	/* set notifier */
	bo->notifier.notifier_call = bcm_notifier;

	register_netdevice_notifier(&bo->notifier);

	return 0;
}

/*
 * standard socket functions
 */
static int bcm_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct bcm_sock *bo = bcm_sk(sk);
	struct bcm_op *op, *next;

	/* remove bcm_ops, timer, rx_unregister(), etc. */

	unregister_netdevice_notifier(&bo->notifier);

	lock_sock(sk);

	list_for_each_entry_safe(op, next, &bo->tx_ops, list)
		bcm_remove_op(op);

	list_for_each_entry_safe(op, next, &bo->rx_ops, list) {
		/*
		 * Don't care if we're bound or not (due to netdev problems)
		 * can_rx_unregister() is always a safe thing to do here.
		 */
		if (op->ifindex) {
			/*
			 * Only remove subscriptions that had not
			 * been removed due to NETDEV_UNREGISTER
			 * in bcm_notifier()
			 */
			if (op->rx_reg_dev) {
				struct net_device *dev;

				dev = dev_get_by_index(&init_net, op->ifindex);
				if (dev) {
					bcm_rx_unreg(dev, op);
					dev_put(dev);
				}
			}
		} else
			can_rx_unregister(NULL, op->can_id,
					  REGMASK(op->can_id),
					  bcm_rx_handler, op);

		bcm_remove_op(op);
	}

	/* remove procfs entry */
	if (proc_dir && bo->bcm_proc_read)
		remove_proc_entry(bo->procname, proc_dir);

	/* remove device reference */
	if (bo->bound) {
		bo->bound   = 0;
		bo->ifindex = 0;
	}

	sock_orphan(sk);
	sock->sk = NULL;

	release_sock(sk);
	sock_put(sk);

	return 0;
}

static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
		       int flags)
{
	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
	struct sock *sk = sock->sk;
	struct bcm_sock *bo = bcm_sk(sk);

	if (len < sizeof(*addr))
		return -EINVAL;

	if (bo->bound)
		return -EISCONN;

	/* bind a device to this socket */
	if (addr->can_ifindex) {
		struct net_device *dev;

		dev = dev_get_by_index(&init_net, addr->can_ifindex);
		if (!dev)
			return -ENODEV;

		if (dev->type != ARPHRD_CAN) {
			dev_put(dev);
			return -ENODEV;
		}

		bo->ifindex = dev->ifindex;
		dev_put(dev);

	} else {
		/* no interface reference for ifindex = 0 ('any' CAN device) */
		bo->ifindex = 0;
	}

	bo->bound = 1;

	if (proc_dir) {
		/* unique socket address as filename */
		sprintf(bo->procname, "%p", sock);
		bo->bcm_proc_read = proc_create_data(bo->procname, 0644,
						     proc_dir,
						     &bcm_proc_fops, sk);
	}

	return 0;
}

static int bcm_recvmsg(struct kiocb *iocb, struct socket *sock,
		       struct msghdr *msg, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int error = 0;
	int noblock;
	int err;

	noblock = flags & MSG_DONTWAIT;
	flags &= ~MSG_DONTWAIT;
	skb = skb_recv_datagram(sk, flags, noblock, &error);
	if (!skb)
		return error;

	if (skb->len < size)
		size = skb->len;

	err = memcpy_toiovec(msg->msg_iov, skb->data, size);
	if (err < 0) {
		skb_free_datagram(sk, skb);
		return err;
	}

	sock_recv_ts_and_drops(msg, sk, skb);

	if (msg->msg_name) {
		msg->msg_namelen = sizeof(struct sockaddr_can);
		memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
	}

	skb_free_datagram(sk, skb);

	return size;
}

static struct proto_ops bcm_ops __read_mostly = {
	.family        = PF_CAN,
	.release       = bcm_release,
	.bind          = sock_no_bind,
	.connect       = bcm_connect,
	.socketpair    = sock_no_socketpair,
	.accept        = sock_no_accept,
	.getname       = sock_no_getname,
	.poll          = datagram_poll,
	.ioctl         = NULL,		/* use can_ioctl() from af_can.c */
	.listen        = sock_no_listen,
	.shutdown      = sock_no_shutdown,
	.setsockopt    = sock_no_setsockopt,
	.getsockopt    = sock_no_getsockopt,
	.sendmsg       = bcm_sendmsg,
	.recvmsg       = bcm_recvmsg,
	.mmap          = sock_no_mmap,
	.sendpage      = sock_no_sendpage,
};

static struct proto bcm_proto __read_mostly = {
	.name       = "CAN_BCM",
	.owner      = THIS_MODULE,
	.obj_size   = sizeof(struct bcm_sock),
	.init       = bcm_init,
};

static struct can_proto bcm_can_proto __read_mostly = {
	.type       = SOCK_DGRAM,
	.protocol   = CAN_BCM,
	.ops        = &bcm_ops,
	.prot       = &bcm_proto,
};

static int __init bcm_module_init(void)
{
	int err;

	printk(banner);

	err = can_proto_register(&bcm_can_proto);
	if (err < 0) {
		printk(KERN_ERR "can: registration of bcm protocol failed\n");
		return err;
	}

	/* create /proc/net/can-bcm directory */
	proc_dir = proc_mkdir("can-bcm", init_net.proc_net);
	return 0;
}

static void __exit bcm_module_exit(void)
{
	can_proto_unregister(&bcm_can_proto);

	if (proc_dir)
		proc_net_remove(&init_net, "can-bcm");
}

module_init(bcm_module_init);
module_exit(bcm_module_exit);