/*
 * bcm.c - Broadcast Manager to filter/send (cyclic) CAN content
 *
 * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Volkswagen nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * Alternatively, provided that this notice is retained in full, this
 * software may be distributed under the terms of the GNU General
 * Public License ("GPL") version 2, in which case the provisions of the
 * GPL apply INSTEAD OF those given above.
 *
 * The provided data structures and external interfaces from this code
 * are not restricted to be used by modules with a GPL compatible license.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/hrtimer.h>
#include <linux/list.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/uio.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <linux/can.h>
#include <linux/can/core.h>
#include <linux/can/skb.h>
#include <linux/can/bcm.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/net_namespace.h>

/*
 * To send multiple CAN frame content within TX_SETUP or to filter
 * CAN messages with multiplex index within RX_SETUP, the number of
 * different filters is limited to 256 due to the one byte index value.
 */
#define MAX_NFRAMES 256

/* use of last_frames[index].can_dlc */
#define RX_RECV    0x40 /* received data for this element */
#define RX_THR     0x80 /* element not been sent due to throttle feature */
#define BCM_CAN_DLC_MASK 0x0F /* clean private flags in can_dlc by masking */

/* get best masking value for can_rx_register() for a given single can_id */
#define REGMASK(id) ((id & CAN_EFF_FLAG) ? \
		     (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \
		     (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG))
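/* e.g. REGMASK(0x123) yields (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG),
 * so can_rx_register() matches only the exact SFF id - no EFF/RTR variants */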

#define CAN_BCM_VERSION CAN_VERSION

MODULE_DESCRIPTION("PF_CAN broadcast manager protocol");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
MODULE_ALIAS("can-proto-2");

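/*
 * Typical userspace access (a minimal sketch for illustration only; "can0"
 * is an assumed interface name, error handling omitted):
 *
 *	int s = socket(PF_CAN, SOCK_DGRAM, CAN_BCM);
 *	struct sockaddr_can addr = { .can_family = AF_CAN };
 *	addr.can_ifindex = if_nametoindex("can0");
 *	connect(s, (struct sockaddr *)&addr, sizeof(addr));
 *
 * Every BCM command is then a write() of one struct bcm_msg_head, optionally
 * followed by msg_head.nframes struct can_frame, and is dispatched by
 * bcm_sendmsg() below.
 */
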
/* easy access to can_frame payload */
static inline u64 GET_U64(const struct can_frame *cp)
{
	return *(u64 *)cp->data;
}

struct bcm_op {
	struct list_head list;
	int ifindex;
	canid_t can_id;
	u32 flags;
	unsigned long frames_abs, frames_filtered;
	struct bcm_timeval ival1, ival2;
	struct hrtimer timer, thrtimer;
	struct tasklet_struct tsklet, thrtsklet;
	ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg;
	int rx_ifindex;
	u32 count;
	u32 nframes;
	u32 currframe;
	struct can_frame *frames;
	struct can_frame *last_frames;
	struct can_frame sframe;
	struct can_frame last_sframe;
	struct sock *sk;
	struct net_device *rx_reg_dev;
};

static struct proc_dir_entry *proc_dir;

struct bcm_sock {
	struct sock sk;
	int bound;
	int ifindex;
	struct notifier_block notifier;
	struct list_head rx_ops;
	struct list_head tx_ops;
	unsigned long dropped_usr_msgs;
	struct proc_dir_entry *bcm_proc_read;
	char procname [32]; /* inode number in decimal with \0 */
};

static inline struct bcm_sock *bcm_sk(const struct sock *sk)
{
	return (struct bcm_sock *)sk;
}

static inline ktime_t bcm_timeval_to_ktime(struct bcm_timeval tv)
{
	return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC);
}

#define CFSIZ sizeof(struct can_frame)
#define OPSIZ sizeof(struct bcm_op)
#define MHSIZ sizeof(struct bcm_msg_head)

/*
 * procfs functions
 */
static char *bcm_proc_getifname(char *result, int ifindex)
{
	struct net_device *dev;

	if (!ifindex)
		return "any";

	rcu_read_lock();
	dev = dev_get_by_index_rcu(&init_net, ifindex);
	if (dev)
		strcpy(result, dev->name);
	else
		strcpy(result, "???");
	rcu_read_unlock();

	return result;
}

static int bcm_proc_show(struct seq_file *m, void *v)
{
	char ifname[IFNAMSIZ];
	struct sock *sk = (struct sock *)m->private;
	struct bcm_sock *bo = bcm_sk(sk);
	struct bcm_op *op;

	seq_printf(m, ">>> socket %pK", sk->sk_socket);
	seq_printf(m, " / sk %pK", sk);
	seq_printf(m, " / bo %pK", bo);
	seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
	seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
	seq_printf(m, " <<<\n");

	list_for_each_entry(op, &bo->rx_ops, list) {

		unsigned long reduction;

		/* print only active entries & prevent division by zero */
		if (!op->frames_abs)
			continue;

		seq_printf(m, "rx_op: %03X %-5s ",
			   op->can_id, bcm_proc_getifname(ifname, op->ifindex));
		seq_printf(m, "[%u]%c ", op->nframes,
			   (op->flags & RX_CHECK_DLC)?'d':' ');
		if (op->kt_ival1.tv64)
			seq_printf(m, "timeo=%lld ",
				   (long long)
				   ktime_to_us(op->kt_ival1));

		if (op->kt_ival2.tv64)
			seq_printf(m, "thr=%lld ",
				   (long long)
				   ktime_to_us(op->kt_ival2));

		seq_printf(m, "# recv %ld (%ld) => reduction: ",
			   op->frames_filtered, op->frames_abs);

		reduction = 100 - (op->frames_filtered * 100) / op->frames_abs;

		seq_printf(m, "%s%ld%%\n",
			   (reduction == 100)?"near ":"", reduction);
	}

	list_for_each_entry(op, &bo->tx_ops, list) {

		seq_printf(m, "tx_op: %03X %s [%u] ",
			   op->can_id,
			   bcm_proc_getifname(ifname, op->ifindex),
			   op->nframes);

		if (op->kt_ival1.tv64)
			seq_printf(m, "t1=%lld ",
				   (long long) ktime_to_us(op->kt_ival1));

		if (op->kt_ival2.tv64)
			seq_printf(m, "t2=%lld ",
				   (long long) ktime_to_us(op->kt_ival2));

		seq_printf(m, "# sent %ld\n", op->frames_abs);
	}
	seq_putc(m, '\n');
	return 0;
}

static int bcm_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, bcm_proc_show, PDE_DATA(inode));
}

static const struct file_operations bcm_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= bcm_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/*
 * bcm_can_tx - send the (next) CAN frame to the appropriate CAN interface
 *              of the given bcm tx op
 */
static void bcm_can_tx(struct bcm_op *op)
{
	struct sk_buff *skb;
	struct net_device *dev;
	struct can_frame *cf = &op->frames[op->currframe];

	/* no target device? => exit */
	if (!op->ifindex)
		return;

	dev = dev_get_by_index(&init_net, op->ifindex);
	if (!dev) {
		/* RFC: should this bcm_op remove itself here? */
		return;
	}

	skb = alloc_skb(CFSIZ + sizeof(struct can_skb_priv), gfp_any());
	if (!skb)
		goto out;

	can_skb_reserve(skb);
	can_skb_prv(skb)->ifindex = dev->ifindex;
	can_skb_prv(skb)->skbcnt = 0;

	memcpy(skb_put(skb, CFSIZ), cf, CFSIZ);

	/* send with loopback */
	skb->dev = dev;
	can_skb_set_owner(skb, op->sk);
	can_send(skb, 1);

	/* update statistics */
	op->currframe++;
	op->frames_abs++;

	/* reached last frame? */
	if (op->currframe >= op->nframes)
		op->currframe = 0;
 out:
	dev_put(dev);
}

/*
 * bcm_send_to_user - send a BCM message to the userspace
 *                    (consisting of bcm_msg_head + x CAN frames)
 */
static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
			     struct can_frame *frames, int has_timestamp)
{
	struct sk_buff *skb;
	struct can_frame *firstframe;
	struct sockaddr_can *addr;
	struct sock *sk = op->sk;
	unsigned int datalen = head->nframes * CFSIZ;
	int err;

	skb = alloc_skb(sizeof(*head) + datalen, gfp_any());
	if (!skb)
		return;

	memcpy(skb_put(skb, sizeof(*head)), head, sizeof(*head));

	if (head->nframes) {
		/* can_frames starting here */
		firstframe = (struct can_frame *)skb_tail_pointer(skb);

		memcpy(skb_put(skb, datalen), frames, datalen);

		/*
		 * the BCM uses the can_dlc-element of the can_frame
		 * structure for internal purposes. This is only
		 * relevant for updates that are generated by the
		 * BCM, where nframes is 1
		 */
		if (head->nframes == 1)
			firstframe->can_dlc &= BCM_CAN_DLC_MASK;
	}

	if (has_timestamp) {
		/* restore rx timestamp */
		skb->tstamp = op->rx_stamp;
	}

	/*
	 * Put the datagram to the queue so that bcm_recvmsg() can
	 * get it from there. We need to pass the interface index to
	 * bcm_recvmsg(). We pass a whole struct sockaddr_can in skb->cb
	 * containing the interface index.
	 */

	sock_skb_cb_check_size(sizeof(struct sockaddr_can));
	addr = (struct sockaddr_can *)skb->cb;
	memset(addr, 0, sizeof(*addr));
	addr->can_family  = AF_CAN;
	addr->can_ifindex = op->rx_ifindex;

	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0) {
		struct bcm_sock *bo = bcm_sk(sk);

		kfree_skb(skb);
		/* don't care about overflows in this statistic */
		bo->dropped_usr_msgs++;
	}
}

static void bcm_tx_start_timer(struct bcm_op *op)
{
	if (op->kt_ival1.tv64 && op->count)
		hrtimer_start(&op->timer,
			      ktime_add(ktime_get(), op->kt_ival1),
			      HRTIMER_MODE_ABS);
	else if (op->kt_ival2.tv64)
		hrtimer_start(&op->timer,
			      ktime_add(ktime_get(), op->kt_ival2),
			      HRTIMER_MODE_ABS);
}

static void bcm_tx_timeout_tsklet(unsigned long data)
{
	struct bcm_op *op = (struct bcm_op *)data;
	struct bcm_msg_head msg_head;

	if (op->kt_ival1.tv64 && (op->count > 0)) {

		op->count--;
		if (!op->count && (op->flags & TX_COUNTEVT)) {

			/* create notification to user */
			msg_head.opcode  = TX_EXPIRED;
			msg_head.flags   = op->flags;
			msg_head.count   = op->count;
			msg_head.ival1   = op->ival1;
			msg_head.ival2   = op->ival2;
			msg_head.can_id  = op->can_id;
			msg_head.nframes = 0;

			bcm_send_to_user(op, &msg_head, NULL, 0);
		}
		bcm_can_tx(op);

	} else if (op->kt_ival2.tv64)
		bcm_can_tx(op);

	bcm_tx_start_timer(op);
}

/*
 * bcm_tx_timeout_handler - performs cyclic CAN frame transmissions
 */
static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
{
	struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);

	tasklet_schedule(&op->tsklet);

	return HRTIMER_NORESTART;
}

/*
 * bcm_rx_changed - create a RX_CHANGED notification due to changed content
 */
static void bcm_rx_changed(struct bcm_op *op, struct can_frame *data)
{
	struct bcm_msg_head head;

	/* update statistics */
	op->frames_filtered++;

	/* prevent statistics overflow */
	if (op->frames_filtered > ULONG_MAX/100)
		op->frames_filtered = op->frames_abs = 0;

	/* this element is not throttled anymore */
	data->can_dlc &= (BCM_CAN_DLC_MASK|RX_RECV);

	head.opcode  = RX_CHANGED;
	head.flags   = op->flags;
	head.count   = op->count;
	head.ival1   = op->ival1;
	head.ival2   = op->ival2;
	head.can_id  = op->can_id;
	head.nframes = 1;

	bcm_send_to_user(op, &head, data, 1);
}

/*
 * bcm_rx_update_and_send - process a detected relevant receive content change
 *                          1. update the last received data
 *                          2. send a notification to the user (if possible)
 */
static void bcm_rx_update_and_send(struct bcm_op *op,
				   struct can_frame *lastdata,
				   const struct can_frame *rxdata)
{
	memcpy(lastdata, rxdata, CFSIZ);

	/* mark as used and throttled by default */
	lastdata->can_dlc |= (RX_RECV|RX_THR);

	/* throttling mode inactive ? */
	if (!op->kt_ival2.tv64) {
		/* send RX_CHANGED to the user immediately */
		bcm_rx_changed(op, lastdata);
		return;
	}

	/* with active throttling timer we are just done here */
	if (hrtimer_active(&op->thrtimer))
		return;

	/* first reception with enabled throttling mode */
	if (!op->kt_lastmsg.tv64)
		goto rx_changed_settime;

	/* got a second frame inside a potential throttle period? */
	if (ktime_us_delta(ktime_get(), op->kt_lastmsg) <
	    ktime_to_us(op->kt_ival2)) {
		/* do not send the saved data - only start throttle timer */
		hrtimer_start(&op->thrtimer,
			      ktime_add(op->kt_lastmsg, op->kt_ival2),
			      HRTIMER_MODE_ABS);
		return;
	}

	/* the gap was big enough that throttling was not needed here */
rx_changed_settime:
	bcm_rx_changed(op, lastdata);
	op->kt_lastmsg = ktime_get();
}
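
/*
 * Summary of the throttling scheme above: kt_ival2 defines the minimum gap
 * between two RX_CHANGED notifications for this op. Content changes that
 * arrive earlier are only stored (marked RX_THR in last_frames[]) and are
 * flushed later by thrtimer/thrtsklet via bcm_rx_thr_flush().
 */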

/*
 * bcm_rx_cmp_to_index - (bit)compares the currently received data to formerly
 *                       received data stored in op->last_frames[]
 */
static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index,
				const struct can_frame *rxdata)
{
	/*
	 * no one uses the MSBs of can_dlc for comparison,
	 * so we use it here to detect the first time of reception
	 */

	if (!(op->last_frames[index].can_dlc & RX_RECV)) {
		/* received data for the first time => send update to user */
		bcm_rx_update_and_send(op, &op->last_frames[index], rxdata);
		return;
	}

	/* do a real check in can_frame data section */

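	/*
	 * op->frames[index] acts as the relevant-bits mask here: e.g. with
	 * data[] = { 0xff, 0x00, ... } only changes in the first payload byte
	 * are treated as a content change and reported via RX_CHANGED.
	 */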
	if ((GET_U64(&op->frames[index]) & GET_U64(rxdata)) !=
	    (GET_U64(&op->frames[index]) & GET_U64(&op->last_frames[index]))) {
		bcm_rx_update_and_send(op, &op->last_frames[index], rxdata);
		return;
	}

	if (op->flags & RX_CHECK_DLC) {
		/* do a real check in can_frame dlc */
		if (rxdata->can_dlc != (op->last_frames[index].can_dlc &
					BCM_CAN_DLC_MASK)) {
			bcm_rx_update_and_send(op, &op->last_frames[index],
					       rxdata);
			return;
		}
	}
}

/*
 * bcm_rx_starttimer - enable timeout monitoring for CAN frame reception
 */
static void bcm_rx_starttimer(struct bcm_op *op)
{
	if (op->flags & RX_NO_AUTOTIMER)
		return;

	if (op->kt_ival1.tv64)
		hrtimer_start(&op->timer, op->kt_ival1, HRTIMER_MODE_REL);
}

static void bcm_rx_timeout_tsklet(unsigned long data)
{
	struct bcm_op *op = (struct bcm_op *)data;
	struct bcm_msg_head msg_head;

	/* create notification to user */
	msg_head.opcode  = RX_TIMEOUT;
	msg_head.flags   = op->flags;
	msg_head.count   = op->count;
	msg_head.ival1   = op->ival1;
	msg_head.ival2   = op->ival2;
	msg_head.can_id  = op->can_id;
	msg_head.nframes = 0;

	bcm_send_to_user(op, &msg_head, NULL, 0);
}

/*
 * bcm_rx_timeout_handler - when the (cyclic) CAN frame reception timed out
 */
static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
{
	struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);

	/* schedule before NET_RX_SOFTIRQ */
	tasklet_hi_schedule(&op->tsklet);

	/* no restart of the timer is done here! */

	/* if user wants to be informed when cyclic CAN messages come back */
	if ((op->flags & RX_ANNOUNCE_RESUME) && op->last_frames) {
		/* clear received can_frames to indicate 'nothing received' */
		memset(op->last_frames, 0, op->nframes * CFSIZ);
	}

	return HRTIMER_NORESTART;
}

/*
 * bcm_rx_do_flush - helper for bcm_rx_thr_flush
 */
static inline int bcm_rx_do_flush(struct bcm_op *op, int update,
				  unsigned int index)
{
	if ((op->last_frames) && (op->last_frames[index].can_dlc & RX_THR)) {
		if (update)
			bcm_rx_changed(op, &op->last_frames[index]);
		return 1;
	}
	return 0;
}

/*
 * bcm_rx_thr_flush - Check for throttled data and send it to the userspace
 *
 * update == 0 : just check if throttled data is available (any irq context)
 * update == 1 : check and send throttled data to userspace (soft_irq context)
 */
static int bcm_rx_thr_flush(struct bcm_op *op, int update)
{
	int updated = 0;

	if (op->nframes > 1) {
		unsigned int i;

		/* for MUX filter we start at index 1 */
		for (i = 1; i < op->nframes; i++)
			updated += bcm_rx_do_flush(op, update, i);

	} else {
		/* for RX_FILTER_ID and simple filter */
		updated += bcm_rx_do_flush(op, update, 0);
	}

	return updated;
}

static void bcm_rx_thr_tsklet(unsigned long data)
{
	struct bcm_op *op = (struct bcm_op *)data;

	/* push the changed data to the userspace */
	bcm_rx_thr_flush(op, 1);
}

/*
 * bcm_rx_thr_handler - the time for blocked content updates is over now:
 *                      Check for throttled data and send it to the userspace
 */
static enum hrtimer_restart bcm_rx_thr_handler(struct hrtimer *hrtimer)
{
	struct bcm_op *op = container_of(hrtimer, struct bcm_op, thrtimer);

	tasklet_schedule(&op->thrtsklet);

	if (bcm_rx_thr_flush(op, 0)) {
		hrtimer_forward(hrtimer, ktime_get(), op->kt_ival2);
		return HRTIMER_RESTART;
	} else {
		/* rearm throttle handling */
		op->kt_lastmsg = ktime_set(0, 0);
		return HRTIMER_NORESTART;
	}
}

/*
 * bcm_rx_handler - handle a CAN frame reception
 */
static void bcm_rx_handler(struct sk_buff *skb, void *data)
{
	struct bcm_op *op = (struct bcm_op *)data;
	const struct can_frame *rxframe = (struct can_frame *)skb->data;
	unsigned int i;

	/* disable timeout */
	hrtimer_cancel(&op->timer);

	if (op->can_id != rxframe->can_id)
		return;

	/* save rx timestamp */
	op->rx_stamp = skb->tstamp;
	/* save originator for recvfrom() */
	op->rx_ifindex = skb->dev->ifindex;
	/* update statistics */
	op->frames_abs++;

	if (op->flags & RX_RTR_FRAME) {
		/* send reply for RTR-request (placed in op->frames[0]) */
		bcm_can_tx(op);
		return;
	}

	if (op->flags & RX_FILTER_ID) {
		/* the easiest case */
		bcm_rx_update_and_send(op, &op->last_frames[0], rxframe);
		goto rx_starttimer;
	}

	if (op->nframes == 1) {
		/* simple compare with index 0 */
		bcm_rx_cmp_to_index(op, 0, rxframe);
		goto rx_starttimer;
	}

	if (op->nframes > 1) {
		/*
		 * multiplex compare
		 *
		 * find the first multiplex mask that fits.
		 * Remark: The MUX-mask is stored in index 0
		 */

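		/*
		 * Example: if frames[0].data[0] == 0xff, the first payload
		 * byte carries the mux value; frames[1..nframes-1] hold the
		 * expected mux values and are also used as the relevant-bits
		 * masks in bcm_rx_cmp_to_index().
		 */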
		for (i = 1; i < op->nframes; i++) {
			if ((GET_U64(&op->frames[0]) & GET_U64(rxframe)) ==
			    (GET_U64(&op->frames[0]) &
			     GET_U64(&op->frames[i]))) {
				bcm_rx_cmp_to_index(op, i, rxframe);
				break;
			}
		}
	}

rx_starttimer:
	bcm_rx_starttimer(op);
}

/*
 * helpers for bcm_op handling: find & delete bcm [rx|tx] op elements
 */
static struct bcm_op *bcm_find_op(struct list_head *ops, canid_t can_id,
				  int ifindex)
{
	struct bcm_op *op;

	list_for_each_entry(op, ops, list) {
		if ((op->can_id == can_id) && (op->ifindex == ifindex))
			return op;
	}

	return NULL;
}

static void bcm_remove_op(struct bcm_op *op)
{
	hrtimer_cancel(&op->timer);
	hrtimer_cancel(&op->thrtimer);

	if (op->tsklet.func)
		tasklet_kill(&op->tsklet);

	if (op->thrtsklet.func)
		tasklet_kill(&op->thrtsklet);

	if ((op->frames) && (op->frames != &op->sframe))
		kfree(op->frames);

	if ((op->last_frames) && (op->last_frames != &op->last_sframe))
		kfree(op->last_frames);

	kfree(op);
}

static void bcm_rx_unreg(struct net_device *dev, struct bcm_op *op)
{
	if (op->rx_reg_dev == dev) {
		can_rx_unregister(dev, op->can_id, REGMASK(op->can_id),
				  bcm_rx_handler, op);

		/* mark as removed subscription */
		op->rx_reg_dev = NULL;
	} else
		printk(KERN_ERR "can-bcm: bcm_rx_unreg: registered device "
		       "mismatch %p %p\n", op->rx_reg_dev, dev);
}

/*
 * bcm_delete_rx_op - find and remove a rx op (returns number of removed ops)
 */
static int bcm_delete_rx_op(struct list_head *ops, canid_t can_id, int ifindex)
{
	struct bcm_op *op, *n;

	list_for_each_entry_safe(op, n, ops, list) {
		if ((op->can_id == can_id) && (op->ifindex == ifindex)) {

			/*
			 * Don't care if we're bound or not (due to netdev
			 * problems) can_rx_unregister() is always a safe
			 * thing to do here.
			 */
			if (op->ifindex) {
				/*
				 * Only remove subscriptions that had not
				 * been removed due to NETDEV_UNREGISTER
				 * in bcm_notifier()
				 */
				if (op->rx_reg_dev) {
					struct net_device *dev;

					dev = dev_get_by_index(&init_net,
							       op->ifindex);
					if (dev) {
						bcm_rx_unreg(dev, op);
						dev_put(dev);
					}
				}
			} else
				can_rx_unregister(NULL, op->can_id,
						  REGMASK(op->can_id),
						  bcm_rx_handler, op);

			list_del(&op->list);
			bcm_remove_op(op);
			return 1; /* done */
		}
	}

	return 0; /* not found */
}

/*
 * bcm_delete_tx_op - find and remove a tx op (returns number of removed ops)
 */
static int bcm_delete_tx_op(struct list_head *ops, canid_t can_id, int ifindex)
{
	struct bcm_op *op, *n;

	list_for_each_entry_safe(op, n, ops, list) {
		if ((op->can_id == can_id) && (op->ifindex == ifindex)) {
			list_del(&op->list);
			bcm_remove_op(op);
			return 1; /* done */
		}
	}

	return 0; /* not found */
}

/*
 * bcm_read_op - read out a bcm_op and send it to the user (for bcm_sendmsg)
 */
static int bcm_read_op(struct list_head *ops, struct bcm_msg_head *msg_head,
		       int ifindex)
{
	struct bcm_op *op = bcm_find_op(ops, msg_head->can_id, ifindex);

	if (!op)
		return -EINVAL;

	/* put current values into msg_head */
	msg_head->flags   = op->flags;
	msg_head->count   = op->count;
	msg_head->ival1   = op->ival1;
	msg_head->ival2   = op->ival2;
	msg_head->nframes = op->nframes;

	bcm_send_to_user(op, msg_head, op->frames, 0);

	return MHSIZ;
}

/*
 * bcm_tx_setup - create or update a bcm tx op (for bcm_sendmsg)
 */
static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
			int ifindex, struct sock *sk)
{
	struct bcm_sock *bo = bcm_sk(sk);
	struct bcm_op *op;
	unsigned int i;
	int err;

	/* we need a real device to send frames */
	if (!ifindex)
		return -ENODEV;

	/* check nframes boundaries - we need at least one can_frame */
	if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES)
		return -EINVAL;

	/* check the given can_id */
	op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex);

	if (op) {
		/* update existing BCM operation */

		/*
		 * Do we need more space for the can_frames than currently
		 * allocated? -> This is a _really_ unusual use-case and
		 * therefore (complexity / locking) it is not supported.
		 */
		if (msg_head->nframes > op->nframes)
			return -E2BIG;

		/* update can_frames content */
		for (i = 0; i < msg_head->nframes; i++) {
			err = memcpy_from_msg((u8 *)&op->frames[i], msg, CFSIZ);

			if (op->frames[i].can_dlc > 8)
				err = -EINVAL;

			if (err < 0)
				return err;

			if (msg_head->flags & TX_CP_CAN_ID) {
				/* copy can_id into frame */
				op->frames[i].can_id = msg_head->can_id;
			}
		}

	} else {
		/* insert new BCM operation for the given can_id */

		op = kzalloc(OPSIZ, GFP_KERNEL);
		if (!op)
			return -ENOMEM;

		op->can_id = msg_head->can_id;

		/* create array for can_frames and copy the data */
		if (msg_head->nframes > 1) {
			op->frames = kmalloc(msg_head->nframes * CFSIZ,
					     GFP_KERNEL);
			if (!op->frames) {
				kfree(op);
				return -ENOMEM;
			}
		} else
			op->frames = &op->sframe;

		for (i = 0; i < msg_head->nframes; i++) {
			err = memcpy_from_msg((u8 *)&op->frames[i], msg, CFSIZ);

			if (op->frames[i].can_dlc > 8)
				err = -EINVAL;

			if (err < 0) {
				if (op->frames != &op->sframe)
					kfree(op->frames);
				kfree(op);
				return err;
			}

			if (msg_head->flags & TX_CP_CAN_ID) {
				/* copy can_id into frame */
				op->frames[i].can_id = msg_head->can_id;
			}
		}

		/* tx_ops never compare with previous received messages */
		op->last_frames = NULL;

		/* bcm_can_tx / bcm_tx_timeout_handler needs this */
		op->sk = sk;
		op->ifindex = ifindex;

		/* initialize uninitialized (kzalloc) structure */
		hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		op->timer.function = bcm_tx_timeout_handler;

		/* initialize tasklet for tx countevent notification */
		tasklet_init(&op->tsklet, bcm_tx_timeout_tsklet,
			     (unsigned long) op);

		/* currently unused in tx_ops */
		hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);

		/* add this bcm_op to the list of the tx_ops */
		list_add(&op->list, &bo->tx_ops);

	} /* if ((op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex))) */

	if (op->nframes != msg_head->nframes) {
		op->nframes   = msg_head->nframes;
		/* start multiple frame transmission with index 0 */
		op->currframe = 0;
	}

	/* check flags */

	op->flags = msg_head->flags;

	if (op->flags & TX_RESET_MULTI_IDX) {
		/* start multiple frame transmission with index 0 */
		op->currframe = 0;
	}

	if (op->flags & SETTIMER) {
		/* set timer values */
		op->count = msg_head->count;
		op->ival1 = msg_head->ival1;
		op->ival2 = msg_head->ival2;
		op->kt_ival1 = bcm_timeval_to_ktime(msg_head->ival1);
		op->kt_ival2 = bcm_timeval_to_ktime(msg_head->ival2);

		/* disable an active timer due to zero values? */
		if (!op->kt_ival1.tv64 && !op->kt_ival2.tv64)
			hrtimer_cancel(&op->timer);
	}

	if (op->flags & STARTTIMER) {
		hrtimer_cancel(&op->timer);
		/* spec: send can_frame when starting timer */
		op->flags |= TX_ANNOUNCE;
	}

	if (op->flags & TX_ANNOUNCE) {
		bcm_can_tx(op);
		if (op->count)
			op->count--;
	}

	if (op->flags & STARTTIMER)
		bcm_tx_start_timer(op);

	return msg_head->nframes * CFSIZ + MHSIZ;
}
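
/*
 * Illustrative TX_SETUP message as sent from userspace (a sketch only;
 * socket setup as in the example near the top of this file, values are
 * arbitrary):
 *
 *	struct {
 *		struct bcm_msg_head msg_head;
 *		struct can_frame frame;
 *	} tx = {
 *		.msg_head = {
 *			.opcode  = TX_SETUP,
 *			.flags   = SETTIMER | STARTTIMER | TX_COUNTEVT,
 *			.count   = 10,
 *			.ival1   = { .tv_sec = 0, .tv_usec = 100000 },
 *			.ival2   = { .tv_sec = 1, .tv_usec = 0 },
 *			.can_id  = 0x123,
 *			.nframes = 1,
 *		},
 *		.frame = { .can_id = 0x123, .can_dlc = 2,
 *			   .data = { 0x11, 0x22 } },
 *	};
 *	write(s, &tx, sizeof(tx));
 *
 * transmits the frame ten times at 100 ms intervals, then continues with a
 * 1 s cycle and signals TX_EXPIRED once the counter has run out.
 */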

/*
 * bcm_rx_setup - create or update a bcm rx op (for bcm_sendmsg)
 */
static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
			int ifindex, struct sock *sk)
{
	struct bcm_sock *bo = bcm_sk(sk);
	struct bcm_op *op;
	int do_rx_register;
	int err = 0;

	if ((msg_head->flags & RX_FILTER_ID) || (!(msg_head->nframes))) {
		/* be robust against wrong usage ... */
		msg_head->flags |= RX_FILTER_ID;
		/* ignore trailing garbage */
		msg_head->nframes = 0;
	}

	/* the first element contains the mux-mask => MAX_NFRAMES + 1 */
	if (msg_head->nframes > MAX_NFRAMES + 1)
		return -EINVAL;

	if ((msg_head->flags & RX_RTR_FRAME) &&
	    ((msg_head->nframes != 1) ||
	     (!(msg_head->can_id & CAN_RTR_FLAG))))
		return -EINVAL;

	/* check the given can_id */
	op = bcm_find_op(&bo->rx_ops, msg_head->can_id, ifindex);
	if (op) {
		/* update existing BCM operation */

		/*
		 * Do we need more space for the can_frames than currently
		 * allocated? -> This is a _really_ unusual use-case and
		 * therefore (complexity / locking) it is not supported.
		 */
		if (msg_head->nframes > op->nframes)
			return -E2BIG;

		if (msg_head->nframes) {
			/* update can_frames content */
			err = memcpy_from_msg((u8 *)op->frames, msg,
					      msg_head->nframes * CFSIZ);
			if (err < 0)
				return err;

			/* clear last_frames to indicate 'nothing received' */
			memset(op->last_frames, 0, msg_head->nframes * CFSIZ);
		}

		op->nframes = msg_head->nframes;

		/* Only an update -> do not call can_rx_register() */
		do_rx_register = 0;

	} else {
		/* insert new BCM operation for the given can_id */
		op = kzalloc(OPSIZ, GFP_KERNEL);
		if (!op)
			return -ENOMEM;

		op->can_id = msg_head->can_id;
		op->nframes = msg_head->nframes;

		if (msg_head->nframes > 1) {
			/* create array for can_frames and copy the data */
			op->frames = kmalloc(msg_head->nframes * CFSIZ,
					     GFP_KERNEL);
			if (!op->frames) {
				kfree(op);
				return -ENOMEM;
			}

			/* create and init array for received can_frames */
			op->last_frames = kzalloc(msg_head->nframes * CFSIZ,
						  GFP_KERNEL);
			if (!op->last_frames) {
				kfree(op->frames);
				kfree(op);
				return -ENOMEM;
			}

		} else {
			op->frames = &op->sframe;
			op->last_frames = &op->last_sframe;
		}

		if (msg_head->nframes) {
			err = memcpy_from_msg((u8 *)op->frames, msg,
					      msg_head->nframes * CFSIZ);
			if (err < 0) {
				if (op->frames != &op->sframe)
					kfree(op->frames);
				if (op->last_frames != &op->last_sframe)
					kfree(op->last_frames);
				kfree(op);
				return err;
			}
		}

		/* bcm_can_tx / bcm_tx_timeout_handler needs this */
		op->sk = sk;
		op->ifindex = ifindex;

		/* ifindex for timeout events w/o previous frame reception */
		op->rx_ifindex = ifindex;

		/* initialize uninitialized (kzalloc) structure */
		hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		op->timer.function = bcm_rx_timeout_handler;

		/* initialize tasklet for rx timeout notification */
		tasklet_init(&op->tsklet, bcm_rx_timeout_tsklet,
			     (unsigned long) op);

		hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		op->thrtimer.function = bcm_rx_thr_handler;

		/* initialize tasklet for rx throttle handling */
		tasklet_init(&op->thrtsklet, bcm_rx_thr_tsklet,
			     (unsigned long) op);

		/* add this bcm_op to the list of the rx_ops */
		list_add(&op->list, &bo->rx_ops);

		/* call can_rx_register() */
		do_rx_register = 1;

	} /* if ((op = bcm_find_op(&bo->rx_ops, msg_head->can_id, ifindex))) */

	/* check flags */
	op->flags = msg_head->flags;

	if (op->flags & RX_RTR_FRAME) {

		/* no timers in RTR-mode */
		hrtimer_cancel(&op->thrtimer);
		hrtimer_cancel(&op->timer);

		/*
		 * funny feature in RX(!)_SETUP only for RTR-mode:
		 * copy can_id into frame BUT without RTR-flag to
		 * prevent a full-load-loopback-test ... ;-]
		 */
		if ((op->flags & TX_CP_CAN_ID) ||
		    (op->frames[0].can_id == op->can_id))
			op->frames[0].can_id = op->can_id & ~CAN_RTR_FLAG;

	} else {
		if (op->flags & SETTIMER) {

			/* set timer value */
			op->ival1 = msg_head->ival1;
			op->ival2 = msg_head->ival2;
			op->kt_ival1 = bcm_timeval_to_ktime(msg_head->ival1);
			op->kt_ival2 = bcm_timeval_to_ktime(msg_head->ival2);

			/* disable an active timer due to zero value? */
			if (!op->kt_ival1.tv64)
				hrtimer_cancel(&op->timer);

			/*
			 * In any case cancel the throttle timer, flush
			 * potentially blocked msgs and reset throttle handling
			 */
			op->kt_lastmsg = ktime_set(0, 0);
			hrtimer_cancel(&op->thrtimer);
			bcm_rx_thr_flush(op, 1);
		}

		if ((op->flags & STARTTIMER) && op->kt_ival1.tv64)
			hrtimer_start(&op->timer, op->kt_ival1,
				      HRTIMER_MODE_REL);
	}

	/* now we can register for can_ids, if we added a new bcm_op */
	if (do_rx_register) {
		if (ifindex) {
			struct net_device *dev;

			dev = dev_get_by_index(&init_net, ifindex);
			if (dev) {
				err = can_rx_register(dev, op->can_id,
						      REGMASK(op->can_id),
						      bcm_rx_handler, op,
						      "bcm");

				op->rx_reg_dev = dev;
				dev_put(dev);
			}

		} else
			err = can_rx_register(NULL, op->can_id,
					      REGMASK(op->can_id),
					      bcm_rx_handler, op, "bcm");
		if (err) {
			/* this bcm rx op is broken -> remove it */
			list_del(&op->list);
			bcm_remove_op(op);
			return err;
		}
	}

	return msg_head->nframes * CFSIZ + MHSIZ;
}
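
/*
 * Illustrative RX_SETUP message from userspace (again only a sketch with
 * arbitrary values):
 *
 *	struct bcm_msg_head rx = {
 *		.opcode  = RX_SETUP,
 *		.flags   = RX_FILTER_ID | SETTIMER | STARTTIMER,
 *		.ival1   = { .tv_sec = 1, .tv_usec = 0 },
 *		.can_id  = 0x123,
 *		.nframes = 0,
 *	};
 *	write(s, &rx, sizeof(rx));
 *
 * forwards every received frame with can_id 0x123 as RX_CHANGED and raises
 * RX_TIMEOUT whenever the reception pauses for more than one second.
 */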

/*
 * bcm_tx_send - send a single CAN frame to the CAN interface (for bcm_sendmsg)
 */
static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk)
{
	struct sk_buff *skb;
	struct net_device *dev;
	int err;

	/* we need a real device to send frames */
	if (!ifindex)
		return -ENODEV;

	skb = alloc_skb(CFSIZ + sizeof(struct can_skb_priv), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	can_skb_reserve(skb);

	err = memcpy_from_msg(skb_put(skb, CFSIZ), msg, CFSIZ);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	dev = dev_get_by_index(&init_net, ifindex);
	if (!dev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	can_skb_prv(skb)->ifindex = dev->ifindex;
	can_skb_prv(skb)->skbcnt = 0;
	skb->dev = dev;
	can_skb_set_owner(skb, sk);
	err = can_send(skb, 1); /* send with loopback */
	dev_put(dev);

	if (err)
		return err;

	return CFSIZ + MHSIZ;
}

/*
 * bcm_sendmsg - process BCM commands (opcodes) from the userspace
 */
static int bcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
	struct sock *sk = sock->sk;
	struct bcm_sock *bo = bcm_sk(sk);
	int ifindex = bo->ifindex; /* default ifindex for this bcm_op */
	struct bcm_msg_head msg_head;
	int ret; /* read bytes or error codes as return value */

	if (!bo->bound)
		return -ENOTCONN;

	/* check for valid message length from userspace */
	if (size < MHSIZ || (size - MHSIZ) % CFSIZ)
		return -EINVAL;

	/* check for alternative ifindex for this bcm_op */

	if (!ifindex && msg->msg_name) {
		/* no bound device as default => check msg_name */
		DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name);

		if (msg->msg_namelen < sizeof(*addr))
			return -EINVAL;

		if (addr->can_family != AF_CAN)
			return -EINVAL;

		/* ifindex from sendto() */
		ifindex = addr->can_ifindex;

		if (ifindex) {
			struct net_device *dev;

			dev = dev_get_by_index(&init_net, ifindex);
			if (!dev)
				return -ENODEV;

			if (dev->type != ARPHRD_CAN) {
				dev_put(dev);
				return -ENODEV;
			}

			dev_put(dev);
		}
	}

	/* read message head information */

	ret = memcpy_from_msg((u8 *)&msg_head, msg, MHSIZ);
	if (ret < 0)
		return ret;

	lock_sock(sk);

	switch (msg_head.opcode) {

	case TX_SETUP:
		ret = bcm_tx_setup(&msg_head, msg, ifindex, sk);
		break;

	case RX_SETUP:
		ret = bcm_rx_setup(&msg_head, msg, ifindex, sk);
		break;

	case TX_DELETE:
		if (bcm_delete_tx_op(&bo->tx_ops, msg_head.can_id, ifindex))
			ret = MHSIZ;
		else
			ret = -EINVAL;
		break;

	case RX_DELETE:
		if (bcm_delete_rx_op(&bo->rx_ops, msg_head.can_id, ifindex))
			ret = MHSIZ;
		else
			ret = -EINVAL;
		break;

	case TX_READ:
		/* reuse msg_head for the reply to TX_READ */
		msg_head.opcode  = TX_STATUS;
		ret = bcm_read_op(&bo->tx_ops, &msg_head, ifindex);
		break;

	case RX_READ:
		/* reuse msg_head for the reply to RX_READ */
		msg_head.opcode  = RX_STATUS;
		ret = bcm_read_op(&bo->rx_ops, &msg_head, ifindex);
		break;

	case TX_SEND:
		/* we need exactly one can_frame behind the msg head */
		if ((msg_head.nframes != 1) || (size != CFSIZ + MHSIZ))
			ret = -EINVAL;
		else
			ret = bcm_tx_send(msg, ifindex, sk);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	release_sock(sk);

	return ret;
}

/*
 * notification handler for netdevice status changes
 */
static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
			void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct bcm_sock *bo = container_of(nb, struct bcm_sock, notifier);
	struct sock *sk = &bo->sk;
	struct bcm_op *op;
	int notify_enodev = 0;

	if (!net_eq(dev_net(dev), &init_net))
		return NOTIFY_DONE;

	if (dev->type != ARPHRD_CAN)
		return NOTIFY_DONE;

	switch (msg) {

	case NETDEV_UNREGISTER:
		lock_sock(sk);

		/* remove device specific receive entries */
		list_for_each_entry(op, &bo->rx_ops, list)
			if (op->rx_reg_dev == dev)
				bcm_rx_unreg(dev, op);

		/* remove device reference, if this is our bound device */
		if (bo->bound && bo->ifindex == dev->ifindex) {
			bo->bound   = 0;
			bo->ifindex = 0;
			notify_enodev = 1;
		}

		release_sock(sk);

		if (notify_enodev) {
			sk->sk_err = ENODEV;
			if (!sock_flag(sk, SOCK_DEAD))
				sk->sk_error_report(sk);
		}
		break;

	case NETDEV_DOWN:
		if (bo->bound && bo->ifindex == dev->ifindex) {
			sk->sk_err = ENETDOWN;
			if (!sock_flag(sk, SOCK_DEAD))
				sk->sk_error_report(sk);
		}
	}

	return NOTIFY_DONE;
}

/*
 * initial settings for all BCM sockets to be set at socket creation time
 */
static int bcm_init(struct sock *sk)
{
	struct bcm_sock *bo = bcm_sk(sk);

	bo->bound            = 0;
	bo->ifindex          = 0;
	bo->dropped_usr_msgs = 0;
	bo->bcm_proc_read    = NULL;

	INIT_LIST_HEAD(&bo->tx_ops);
	INIT_LIST_HEAD(&bo->rx_ops);

	/* set notifier */
	bo->notifier.notifier_call = bcm_notifier;

	register_netdevice_notifier(&bo->notifier);

	return 0;
}

/*
 * standard socket functions
 */
static int bcm_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct bcm_sock *bo;
	struct bcm_op *op, *next;

	if (sk == NULL)
		return 0;

	bo = bcm_sk(sk);

	/* remove bcm_ops, timer, rx_unregister(), etc. */

	unregister_netdevice_notifier(&bo->notifier);

	lock_sock(sk);

	list_for_each_entry_safe(op, next, &bo->tx_ops, list)
		bcm_remove_op(op);

	list_for_each_entry_safe(op, next, &bo->rx_ops, list) {
		/*
		 * Don't care if we're bound or not (due to netdev problems)
		 * can_rx_unregister() is always a safe thing to do here.
		 */
		if (op->ifindex) {
			/*
			 * Only remove subscriptions that had not
			 * been removed due to NETDEV_UNREGISTER
			 * in bcm_notifier()
			 */
			if (op->rx_reg_dev) {
				struct net_device *dev;

				dev = dev_get_by_index(&init_net, op->ifindex);
				if (dev) {
					bcm_rx_unreg(dev, op);
					dev_put(dev);
				}
			}
		} else
			can_rx_unregister(NULL, op->can_id,
					  REGMASK(op->can_id),
					  bcm_rx_handler, op);

		bcm_remove_op(op);
	}

	/* remove procfs entry */
	if (proc_dir && bo->bcm_proc_read)
		remove_proc_entry(bo->procname, proc_dir);

	/* remove device reference */
	if (bo->bound) {
		bo->bound   = 0;
		bo->ifindex = 0;
	}

	sock_orphan(sk);
	sock->sk = NULL;

	release_sock(sk);
	sock_put(sk);

	return 0;
}

static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
		       int flags)
{
	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
	struct sock *sk = sock->sk;
	struct bcm_sock *bo = bcm_sk(sk);

	if (len < sizeof(*addr))
		return -EINVAL;

	if (bo->bound)
		return -EISCONN;

	/* bind a device to this socket */
	if (addr->can_ifindex) {
		struct net_device *dev;

		dev = dev_get_by_index(&init_net, addr->can_ifindex);
		if (!dev)
			return -ENODEV;

		if (dev->type != ARPHRD_CAN) {
			dev_put(dev);
			return -ENODEV;
		}

		bo->ifindex = dev->ifindex;
		dev_put(dev);

	} else {
		/* no interface reference for ifindex = 0 ('any' CAN device) */
		bo->ifindex = 0;
	}

	bo->bound = 1;

	if (proc_dir) {
		/* unique socket address as filename */
		sprintf(bo->procname, "%lu", sock_i_ino(sk));
		bo->bcm_proc_read = proc_create_data(bo->procname, 0644,
						     proc_dir,
						     &bcm_proc_fops, sk);
	}

	return 0;
}

static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
		       int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int error = 0;
	int noblock;
	int err;

	noblock = flags & MSG_DONTWAIT;
	flags &= ~MSG_DONTWAIT;
	skb = skb_recv_datagram(sk, flags, noblock, &error);
	if (!skb)
		return error;

	if (skb->len < size)
		size = skb->len;

	err = memcpy_to_msg(msg, skb->data, size);
	if (err < 0) {
		skb_free_datagram(sk, skb);
		return err;
	}

	sock_recv_ts_and_drops(msg, sk, skb);

	if (msg->msg_name) {
		__sockaddr_check_size(sizeof(struct sockaddr_can));
		msg->msg_namelen = sizeof(struct sockaddr_can);
		memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
	}

	skb_free_datagram(sk, skb);

	return size;
}

static const struct proto_ops bcm_ops = {
	.family       = PF_CAN,
	.release      = bcm_release,
	.bind         = sock_no_bind,
	.connect      = bcm_connect,
	.socketpair   = sock_no_socketpair,
	.accept       = sock_no_accept,
	.getname      = sock_no_getname,
	.poll         = datagram_poll,
	.ioctl        = can_ioctl,	/* use can_ioctl() from af_can.c */
	.listen       = sock_no_listen,
	.shutdown     = sock_no_shutdown,
	.setsockopt   = sock_no_setsockopt,
	.getsockopt   = sock_no_getsockopt,
	.sendmsg      = bcm_sendmsg,
	.recvmsg      = bcm_recvmsg,
	.mmap         = sock_no_mmap,
	.sendpage     = sock_no_sendpage,
};

static struct proto bcm_proto __read_mostly = {
	.name       = "CAN_BCM",
	.owner      = THIS_MODULE,
	.obj_size   = sizeof(struct bcm_sock),
	.init       = bcm_init,
};

static const struct can_proto bcm_can_proto = {
	.type       = SOCK_DGRAM,
	.protocol   = CAN_BCM,
	.ops        = &bcm_ops,
	.prot       = &bcm_proto,
};

static int __init bcm_module_init(void)
{
	int err;

	pr_info("can: broadcast manager protocol (rev " CAN_BCM_VERSION " t)\n");

	err = can_proto_register(&bcm_can_proto);
	if (err < 0) {
		printk(KERN_ERR "can: registration of bcm protocol failed\n");
		return err;
	}

	/* create /proc/net/can-bcm directory */
	proc_dir = proc_mkdir("can-bcm", init_net.proc_net);
	return 0;
}

static void __exit bcm_module_exit(void)
{
	can_proto_unregister(&bcm_can_proto);

	if (proc_dir)
		remove_proc_entry("can-bcm", init_net.proc_net);
}

module_init(bcm_module_init);
module_exit(bcm_module_exit);