/*
 * CAN bus driver for Bosch C_CAN controller
 *
 * Copyright (C) 2010 ST Microelectronics
 * Bhupesh Sharma <bhupesh.sharma@st.com>
 *
 * Borrowed heavily from the C_CAN driver originally written by:
 * Copyright (C) 2007
 * - Sascha Hauer, Marc Kleine-Budde, Pengutronix <s.hauer@pengutronix.de>
 * - Simon Kallweit, intefo AG <simon.kallweit@intefo.ch>
 *
 * TX and RX NAPI implementation has been borrowed from the at91 CAN driver
 * written by:
 * Copyright
 * (C) 2007 by Hans J. Koch <hjk@hansjkoch.de>
 * (C) 2008, 2009 by Marc Kleine-Budde <kernel@pengutronix.de>
 *
 * The Bosch C_CAN controller is compliant with CAN protocol version 2.0 part A and B.
 * The Bosch C_CAN user manual can be obtained from:
 * http://www.semiconductors.bosch.de/media/en/pdf/ipmodules_1/c_can/
 * users_manual_c_can.pdf
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>

#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
#include <linux/can/led.h>

#include "c_can.h"

/* Number of interface registers */
#define IF_ENUM_REG_LEN		11
#define C_CAN_IFACE(reg, iface)	(C_CAN_IF1_##reg + (iface) * IF_ENUM_REG_LEN)
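
/*
 * Example: C_CAN_IFACE(COMREQ_REG, 0) is C_CAN_IF1_COMREQ_REG itself, while
 * C_CAN_IFACE(COMREQ_REG, 1) is C_CAN_IF1_COMREQ_REG + IF_ENUM_REG_LEN, i.e.
 * the matching IF2 register (this assumes the IF2 register block directly
 * follows the IF1 block in the register enum in c_can.h).
 */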

/* control extension register D_CAN specific */
#define CONTROL_EX_PDR		BIT(8)

/* control register */
#define CONTROL_TEST		BIT(7)
#define CONTROL_CCE		BIT(6)
#define CONTROL_DISABLE_AR	BIT(5)
#define CONTROL_ENABLE_AR	(0 << 5)
#define CONTROL_EIE		BIT(3)
#define CONTROL_SIE		BIT(2)
#define CONTROL_IE		BIT(1)
#define CONTROL_INIT		BIT(0)

/* test register */
#define TEST_RX			BIT(7)
#define TEST_TX1		BIT(6)
#define TEST_TX2		BIT(5)
#define TEST_LBACK		BIT(4)
#define TEST_SILENT		BIT(3)
#define TEST_BASIC		BIT(2)

/* status register */
#define STATUS_PDA		BIT(10)
#define STATUS_BOFF		BIT(7)
#define STATUS_EWARN		BIT(6)
#define STATUS_EPASS		BIT(5)
#define STATUS_RXOK		BIT(4)
#define STATUS_TXOK		BIT(3)

/* error counter register */
#define ERR_CNT_TEC_MASK	0xff
#define ERR_CNT_TEC_SHIFT	0
#define ERR_CNT_REC_SHIFT	8
#define ERR_CNT_REC_MASK	(0x7f << ERR_CNT_REC_SHIFT)
#define ERR_CNT_RP_SHIFT	15
#define ERR_CNT_RP_MASK		(0x1 << ERR_CNT_RP_SHIFT)

/* bit-timing register */
#define BTR_BRP_MASK		0x3f
#define BTR_BRP_SHIFT		0
#define BTR_SJW_SHIFT		6
#define BTR_SJW_MASK		(0x3 << BTR_SJW_SHIFT)
#define BTR_TSEG1_SHIFT		8
#define BTR_TSEG1_MASK		(0xf << BTR_TSEG1_SHIFT)
#define BTR_TSEG2_SHIFT		12
#define BTR_TSEG2_MASK		(0x7 << BTR_TSEG2_SHIFT)

/* brp extension register */
#define BRP_EXT_BRPE_MASK	0x0f
#define BRP_EXT_BRPE_SHIFT	0

/* IFx command request */
#define IF_COMR_BUSY		BIT(15)

/* IFx command mask */
#define IF_COMM_WR		BIT(7)
#define IF_COMM_MASK		BIT(6)
#define IF_COMM_ARB		BIT(5)
#define IF_COMM_CONTROL		BIT(4)
#define IF_COMM_CLR_INT_PND	BIT(3)
#define IF_COMM_TXRQST		BIT(2)
#define IF_COMM_CLR_NEWDAT	IF_COMM_TXRQST
#define IF_COMM_DATAA		BIT(1)
#define IF_COMM_DATAB		BIT(0)
#define IF_COMM_ALL		(IF_COMM_MASK | IF_COMM_ARB | \
				IF_COMM_CONTROL | IF_COMM_TXRQST | \
				IF_COMM_DATAA | IF_COMM_DATAB)

/* For the low buffers we clear the interrupt bit, but keep newdat */
#define IF_COMM_RCV_LOW		(IF_COMM_MASK | IF_COMM_ARB | \
				IF_COMM_CONTROL | IF_COMM_CLR_INT_PND | \
				IF_COMM_DATAA | IF_COMM_DATAB)

/* For the high buffers we clear the interrupt bit and newdat */
#define IF_COMM_RCV_HIGH	(IF_COMM_RCV_LOW | IF_COMM_CLR_NEWDAT)

/* IFx arbitration */
#define IF_ARB_MSGVAL		BIT(15)
#define IF_ARB_MSGXTD		BIT(14)
#define IF_ARB_TRANSMIT		BIT(13)

/* IFx message control */
#define IF_MCONT_NEWDAT		BIT(15)
#define IF_MCONT_MSGLST		BIT(14)
#define IF_MCONT_INTPND		BIT(13)
#define IF_MCONT_UMASK		BIT(12)
#define IF_MCONT_TXIE		BIT(11)
#define IF_MCONT_RXIE		BIT(10)
#define IF_MCONT_RMTEN		BIT(9)
#define IF_MCONT_TXRQST		BIT(8)
#define IF_MCONT_EOB		BIT(7)
#define IF_MCONT_DLC_MASK	0xf

/*
 * Use IF1 for RX and IF2 for TX
 */
#define IF_RX			0
#define IF_TX			1
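
/*
 * Dedicating one interface register set to RX (used from NAPI poll) and the
 * other to TX (used from the xmit path and TX completion) keeps the two
 * paths from competing for the same IFx transfer.
 */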

/* status interrupt */
#define STATUS_INTERRUPT	0x8000

/* global interrupt masks */
#define ENABLE_ALL_INTERRUPTS	1
#define DISABLE_ALL_INTERRUPTS	0

/* minimum timeout for checking BUSY status */
#define MIN_TIMEOUT_VALUE	6
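
/*
 * MIN_TIMEOUT_VALUE is presumably sized after the "transfer must complete
 * within 6 CAN-CLK periods" rule quoted at the IFx access helpers below:
 * with udelay(1) per retry, 6 retries cover CAN clocks down to about 1 MHz.
 */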

/* Wait for ~1 sec for INIT bit */
#define INIT_WAIT_MS		1000

/* napi related */
#define C_CAN_NAPI_WEIGHT	C_CAN_MSG_OBJ_RX_NUM
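
/*
 * The NAPI weight equals the number of RX message objects, so one poll with
 * full quota can drain every pending receive buffer.
 */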

/* c_can lec values */
enum c_can_lec_type {
	LEC_NO_ERROR = 0,
	LEC_STUFF_ERROR,
	LEC_FORM_ERROR,
	LEC_ACK_ERROR,
	LEC_BIT1_ERROR,
	LEC_BIT0_ERROR,
	LEC_CRC_ERROR,
	LEC_UNUSED,
	LEC_MASK = LEC_UNUSED,
};

/*
 * c_can error types:
 * Bus errors (BUS_OFF, ERROR_WARNING, ERROR_PASSIVE) are supported
 */
enum c_can_bus_error_types {
	C_CAN_NO_ERROR = 0,
	C_CAN_BUS_OFF,
	C_CAN_ERROR_WARNING,
	C_CAN_ERROR_PASSIVE,
};

static const struct can_bittiming_const c_can_bittiming_const = {
	.name = KBUILD_MODNAME,
	.tseg1_min = 2,		/* Time segment 1 = prop_seg + phase_seg1 */
	.tseg1_max = 16,
	.tseg2_min = 1,		/* Time segment 2 = phase_seg2 */
	.tseg2_max = 8,
	.sjw_max = 4,
	.brp_min = 1,
	.brp_max = 1024,	/* 6-bit BRP field + 4-bit BRPE field */
	.brp_inc = 1,
};

static inline void c_can_pm_runtime_enable(const struct c_can_priv *priv)
{
	if (priv->device)
		pm_runtime_enable(priv->device);
}

static inline void c_can_pm_runtime_disable(const struct c_can_priv *priv)
{
	if (priv->device)
		pm_runtime_disable(priv->device);
}

static inline void c_can_pm_runtime_get_sync(const struct c_can_priv *priv)
{
	if (priv->device)
		pm_runtime_get_sync(priv->device);
}

static inline void c_can_pm_runtime_put_sync(const struct c_can_priv *priv)
{
	if (priv->device)
		pm_runtime_put_sync(priv->device);
}

static inline void c_can_reset_ram(const struct c_can_priv *priv, bool enable)
{
	if (priv->raminit)
		priv->raminit(priv, enable);
}

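/*
 * priv->tx_next and priv->tx_echo are free-running counters; only their low
 * bits (C_CAN_NEXT_MSG_OBJ_MASK, defined in c_can.h) select a slot within
 * the TX message object range starting at C_CAN_MSG_OBJ_TX_FIRST.
 */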
static inline int get_tx_next_msg_obj(const struct c_can_priv *priv)
{
	return (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) +
			C_CAN_MSG_OBJ_TX_FIRST;
}

static inline int get_tx_echo_msg_obj(int txecho)
{
	return (txecho & C_CAN_NEXT_MSG_OBJ_MASK) + C_CAN_MSG_OBJ_TX_FIRST;
}

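/*
 * 32 bit registers (e.g. TXRQST, NEWDAT, MSGVAL) are mapped as two
 * consecutive 16 bit registers; assemble the value low word first.
 */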
static u32 c_can_read_reg32(struct c_can_priv *priv, enum reg index)
{
	u32 val = priv->read_reg(priv, index);
	val |= ((u32) priv->read_reg(priv, index + 1)) << 16;
	return val;
}

static void c_can_enable_all_interrupts(struct c_can_priv *priv,
						int enable)
{
	unsigned int cntrl_save = priv->read_reg(priv,
						C_CAN_CTRL_REG);

	if (enable)
		cntrl_save |= (CONTROL_SIE | CONTROL_EIE | CONTROL_IE);
	else
		cntrl_save &= ~(CONTROL_EIE | CONTROL_IE | CONTROL_SIE);

	priv->write_reg(priv, C_CAN_CTRL_REG, cntrl_save);
}

static inline int c_can_msg_obj_is_busy(struct c_can_priv *priv, int iface)
{
	int count = MIN_TIMEOUT_VALUE;

	while (count && priv->read_reg(priv,
			C_CAN_IFACE(COMREQ_REG, iface)) &
			IF_COMR_BUSY) {
		count--;
		udelay(1);
	}

	if (!count)
		return 1;

	return 0;
}

static inline void c_can_object_get(struct net_device *dev,
					int iface, int objno, int mask)
{
	struct c_can_priv *priv = netdev_priv(dev);

	/*
	 * As per the specs, after writing the message object number in the
	 * IF command request register, the transfer between the interface
	 * register and message RAM must complete within 6 CAN-CLK periods.
	 */
	priv->write_reg(priv, C_CAN_IFACE(COMMSK_REG, iface),
			IFX_WRITE_LOW_16BIT(mask));
	priv->write_reg(priv, C_CAN_IFACE(COMREQ_REG, iface),
			IFX_WRITE_LOW_16BIT(objno));

	if (c_can_msg_obj_is_busy(priv, iface))
		netdev_err(dev, "timed out in object get\n");
}

static inline void c_can_object_put(struct net_device *dev,
					int iface, int objno, int mask)
{
	struct c_can_priv *priv = netdev_priv(dev);

	/*
	 * As per the specs, after writing the message object number in the
	 * IF command request register, the transfer between the interface
	 * register and message RAM must complete within 6 CAN-CLK periods.
	 */
	priv->write_reg(priv, C_CAN_IFACE(COMMSK_REG, iface),
			(IF_COMM_WR | IFX_WRITE_LOW_16BIT(mask)));
	priv->write_reg(priv, C_CAN_IFACE(COMREQ_REG, iface),
			IFX_WRITE_LOW_16BIT(objno));

	if (c_can_msg_obj_is_busy(priv, iface))
		netdev_err(dev, "timed out in object put\n");
}

static void c_can_write_msg_object(struct net_device *dev,
			int iface, struct can_frame *frame, int objno)
{
	int i;
	u16 flags = 0;
	unsigned int id;
	struct c_can_priv *priv = netdev_priv(dev);

	if (!(frame->can_id & CAN_RTR_FLAG))
		flags |= IF_ARB_TRANSMIT;

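	/*
	 * In the arbitration registers an extended ID occupies bits 28..0,
	 * while a standard 11 bit ID occupies bits 28..18, hence the shift
	 * by 18 below.
	 */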
	if (frame->can_id & CAN_EFF_FLAG) {
		id = frame->can_id & CAN_EFF_MASK;
		flags |= IF_ARB_MSGXTD;
	} else
		id = ((frame->can_id & CAN_SFF_MASK) << 18);

	flags |= IF_ARB_MSGVAL;

	priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface),
			IFX_WRITE_LOW_16BIT(id));
	priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), flags |
			IFX_WRITE_HIGH_16BIT(id));

	for (i = 0; i < frame->can_dlc; i += 2) {
		priv->write_reg(priv, C_CAN_IFACE(DATA1_REG, iface) + i / 2,
				frame->data[i] | (frame->data[i + 1] << 8));
	}

	/* enable interrupt for this message object */
	priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface),
			IF_MCONT_TXIE | IF_MCONT_TXRQST | IF_MCONT_EOB |
			frame->can_dlc);
	c_can_object_put(dev, iface, objno, IF_COMM_ALL);
}

static inline void c_can_activate_all_lower_rx_msg_obj(struct net_device *dev,
							int iface)
{
	int i;

	for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_MSG_RX_LOW_LAST; i++)
		c_can_object_get(dev, iface, i, IF_COMM_CLR_NEWDAT);
}

static int c_can_handle_lost_msg_obj(struct net_device *dev,
				     int iface, int objno, u32 ctrl)
{
	struct net_device_stats *stats = &dev->stats;
	struct c_can_priv *priv = netdev_priv(dev);
	struct can_frame *frame;
	struct sk_buff *skb;

	ctrl &= ~(IF_MCONT_MSGLST | IF_MCONT_INTPND | IF_MCONT_NEWDAT);
	priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), ctrl);
	c_can_object_put(dev, iface, objno, IF_COMM_CONTROL);

	stats->rx_errors++;
	stats->rx_over_errors++;

	/* create an error msg */
	skb = alloc_can_err_skb(dev, &frame);
	if (unlikely(!skb))
		return 0;

	frame->can_id |= CAN_ERR_CRTL;
	frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;

	netif_receive_skb(skb);
	return 1;
}

static int c_can_read_msg_object(struct net_device *dev, int iface, int ctrl)
{
	u16 flags, data;
	int i;
	unsigned int val;
	struct c_can_priv *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct sk_buff *skb;
	struct can_frame *frame;

	skb = alloc_can_skb(dev, &frame);
	if (!skb) {
		stats->rx_dropped++;
		return -ENOMEM;
	}

	frame->can_dlc = get_can_dlc(ctrl & 0x0F);

	flags = priv->read_reg(priv, C_CAN_IFACE(ARB2_REG, iface));
	val = priv->read_reg(priv, C_CAN_IFACE(ARB1_REG, iface)) |
		(flags << 16);

	if (flags & IF_ARB_MSGXTD)
		frame->can_id = (val & CAN_EFF_MASK) | CAN_EFF_FLAG;
	else
		frame->can_id = (val >> 18) & CAN_SFF_MASK;

	if (flags & IF_ARB_TRANSMIT)
		frame->can_id |= CAN_RTR_FLAG;
	else {
		for (i = 0; i < frame->can_dlc; i += 2) {
			data = priv->read_reg(priv,
				C_CAN_IFACE(DATA1_REG, iface) + i / 2);
			frame->data[i] = data;
			frame->data[i + 1] = data >> 8;
		}
	}

	stats->rx_packets++;
	stats->rx_bytes += frame->can_dlc;

	netif_receive_skb(skb);
	return 0;
}

static void c_can_setup_receive_object(struct net_device *dev, int iface,
					int objno, unsigned int mask,
					unsigned int id, unsigned int mcont)
{
	struct c_can_priv *priv = netdev_priv(dev);

	priv->write_reg(priv, C_CAN_IFACE(MASK1_REG, iface),
			IFX_WRITE_LOW_16BIT(mask));

	/* According to the C_CAN documentation, the reserved bit
	 * in the IFx_MASK2 register is fixed to 1
	 */
	priv->write_reg(priv, C_CAN_IFACE(MASK2_REG, iface),
			IFX_WRITE_HIGH_16BIT(mask) | BIT(13));

	priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface),
			IFX_WRITE_LOW_16BIT(id));
	priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface),
			(IF_ARB_MSGVAL | IFX_WRITE_HIGH_16BIT(id)));

	priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), mcont);
	c_can_object_put(dev, iface, objno, IF_COMM_ALL & ~IF_COMM_TXRQST);

	netdev_dbg(dev, "obj no:%d, msgval:0x%08x\n", objno,
			c_can_read_reg32(priv, C_CAN_MSGVAL1_REG));
}

static void c_can_inval_msg_object(struct net_device *dev, int iface, int objno)
{
	struct c_can_priv *priv = netdev_priv(dev);

	priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), 0);
	priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), 0);
	priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), 0);

	c_can_object_put(dev, iface, objno, IF_COMM_ARB | IF_COMM_CONTROL);

	netdev_dbg(dev, "obj no:%d, msgval:0x%08x\n", objno,
			c_can_read_reg32(priv, C_CAN_MSGVAL1_REG));
}

static inline int c_can_is_next_tx_obj_busy(struct c_can_priv *priv, int objno)
{
	int val = c_can_read_reg32(priv, C_CAN_TXRQST1_REG);

	/*
	 * Bit n-1 of the transmission request register corresponds to
	 * message object n, so adjust the index accordingly.
	 */
	if (val & (1 << (objno - 1)))
		return 1;

	return 0;
}

static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
					struct net_device *dev)
{
	u32 msg_obj_no;
	struct c_can_priv *priv = netdev_priv(dev);
	struct can_frame *frame = (struct can_frame *)skb->data;

	if (can_dropped_invalid_skb(dev, skb))
		return NETDEV_TX_OK;

	spin_lock_bh(&priv->xmit_lock);
	msg_obj_no = get_tx_next_msg_obj(priv);

	/* prepare message object for transmission */
	c_can_write_msg_object(dev, IF_TX, frame, msg_obj_no);
	priv->dlc[msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST] = frame->can_dlc;
	can_put_echo_skb(skb, dev, msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);

	/*
	 * we have to stop the queue in case of a wrap around or
	 * if the next TX message object is still in use
	 */
	priv->tx_next++;
	if (c_can_is_next_tx_obj_busy(priv, get_tx_next_msg_obj(priv)) ||
			(priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) == 0)
		netif_stop_queue(dev);
	spin_unlock_bh(&priv->xmit_lock);

	return NETDEV_TX_OK;
}

static int c_can_wait_for_ctrl_init(struct net_device *dev,
				    struct c_can_priv *priv, u32 init)
{
	int retry = 0;

	while (init != (priv->read_reg(priv, C_CAN_CTRL_REG) & CONTROL_INIT)) {
		udelay(10);
		if (retry++ > 1000) {
			netdev_err(dev, "CCTRL: set CONTROL_INIT failed\n");
			return -EIO;
		}
	}
	return 0;
}

static int c_can_set_bittiming(struct net_device *dev)
{
	unsigned int reg_btr, reg_brpe, ctrl_save;
	u8 brp, brpe, sjw, tseg1, tseg2;
	u32 ten_bit_brp;
	struct c_can_priv *priv = netdev_priv(dev);
	const struct can_bittiming *bt = &priv->can.bittiming;
	int res;

	/* c_can provides 6-bit BRP and 4-bit BRPE fields */
	ten_bit_brp = bt->brp - 1;
	brp = ten_bit_brp & BTR_BRP_MASK;
	brpe = ten_bit_brp >> 6;
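
	/*
	 * Example: bt->brp == 100 gives ten_bit_brp == 99 (0x63), which
	 * splits into brp == 0x23 and brpe == 0x1.
	 */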

	sjw = bt->sjw - 1;
	tseg1 = bt->prop_seg + bt->phase_seg1 - 1;
	tseg2 = bt->phase_seg2 - 1;
	reg_btr = brp | (sjw << BTR_SJW_SHIFT) | (tseg1 << BTR_TSEG1_SHIFT) |
			(tseg2 << BTR_TSEG2_SHIFT);
	reg_brpe = brpe & BRP_EXT_BRPE_MASK;

	netdev_info(dev,
		"setting BTR=%04x BRPE=%04x\n", reg_btr, reg_brpe);

	ctrl_save = priv->read_reg(priv, C_CAN_CTRL_REG);
	ctrl_save &= ~CONTROL_INIT;
	priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_CCE | CONTROL_INIT);
	res = c_can_wait_for_ctrl_init(dev, priv, CONTROL_INIT);
	if (res)
		return res;

	priv->write_reg(priv, C_CAN_BTR_REG, reg_btr);
	priv->write_reg(priv, C_CAN_BRPEXT_REG, reg_brpe);
	priv->write_reg(priv, C_CAN_CTRL_REG, ctrl_save);

	return c_can_wait_for_ctrl_init(dev, priv, 0);
}

/*
 * Configure C_CAN message objects for Tx and Rx purposes:
 * C_CAN provides a total of 32 message objects that can be configured
 * either for Tx or Rx purposes. Here the first 16 message objects are used as
 * a reception FIFO. The end of reception FIFO is signified by the EoB bit
 * being SET. The remaining 16 message objects are kept aside for Tx purposes.
 * See user guide document for further details on configuring message
 * objects.
 */
static void c_can_configure_msg_objects(struct net_device *dev)
{
	int i;

	/* first invalidate all message objects */
	for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_NO_OF_OBJECTS; i++)
		c_can_inval_msg_object(dev, IF_RX, i);

	/* setup receive message objects */
	for (i = C_CAN_MSG_OBJ_RX_FIRST; i < C_CAN_MSG_OBJ_RX_LAST; i++)
		c_can_setup_receive_object(dev, IF_RX, i, 0, 0,
					   IF_MCONT_RXIE | IF_MCONT_UMASK);

	c_can_setup_receive_object(dev, IF_RX, C_CAN_MSG_OBJ_RX_LAST, 0, 0,
				   IF_MCONT_EOB | IF_MCONT_RXIE | IF_MCONT_UMASK);
}

/*
 * Configure C_CAN chip:
 * - enable/disable auto-retransmission
 * - set operating mode
 * - configure message objects
 */
static int c_can_chip_config(struct net_device *dev)
{
	struct c_can_priv *priv = netdev_priv(dev);

	/* enable automatic retransmission */
	priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_ENABLE_AR);

	if ((priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) &&
	    (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)) {
		/* loopback + silent mode : useful for hot self-test */
		priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_TEST);
		priv->write_reg(priv, C_CAN_TEST_REG, TEST_LBACK | TEST_SILENT);
	} else if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
		/* loopback mode : useful for self-test function */
		priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_TEST);
		priv->write_reg(priv, C_CAN_TEST_REG, TEST_LBACK);
	} else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) {
		/* silent mode : bus-monitoring mode */
		priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_TEST);
		priv->write_reg(priv, C_CAN_TEST_REG, TEST_SILENT);
	}

	/* configure message objects */
	c_can_configure_msg_objects(dev);

	/* set a `lec` value so that we can check for updates later */
	priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);

	/* set bittiming params */
	return c_can_set_bittiming(dev);
}

static int c_can_start(struct net_device *dev)
{
	struct c_can_priv *priv = netdev_priv(dev);
	int err;

	/* basic c_can configuration */
	err = c_can_chip_config(dev);
	if (err)
		return err;

	priv->can.state = CAN_STATE_ERROR_ACTIVE;

	/* reset tx helper pointers and the rx mask */
	priv->tx_next = priv->tx_echo = 0;
	priv->rxmasked = 0;

	return 0;
}

static void c_can_stop(struct net_device *dev)
{
	struct c_can_priv *priv = netdev_priv(dev);

	/* disable all interrupts */
	c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);

	/* set the state as STOPPED */
	priv->can.state = CAN_STATE_STOPPED;
}

static int c_can_set_mode(struct net_device *dev, enum can_mode mode)
{
	struct c_can_priv *priv = netdev_priv(dev);
	int err;

	switch (mode) {
	case CAN_MODE_START:
		err = c_can_start(dev);
		if (err)
			return err;
		netif_wake_queue(dev);
		/* enable status change, error and module interrupts */
		c_can_enable_all_interrupts(priv, ENABLE_ALL_INTERRUPTS);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int __c_can_get_berr_counter(const struct net_device *dev,
				    struct can_berr_counter *bec)
{
	unsigned int reg_err_counter;
	struct c_can_priv *priv = netdev_priv(dev);

	reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG);
	bec->rxerr = (reg_err_counter & ERR_CNT_REC_MASK) >>
				ERR_CNT_REC_SHIFT;
	bec->txerr = reg_err_counter & ERR_CNT_TEC_MASK;

	return 0;
}

static int c_can_get_berr_counter(const struct net_device *dev,
				  struct can_berr_counter *bec)
{
	struct c_can_priv *priv = netdev_priv(dev);
	int err;

	c_can_pm_runtime_get_sync(priv);
	err = __c_can_get_berr_counter(dev, bec);
	c_can_pm_runtime_put_sync(priv);

	return err;
}

/*
 * priv->tx_echo holds the number of the oldest can_frame put for
 * transmission into the hardware, but not yet ACKed by the CAN tx
 * complete IRQ.
 *
 * We iterate from priv->tx_echo to priv->tx_next and, for each packet
 * that has been transmitted, echo it back to the CAN framework.
 * If we discover a not yet transmitted packet, stop looking for more.
 */
static void c_can_do_tx(struct net_device *dev)
{
	struct c_can_priv *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	u32 val, obj, pkts = 0, bytes = 0;

	spin_lock_bh(&priv->xmit_lock);

	for (; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
		obj = get_tx_echo_msg_obj(priv->tx_echo);
		val = c_can_read_reg32(priv, C_CAN_TXRQST1_REG);

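		/*
		 * TXRQST bit (obj - 1) still set: this frame has not gone
		 * out yet, so stop here to keep completions in order.
		 */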
		if (val & (1 << (obj - 1)))
			break;

		can_get_echo_skb(dev, obj - C_CAN_MSG_OBJ_TX_FIRST);
		bytes += priv->dlc[obj - C_CAN_MSG_OBJ_TX_FIRST];
		pkts++;
		c_can_inval_msg_object(dev, IF_TX, obj);
	}

	/* restart queue if we wrapped around or if it stalled on the last packet */
	if (((priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) != 0) ||
			((priv->tx_echo & C_CAN_NEXT_MSG_OBJ_MASK) == 0))
		netif_wake_queue(dev);

	spin_unlock_bh(&priv->xmit_lock);

	if (pkts) {
		stats->tx_bytes += bytes;
		stats->tx_packets += pkts;
		can_led_event(dev, CAN_LED_EVENT_TX);
	}
}

/*
 * If we have a gap in the pending bits, that means we either
 * raced with the hardware or failed to read out all upper
 * objects in the last run due to the quota limit.
 */
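/*
 * Example: pend == 0xc3 (objects 1, 2, 7 and 8 pending) has a gap, so this
 * returns 0xc0 and objects 7 and 8 are read out first; the remaining 0x03
 * is handled on the caller's next loop iteration.
 */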
static u32 c_can_adjust_pending(u32 pend)
{
	u32 weight, lasts;

	if (pend == RECEIVE_OBJECT_BITS)
		return pend;

	/*
	 * If the last set bit is larger than the number of pending
	 * bits we have a gap.
	 */
	weight = hweight32(pend);
	lasts = fls(pend);

	/* If the bits are linear, nothing to do */
	if (lasts == weight)
		return pend;

	/*
	 * Find the first set bit after the gap. We walk backwards
	 * from the last set bit.
	 */
	for (lasts--; pend & (1 << (lasts - 1)); lasts--);

	return pend & ~((1 << lasts) - 1);
}

static inline void c_can_rx_object_get(struct net_device *dev, u32 obj)
{
#ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING
	if (obj < C_CAN_MSG_RX_LOW_LAST)
		c_can_object_get(dev, IF_RX, obj, IF_COMM_RCV_LOW);
	else
#endif
		c_can_object_get(dev, IF_RX, obj, IF_COMM_RCV_HIGH);
}

static inline void c_can_rx_finalize(struct net_device *dev,
				     struct c_can_priv *priv, u32 obj)
{
#ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING
	if (obj < C_CAN_MSG_RX_LOW_LAST)
		priv->rxmasked |= BIT(obj - 1);
	else if (obj == C_CAN_MSG_RX_LOW_LAST) {
		priv->rxmasked = 0;
		/* activate all lower message objects */
		c_can_activate_all_lower_rx_msg_obj(dev, IF_RX);
	}
#endif
}

static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv,
			      u32 pend, int quota)
{
	u32 pkts = 0, ctrl, obj;

	while ((obj = ffs(pend)) && quota > 0) {
		pend &= ~BIT(obj - 1);

		c_can_rx_object_get(dev, obj);
		ctrl = priv->read_reg(priv, C_CAN_IFACE(MSGCTRL_REG, IF_RX));

		if (ctrl & IF_MCONT_MSGLST) {
			int n = c_can_handle_lost_msg_obj(dev, IF_RX, obj, ctrl);

			pkts += n;
			quota -= n;
			continue;
		}

		/*
		 * This really should not happen, but this covers some
		 * odd HW behaviour. Do not remove that unless you
		 * want to brick your machine.
		 */
		if (!(ctrl & IF_MCONT_NEWDAT))
			continue;

		/* read the data from the message object */
		c_can_read_msg_object(dev, IF_RX, ctrl);

		c_can_rx_finalize(dev, priv, obj);

		pkts++;
		quota--;
	}

	return pkts;
}

static inline u32 c_can_get_pending(struct c_can_priv *priv)
{
	u32 pend = priv->read_reg(priv, C_CAN_NEWDAT1_REG);

#ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING
	pend &= ~priv->rxmasked;
#endif
	return pend;
}

/*
 * theory of operation:
 *
 * The c_can core saves a received CAN message into the first message
 * object it finds free (starting with the lowest). Bits NEWDAT and
 * INTPND are set for this message object, indicating that a new message
 * has arrived. Because objects are filled lowest-first, frames can be
 * read out of order; to work around this, we keep two groups of message
 * objects whose partitioning is defined by C_CAN_MSG_OBJ_RX_SPLIT.
 *
 * If CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING = y
 *
 * To ensure in-order frame reception we use the following
 * approach while re-activating a message object to receive further
 * frames:
 * - if the current message object number is lower than
 *   C_CAN_MSG_RX_LOW_LAST, do not clear the NEWDAT bit while clearing
 *   the INTPND bit.
 * - if the current message object number is equal to
 *   C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of all lower
 *   receive message objects.
 * - if the current message object number is greater than
 *   C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of
 *   only this message object.
 *
 * This can cause packet loss!
 *
 * If CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING = n
 *
 * We clear the newdat bit right away.
 *
 * This can result in packet reordering when the readout is slow.
 */
static int c_can_do_rx_poll(struct net_device *dev, int quota)
{
	struct c_can_priv *priv = netdev_priv(dev);
	u32 pkts = 0, pend = 0, toread, n;

	/*
	 * It is faster to read only one 16-bit register. This is only possible
	 * for a maximum number of 16 objects.
	 */
	BUILD_BUG_ON_MSG(C_CAN_MSG_OBJ_RX_LAST > 16,
			"Implementation does not support more message objects than 16");

	while (quota > 0) {
		if (!pend) {
			pend = c_can_get_pending(priv);
			if (!pend)
				break;
			/*
			 * If the pending field has a gap, handle the
			 * bits above the gap first.
			 */
			toread = c_can_adjust_pending(pend);
		} else {
			toread = pend;
		}
		/* Remove the bits from pend */
		pend &= ~toread;
		/* Read the objects */
		n = c_can_read_objects(dev, priv, toread, quota);
		pkts += n;
		quota -= n;
	}

	if (pkts)
		can_led_event(dev, CAN_LED_EVENT_RX);

	return pkts;
}

static int c_can_handle_state_change(struct net_device *dev,
				enum c_can_bus_error_types error_type)
{
	unsigned int reg_err_counter;
	unsigned int rx_err_passive;
	struct c_can_priv *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;
	struct can_berr_counter bec;

	switch (error_type) {
	case C_CAN_ERROR_WARNING:
		/* error warning state */
		priv->can.can_stats.error_warning++;
		priv->can.state = CAN_STATE_ERROR_WARNING;
		break;
	case C_CAN_ERROR_PASSIVE:
		/* error passive state */
		priv->can.can_stats.error_passive++;
		priv->can.state = CAN_STATE_ERROR_PASSIVE;
		break;
	case C_CAN_BUS_OFF:
		/* bus-off state */
		priv->can.state = CAN_STATE_BUS_OFF;
		can_bus_off(dev);
		break;
	default:
		break;
	}

	/* propagate the error condition to the CAN stack */
	skb = alloc_can_err_skb(dev, &cf);
	if (unlikely(!skb))
		return 0;

	__c_can_get_berr_counter(dev, &bec);
	reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG);
	rx_err_passive = (reg_err_counter & ERR_CNT_RP_MASK) >>
				ERR_CNT_RP_SHIFT;

	switch (error_type) {
	case C_CAN_ERROR_WARNING:
		/* error warning state */
		cf->can_id |= CAN_ERR_CRTL;
		cf->data[1] = (bec.txerr > bec.rxerr) ?
			CAN_ERR_CRTL_TX_WARNING :
			CAN_ERR_CRTL_RX_WARNING;
		cf->data[6] = bec.txerr;
		cf->data[7] = bec.rxerr;

		break;
	case C_CAN_ERROR_PASSIVE:
		/* error passive state */
		cf->can_id |= CAN_ERR_CRTL;
		if (rx_err_passive)
			cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
		if (bec.txerr > 127)
			cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;

		cf->data[6] = bec.txerr;
		cf->data[7] = bec.rxerr;
		break;
	case C_CAN_BUS_OFF:
		/* bus-off state */
		cf->can_id |= CAN_ERR_BUSOFF;
		can_bus_off(dev);
		break;
	default:
		break;
	}

	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;
	netif_receive_skb(skb);

	return 1;
}

static int c_can_handle_bus_err(struct net_device *dev,
				enum c_can_lec_type lec_type)
{
	struct c_can_priv *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;

	/*
	 * early exit if no lec update or no error.
	 * no lec update means that no CAN bus event has been detected
	 * since the CPU wrote the 0x7 (LEC_UNUSED) value to the status reg.
	 */
	if (lec_type == LEC_UNUSED || lec_type == LEC_NO_ERROR)
		return 0;

	if (!(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING))
		return 0;

	/* common for all types of bus errors */
	priv->can.can_stats.bus_error++;
	stats->rx_errors++;

	/* propagate the error condition to the CAN stack */
	skb = alloc_can_err_skb(dev, &cf);
	if (unlikely(!skb))
		return 0;

	/*
	 * check for 'last error code' which tells us the
	 * type of the last error to occur on the CAN bus
	 */
	cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
	cf->data[2] |= CAN_ERR_PROT_UNSPEC;

	switch (lec_type) {
	case LEC_STUFF_ERROR:
		netdev_dbg(dev, "stuff error\n");
		cf->data[2] |= CAN_ERR_PROT_STUFF;
		break;
	case LEC_FORM_ERROR:
		netdev_dbg(dev, "form error\n");
		cf->data[2] |= CAN_ERR_PROT_FORM;
		break;
	case LEC_ACK_ERROR:
		netdev_dbg(dev, "ack error\n");
		cf->data[3] |= (CAN_ERR_PROT_LOC_ACK |
				CAN_ERR_PROT_LOC_ACK_DEL);
		break;
	case LEC_BIT1_ERROR:
		netdev_dbg(dev, "bit1 error\n");
		cf->data[2] |= CAN_ERR_PROT_BIT1;
		break;
	case LEC_BIT0_ERROR:
		netdev_dbg(dev, "bit0 error\n");
		cf->data[2] |= CAN_ERR_PROT_BIT0;
		break;
	case LEC_CRC_ERROR:
		netdev_dbg(dev, "CRC error\n");
		cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ |
				CAN_ERR_PROT_LOC_CRC_DEL);
		break;
	default:
		break;
	}

	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;
	netif_receive_skb(skb);
	return 1;
}

static int c_can_poll(struct napi_struct *napi, int quota)
{
	struct net_device *dev = napi->dev;
	struct c_can_priv *priv = netdev_priv(dev);
	u16 curr, last = priv->last_status;
	int work_done = 0;

	priv->last_status = curr = priv->read_reg(priv, C_CAN_STS_REG);
	/* Ack status on C_CAN. D_CAN is self clearing */
	if (priv->type != BOSCH_D_CAN)
		priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);

	/* handle state changes */
	if ((curr & STATUS_EWARN) && (!(last & STATUS_EWARN))) {
		netdev_dbg(dev, "entered error warning state\n");
		work_done += c_can_handle_state_change(dev, C_CAN_ERROR_WARNING);
	}

	if ((curr & STATUS_EPASS) && (!(last & STATUS_EPASS))) {
		netdev_dbg(dev, "entered error passive state\n");
		work_done += c_can_handle_state_change(dev, C_CAN_ERROR_PASSIVE);
	}

	if ((curr & STATUS_BOFF) && (!(last & STATUS_BOFF))) {
		netdev_dbg(dev, "entered bus off state\n");
		work_done += c_can_handle_state_change(dev, C_CAN_BUS_OFF);
		goto end;
	}

	/* handle bus recovery events */
	if ((!(curr & STATUS_BOFF)) && (last & STATUS_BOFF)) {
		netdev_dbg(dev, "left bus off state\n");
		priv->can.state = CAN_STATE_ERROR_ACTIVE;
	}
	if ((!(curr & STATUS_EPASS)) && (last & STATUS_EPASS)) {
		netdev_dbg(dev, "left error passive state\n");
		priv->can.state = CAN_STATE_ERROR_ACTIVE;
	}

	/* handle lec errors on the bus */
	work_done += c_can_handle_bus_err(dev, curr & LEC_MASK);

	/* Handle Tx/Rx events. We do this unconditionally */
	work_done += c_can_do_rx_poll(dev, (quota - work_done));
	c_can_do_tx(dev);

end:
	if (work_done < quota) {
		napi_complete(napi);
		/* enable all IRQs if we are not in bus off state */
		if (priv->can.state != CAN_STATE_BUS_OFF)
			c_can_enable_all_interrupts(priv, ENABLE_ALL_INTERRUPTS);
	}

	return work_done;
}

static irqreturn_t c_can_isr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct c_can_priv *priv = netdev_priv(dev);

	if (!priv->read_reg(priv, C_CAN_INT_REG))
		return IRQ_NONE;

	/* disable all interrupts and schedule the NAPI */
	c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
	napi_schedule(&priv->napi);

	return IRQ_HANDLED;
}

static int c_can_open(struct net_device *dev)
{
	int err;
	struct c_can_priv *priv = netdev_priv(dev);

	c_can_pm_runtime_get_sync(priv);
	c_can_reset_ram(priv, true);

	/* open the can device */
	err = open_candev(dev);
	if (err) {
		netdev_err(dev, "failed to open can device\n");
		goto exit_open_fail;
	}

	/* register interrupt handler */
	err = request_irq(dev->irq, &c_can_isr, IRQF_SHARED, dev->name,
				dev);
	if (err < 0) {
		netdev_err(dev, "failed to request interrupt\n");
		goto exit_irq_fail;
	}

	/* start the c_can controller */
	err = c_can_start(dev);
	if (err)
		goto exit_start_fail;

	can_led_event(dev, CAN_LED_EVENT_OPEN);

	napi_enable(&priv->napi);
	/* enable status change, error and module interrupts */
	c_can_enable_all_interrupts(priv, ENABLE_ALL_INTERRUPTS);
	netif_start_queue(dev);

	return 0;

exit_start_fail:
	free_irq(dev->irq, dev);
exit_irq_fail:
	close_candev(dev);
exit_open_fail:
	c_can_reset_ram(priv, false);
	c_can_pm_runtime_put_sync(priv);
	return err;
}

static int c_can_close(struct net_device *dev)
{
	struct c_can_priv *priv = netdev_priv(dev);

	netif_stop_queue(dev);
	napi_disable(&priv->napi);
	c_can_stop(dev);
	free_irq(dev->irq, dev);
	close_candev(dev);

	c_can_reset_ram(priv, false);
	c_can_pm_runtime_put_sync(priv);

	can_led_event(dev, CAN_LED_EVENT_STOP);

	return 0;
}

struct net_device *alloc_c_can_dev(void)
{
	struct net_device *dev;
	struct c_can_priv *priv;

	dev = alloc_candev(sizeof(struct c_can_priv), C_CAN_MSG_OBJ_TX_NUM);
	if (!dev)
		return NULL;

	priv = netdev_priv(dev);
	spin_lock_init(&priv->xmit_lock);
	netif_napi_add(dev, &priv->napi, c_can_poll, C_CAN_NAPI_WEIGHT);

	priv->dev = dev;
	priv->can.bittiming_const = &c_can_bittiming_const;
	priv->can.do_set_mode = c_can_set_mode;
	priv->can.do_get_berr_counter = c_can_get_berr_counter;
	priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
					CAN_CTRLMODE_LISTENONLY |
					CAN_CTRLMODE_BERR_REPORTING;

	return dev;
}
EXPORT_SYMBOL_GPL(alloc_c_can_dev);

#ifdef CONFIG_PM
int c_can_power_down(struct net_device *dev)
{
	u32 val;
	unsigned long time_out;
	struct c_can_priv *priv = netdev_priv(dev);

	if (!(dev->flags & IFF_UP))
		return 0;

	WARN_ON(priv->type != BOSCH_D_CAN);

	/* set PDR value so the device goes to power down mode */
	val = priv->read_reg(priv, C_CAN_CTRL_EX_REG);
	val |= CONTROL_EX_PDR;
	priv->write_reg(priv, C_CAN_CTRL_EX_REG, val);

	/* Wait for the PDA bit to get set */
	time_out = jiffies + msecs_to_jiffies(INIT_WAIT_MS);
	while (!(priv->read_reg(priv, C_CAN_STS_REG) & STATUS_PDA) &&
				time_after(time_out, jiffies))
		cpu_relax();

	if (time_after(jiffies, time_out))
		return -ETIMEDOUT;

	c_can_stop(dev);

	c_can_reset_ram(priv, false);
	c_can_pm_runtime_put_sync(priv);

	return 0;
}
EXPORT_SYMBOL_GPL(c_can_power_down);

int c_can_power_up(struct net_device *dev)
{
	u32 val;
	unsigned long time_out;
	struct c_can_priv *priv = netdev_priv(dev);
	int ret;

	if (!(dev->flags & IFF_UP))
		return 0;

	WARN_ON(priv->type != BOSCH_D_CAN);

	c_can_pm_runtime_get_sync(priv);
	c_can_reset_ram(priv, true);

	/* Clear PDR and INIT bits */
	val = priv->read_reg(priv, C_CAN_CTRL_EX_REG);
	val &= ~CONTROL_EX_PDR;
	priv->write_reg(priv, C_CAN_CTRL_EX_REG, val);
	val = priv->read_reg(priv, C_CAN_CTRL_REG);
	val &= ~CONTROL_INIT;
	priv->write_reg(priv, C_CAN_CTRL_REG, val);

	/* Wait for the PDA bit to get clear */
	time_out = jiffies + msecs_to_jiffies(INIT_WAIT_MS);
	while ((priv->read_reg(priv, C_CAN_STS_REG) & STATUS_PDA) &&
				time_after(time_out, jiffies))
		cpu_relax();

	if (time_after(jiffies, time_out))
		return -ETIMEDOUT;

	ret = c_can_start(dev);
	if (!ret)
		c_can_enable_all_interrupts(priv, ENABLE_ALL_INTERRUPTS);

	return ret;
}
EXPORT_SYMBOL_GPL(c_can_power_up);
#endif

void free_c_can_dev(struct net_device *dev)
{
	struct c_can_priv *priv = netdev_priv(dev);

	netif_napi_del(&priv->napi);
	free_candev(dev);
}
EXPORT_SYMBOL_GPL(free_c_can_dev);

static const struct net_device_ops c_can_netdev_ops = {
	.ndo_open = c_can_open,
	.ndo_stop = c_can_close,
	.ndo_start_xmit = c_can_start_xmit,
	.ndo_change_mtu = can_change_mtu,
};

int register_c_can_dev(struct net_device *dev)
{
	struct c_can_priv *priv = netdev_priv(dev);
	int err;

	c_can_pm_runtime_enable(priv);

	dev->flags |= IFF_ECHO;	/* we support local echo */
	dev->netdev_ops = &c_can_netdev_ops;

	err = register_candev(dev);
	if (err)
		c_can_pm_runtime_disable(priv);
	else
		devm_can_led_init(dev);

	return err;
}
EXPORT_SYMBOL_GPL(register_c_can_dev);

void unregister_c_can_dev(struct net_device *dev)
{
	struct c_can_priv *priv = netdev_priv(dev);

	unregister_candev(dev);

	c_can_pm_runtime_disable(priv);
}
EXPORT_SYMBOL_GPL(unregister_c_can_dev);

MODULE_AUTHOR("Bhupesh Sharma <bhupesh.sharma@st.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("CAN bus driver for Bosch C_CAN controller");