/*
 * CAN bus driver for Bosch C_CAN controller
 *
 * Copyright (C) 2010 ST Microelectronics
 * Bhupesh Sharma <bhupesh.sharma@st.com>
 *
 * Borrowed heavily from the C_CAN driver originally written by:
 * Copyright (C) 2007
 * - Sascha Hauer, Marc Kleine-Budde, Pengutronix <s.hauer@pengutronix.de>
 * - Simon Kallweit, intefo AG <simon.kallweit@intefo.ch>
 *
 * The TX and RX NAPI implementation has been borrowed from the at91 CAN
 * driver written by:
 * Copyright
 * (C) 2007 by Hans J. Koch <hjk@hansjkoch.de>
 * (C) 2008, 2009 by Marc Kleine-Budde <kernel@pengutronix.de>
 *
 * The Bosch C_CAN controller is compliant with CAN protocol version 2.0
 * parts A and B. The Bosch C_CAN user manual can be obtained from:
 * http://www.semiconductors.bosch.de/media/en/pdf/ipmodules_1/c_can/
 * users_manual_c_can.pdf
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>

#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
#include <linux/can/led.h>

#include "c_can.h"

/* Number of interface registers */
#define IF_ENUM_REG_LEN		11
#define C_CAN_IFACE(reg, iface)	(C_CAN_IF1_##reg + (iface) * IF_ENUM_REG_LEN)
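/*
 * Example: C_CAN_IFACE(COMREQ_REG, 1) resolves to C_CAN_IF1_COMREQ_REG + 11,
 * i.e. the IF2 copy of the command request register, assuming the register
 * enum in c_can.h places the IF2 block IF_ENUM_REG_LEN entries after IF1.
 */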

/* control extension register D_CAN specific */
#define CONTROL_EX_PDR		BIT(8)

/* control register */
#define CONTROL_TEST		BIT(7)
#define CONTROL_CCE		BIT(6)
#define CONTROL_DISABLE_AR	BIT(5)
#define CONTROL_ENABLE_AR	(0 << 5)
#define CONTROL_EIE		BIT(3)
#define CONTROL_SIE		BIT(2)
#define CONTROL_IE		BIT(1)
#define CONTROL_INIT		BIT(0)

#define CONTROL_IRQMSK		(CONTROL_EIE | CONTROL_IE | CONTROL_SIE)

/* test register */
#define TEST_RX			BIT(7)
#define TEST_TX1		BIT(6)
#define TEST_TX2		BIT(5)
#define TEST_LBACK		BIT(4)
#define TEST_SILENT		BIT(3)
#define TEST_BASIC		BIT(2)

/* status register */
#define STATUS_PDA		BIT(10)
#define STATUS_BOFF		BIT(7)
#define STATUS_EWARN		BIT(6)
#define STATUS_EPASS		BIT(5)
#define STATUS_RXOK		BIT(4)
#define STATUS_TXOK		BIT(3)

/* error counter register */
#define ERR_CNT_TEC_MASK	0xff
#define ERR_CNT_TEC_SHIFT	0
#define ERR_CNT_REC_SHIFT	8
#define ERR_CNT_REC_MASK	(0x7f << ERR_CNT_REC_SHIFT)
#define ERR_CNT_RP_SHIFT	15
#define ERR_CNT_RP_MASK		(0x1 << ERR_CNT_RP_SHIFT)

/* bit-timing register */
#define BTR_BRP_MASK		0x3f
#define BTR_BRP_SHIFT		0
#define BTR_SJW_SHIFT		6
#define BTR_SJW_MASK		(0x3 << BTR_SJW_SHIFT)
#define BTR_TSEG1_SHIFT		8
#define BTR_TSEG1_MASK		(0xf << BTR_TSEG1_SHIFT)
#define BTR_TSEG2_SHIFT		12
#define BTR_TSEG2_MASK		(0x7 << BTR_TSEG2_SHIFT)

/* brp extension register */
#define BRP_EXT_BRPE_MASK	0x0f
#define BRP_EXT_BRPE_SHIFT	0

/* IFx command request */
#define IF_COMR_BUSY		BIT(15)

/* IFx command mask */
#define IF_COMM_WR		BIT(7)
#define IF_COMM_MASK		BIT(6)
#define IF_COMM_ARB		BIT(5)
#define IF_COMM_CONTROL		BIT(4)
#define IF_COMM_CLR_INT_PND	BIT(3)
#define IF_COMM_TXRQST		BIT(2)
#define IF_COMM_CLR_NEWDAT	IF_COMM_TXRQST
#define IF_COMM_DATAA		BIT(1)
#define IF_COMM_DATAB		BIT(0)
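/* Full TX transfer: mask, arbitration, control, both data halves and TXRQST */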
#define IF_COMM_ALL		(IF_COMM_MASK | IF_COMM_ARB | \
				 IF_COMM_CONTROL | IF_COMM_TXRQST | \
				 IF_COMM_DATAA | IF_COMM_DATAB)

/* For the low buffers we clear the interrupt bit, but keep newdat */
#define IF_COMM_RCV_LOW		(IF_COMM_MASK | IF_COMM_ARB | \
				 IF_COMM_CONTROL | IF_COMM_CLR_INT_PND | \
				 IF_COMM_DATAA | IF_COMM_DATAB)

/* For the high buffers we clear the interrupt bit and newdat */
#define IF_COMM_RCV_HIGH	(IF_COMM_RCV_LOW | IF_COMM_CLR_NEWDAT)

/* Receive setup of message objects */
#define IF_COMM_RCV_SETUP	(IF_COMM_MASK | IF_COMM_ARB | IF_COMM_CONTROL)

/* Invalidation of message objects */
#define IF_COMM_INVAL		(IF_COMM_ARB | IF_COMM_CONTROL)

/* IFx arbitration */
#define IF_ARB_MSGVAL		BIT(15)
#define IF_ARB_MSGXTD		BIT(14)
#define IF_ARB_TRANSMIT		BIT(13)

/* IFx message control */
#define IF_MCONT_NEWDAT		BIT(15)
#define IF_MCONT_MSGLST		BIT(14)
#define IF_MCONT_INTPND		BIT(13)
#define IF_MCONT_UMASK		BIT(12)
#define IF_MCONT_TXIE		BIT(11)
#define IF_MCONT_RXIE		BIT(10)
#define IF_MCONT_RMTEN		BIT(9)
#define IF_MCONT_TXRQST		BIT(8)
#define IF_MCONT_EOB		BIT(7)
#define IF_MCONT_DLC_MASK	0xf

#define IF_MCONT_RCV		(IF_MCONT_RXIE | IF_MCONT_UMASK)
#define IF_MCONT_RCV_EOB	(IF_MCONT_RCV | IF_MCONT_EOB)

/*
 * Use IF1 for RX and IF2 for TX
 */
#define IF_RX			0
#define IF_TX			1

/* minimum timeout for checking BUSY status */
#define MIN_TIMEOUT_VALUE	6

/* Wait for ~1 sec for INIT bit */
#define INIT_WAIT_MS		1000

/* napi related */
#define C_CAN_NAPI_WEIGHT	C_CAN_MSG_OBJ_RX_NUM

/* c_can lec values */
enum c_can_lec_type {
	LEC_NO_ERROR = 0,
	LEC_STUFF_ERROR,
	LEC_FORM_ERROR,
	LEC_ACK_ERROR,
	LEC_BIT1_ERROR,
	LEC_BIT0_ERROR,
	LEC_CRC_ERROR,
	LEC_UNUSED,
	LEC_MASK = LEC_UNUSED,
};

/*
 * c_can error types:
 * Bus errors (BUS_OFF, ERROR_WARNING, ERROR_PASSIVE) are supported
 */
enum c_can_bus_error_types {
	C_CAN_NO_ERROR = 0,
	C_CAN_BUS_OFF,
	C_CAN_ERROR_WARNING,
	C_CAN_ERROR_PASSIVE,
};

static const struct can_bittiming_const c_can_bittiming_const = {
	.name = KBUILD_MODNAME,
	.tseg1_min = 2,		/* Time segment 1 = prop_seg + phase_seg1 */
	.tseg1_max = 16,
	.tseg2_min = 1,		/* Time segment 2 = phase_seg2 */
	.tseg2_max = 8,
	.sjw_max = 4,
	.brp_min = 1,
	.brp_max = 1024,	/* 6-bit BRP field + 4-bit BRPE field */
	.brp_inc = 1,
};

static inline void c_can_pm_runtime_enable(const struct c_can_priv *priv)
{
	if (priv->device)
		pm_runtime_enable(priv->device);
}

static inline void c_can_pm_runtime_disable(const struct c_can_priv *priv)
{
	if (priv->device)
		pm_runtime_disable(priv->device);
}

static inline void c_can_pm_runtime_get_sync(const struct c_can_priv *priv)
{
	if (priv->device)
		pm_runtime_get_sync(priv->device);
}

static inline void c_can_pm_runtime_put_sync(const struct c_can_priv *priv)
{
	if (priv->device)
		pm_runtime_put_sync(priv->device);
}

static inline void c_can_reset_ram(const struct c_can_priv *priv, bool enable)
{
	if (priv->raminit)
		priv->raminit(priv, enable);
}

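/*
 * tx_next and tx_echo are free-running counters; masking them with
 * C_CAN_NEXT_MSG_OBJ_MASK and adding C_CAN_MSG_OBJ_TX_FIRST maps them
 * onto the TX message objects (the upper half of the 32 hardware
 * objects, see c_can.h for the exact split).
 */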
static inline int get_tx_next_msg_obj(const struct c_can_priv *priv)
{
	return (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) +
			C_CAN_MSG_OBJ_TX_FIRST;
}

static inline int get_tx_echo_msg_obj(int txecho)
{
	return (txecho & C_CAN_NEXT_MSG_OBJ_MASK) + C_CAN_MSG_OBJ_TX_FIRST;
}

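/* Registers are accessed 16 bits at a time; a 32-bit value spans two indices */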
static u32 c_can_read_reg32(struct c_can_priv *priv, enum reg index)
{
	u32 val = priv->read_reg(priv, index);
	val |= ((u32) priv->read_reg(priv, index + 1)) << 16;
	return val;
}

static void c_can_irq_control(struct c_can_priv *priv, bool enable)
{
	u32 ctrl = priv->read_reg(priv, C_CAN_CTRL_REG) & ~CONTROL_IRQMSK;

	if (enable)
		ctrl |= CONTROL_IRQMSK;

	priv->write_reg(priv, C_CAN_CTRL_REG, ctrl);
}

static inline int c_can_msg_obj_is_busy(struct c_can_priv *priv, int iface)
{
	int count = MIN_TIMEOUT_VALUE;

	while (count && priv->read_reg(priv,
				C_CAN_IFACE(COMREQ_REG, iface)) &
				IF_COMR_BUSY) {
		count--;
		udelay(1);
	}

	if (!count)
		return 1;

	return 0;
}

static inline void c_can_object_get(struct net_device *dev,
					int iface, int objno, int mask)
{
	struct c_can_priv *priv = netdev_priv(dev);

	/*
	 * As per the spec, after writing the message object number into the
	 * IF command request register, the transfer between the interface
	 * register and message RAM must complete within 6 CAN-CLK
	 * periods.
	 */
	priv->write_reg(priv, C_CAN_IFACE(COMMSK_REG, iface),
			IFX_WRITE_LOW_16BIT(mask));
	priv->write_reg(priv, C_CAN_IFACE(COMREQ_REG, iface),
			IFX_WRITE_LOW_16BIT(objno));

	if (c_can_msg_obj_is_busy(priv, iface))
		netdev_err(dev, "timed out in object get\n");
}

static inline void c_can_object_put(struct net_device *dev,
					int iface, int objno, int mask)
{
	struct c_can_priv *priv = netdev_priv(dev);

	/*
	 * As per the spec, after writing the message object number into the
	 * IF command request register, the transfer between the interface
	 * register and message RAM must complete within 6 CAN-CLK
	 * periods.
	 */
	priv->write_reg(priv, C_CAN_IFACE(COMMSK_REG, iface),
			(IF_COMM_WR | IFX_WRITE_LOW_16BIT(mask)));
	priv->write_reg(priv, C_CAN_IFACE(COMREQ_REG, iface),
			IFX_WRITE_LOW_16BIT(objno));

	if (c_can_msg_obj_is_busy(priv, iface))
		netdev_err(dev, "timed out in object put\n");
}

static void c_can_write_msg_object(struct net_device *dev,
		int iface, struct can_frame *frame, int objno)
{
	int i;
	u16 flags = 0;
	unsigned int id;
	struct c_can_priv *priv = netdev_priv(dev);

	if (!(frame->can_id & CAN_RTR_FLAG))
		flags |= IF_ARB_TRANSMIT;

	if (frame->can_id & CAN_EFF_FLAG) {
		id = frame->can_id & CAN_EFF_MASK;
		flags |= IF_ARB_MSGXTD;
	} else
		id = ((frame->can_id & CAN_SFF_MASK) << 18);

	flags |= IF_ARB_MSGVAL;

	priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface),
			IFX_WRITE_LOW_16BIT(id));
	priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), flags |
			IFX_WRITE_HIGH_16BIT(id));

	for (i = 0; i < frame->can_dlc; i += 2) {
		priv->write_reg(priv, C_CAN_IFACE(DATA1_REG, iface) + i / 2,
				frame->data[i] | (frame->data[i + 1] << 8));
	}

	/* enable interrupt for this message object */
	priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface),
			IF_MCONT_TXIE | IF_MCONT_TXRQST | IF_MCONT_EOB |
			frame->can_dlc);
	c_can_object_put(dev, iface, objno, IF_COMM_ALL);
}

static inline void c_can_activate_all_lower_rx_msg_obj(struct net_device *dev,
							int iface)
{
	int i;

	for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_MSG_RX_LOW_LAST; i++)
		c_can_object_get(dev, iface, i, IF_COMM_CLR_NEWDAT);
}

static int c_can_handle_lost_msg_obj(struct net_device *dev,
				     int iface, int objno, u32 ctrl)
{
	struct net_device_stats *stats = &dev->stats;
	struct c_can_priv *priv = netdev_priv(dev);
	struct can_frame *frame;
	struct sk_buff *skb;

	ctrl &= ~(IF_MCONT_MSGLST | IF_MCONT_INTPND | IF_MCONT_NEWDAT);
	priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), ctrl);
	c_can_object_put(dev, iface, objno, IF_COMM_CONTROL);

	stats->rx_errors++;
	stats->rx_over_errors++;

	/* create an error msg */
	skb = alloc_can_err_skb(dev, &frame);
	if (unlikely(!skb))
		return 0;

	frame->can_id |= CAN_ERR_CRTL;
	frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;

	netif_receive_skb(skb);
	return 1;
}

static int c_can_read_msg_object(struct net_device *dev, int iface, u32 ctrl)
{
	struct net_device_stats *stats = &dev->stats;
	struct c_can_priv *priv = netdev_priv(dev);
	struct can_frame *frame;
	struct sk_buff *skb;
	u32 arb, data;

	skb = alloc_can_skb(dev, &frame);
	if (!skb) {
		stats->rx_dropped++;
		return -ENOMEM;
	}

	frame->can_dlc = get_can_dlc(ctrl & 0x0F);

	arb = priv->read_reg(priv, C_CAN_IFACE(ARB1_REG, iface));
	arb |= priv->read_reg(priv, C_CAN_IFACE(ARB2_REG, iface)) << 16;

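	/* The IF_ARB_* flags live in ARB2, i.e. the upper 16 bits of arb */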
	if (arb & (IF_ARB_MSGXTD << 16))
		frame->can_id = (arb & CAN_EFF_MASK) | CAN_EFF_FLAG;
	else
		frame->can_id = (arb >> 18) & CAN_SFF_MASK;

	if (arb & (IF_ARB_TRANSMIT << 16)) {
		frame->can_id |= CAN_RTR_FLAG;
	} else {
		int i, dreg = C_CAN_IFACE(DATA1_REG, iface);

		for (i = 0; i < frame->can_dlc; i += 2, dreg++) {
			data = priv->read_reg(priv, dreg);
			frame->data[i] = data;
			frame->data[i + 1] = data >> 8;
		}
	}

	stats->rx_packets++;
	stats->rx_bytes += frame->can_dlc;

	netif_receive_skb(skb);
	return 0;
}

static void c_can_setup_receive_object(struct net_device *dev, int iface,
				       u32 obj, u32 mask, u32 id, u32 mcont)
{
	struct c_can_priv *priv = netdev_priv(dev);

	mask |= BIT(29);
	priv->write_reg(priv, C_CAN_IFACE(MASK1_REG, iface), mask);
	priv->write_reg(priv, C_CAN_IFACE(MASK2_REG, iface), mask >> 16);

	id |= IF_ARB_MSGVAL << 16;
	priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), id);
	priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), id >> 16);

	priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), mcont);
	c_can_object_put(dev, iface, obj, IF_COMM_RCV_SETUP);
}

static void c_can_inval_msg_object(struct net_device *dev, int iface, int obj)
{
	struct c_can_priv *priv = netdev_priv(dev);

	priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), 0);
	priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), 0);
	priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), 0);

	c_can_object_put(dev, iface, obj, IF_COMM_INVAL);
}

static inline int c_can_is_next_tx_obj_busy(struct c_can_priv *priv, int objno)
{
	int val = c_can_read_reg32(priv, C_CAN_TXRQST1_REG);

	/*
	 * Bit n-1 of the transmission request register corresponds to
	 * message object n, so shift down by one when testing it.
	 */
	if (val & (1 << (objno - 1)))
		return 1;

	return 0;
}

static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	u32 msg_obj_no;
	struct c_can_priv *priv = netdev_priv(dev);
	struct can_frame *frame = (struct can_frame *)skb->data;

	if (can_dropped_invalid_skb(dev, skb))
		return NETDEV_TX_OK;

	spin_lock_bh(&priv->xmit_lock);
	msg_obj_no = get_tx_next_msg_obj(priv);

	/* prepare message object for transmission */
	c_can_write_msg_object(dev, IF_TX, frame, msg_obj_no);
	priv->dlc[msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST] = frame->can_dlc;
	can_put_echo_skb(skb, dev, msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);

	/*
	 * we have to stop the queue in case of a wrap-around or
	 * if the next TX message object is still in use
	 */
	priv->tx_next++;
	if (c_can_is_next_tx_obj_busy(priv, get_tx_next_msg_obj(priv)) ||
	    (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) == 0)
		netif_stop_queue(dev);
	spin_unlock_bh(&priv->xmit_lock);

	return NETDEV_TX_OK;
}

static int c_can_wait_for_ctrl_init(struct net_device *dev,
				    struct c_can_priv *priv, u32 init)
{
	int retry = 0;

	while (init != (priv->read_reg(priv, C_CAN_CTRL_REG) & CONTROL_INIT)) {
		udelay(10);
		if (retry++ > 1000) {
			netdev_err(dev, "CCTRL: set CONTROL_INIT failed\n");
			return -EIO;
		}
	}
	return 0;
}

Bhupesh Sharma881ff672011-02-13 22:51:44 -0800524static int c_can_set_bittiming(struct net_device *dev)
525{
526 unsigned int reg_btr, reg_brpe, ctrl_save;
527 u8 brp, brpe, sjw, tseg1, tseg2;
528 u32 ten_bit_brp;
529 struct c_can_priv *priv = netdev_priv(dev);
530 const struct can_bittiming *bt = &priv->can.bittiming;
Thomas Gleixner9fac1d12014-03-18 17:19:08 +0000531 int res;
Bhupesh Sharma881ff672011-02-13 22:51:44 -0800532
533 /* c_can provides a 6-bit brp and 4-bit brpe fields */
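	/*
	 * Worked example of the split below: bt->brp = 65 gives
	 * ten_bit_brp = 64, so brp = 0 and brpe = 1; reassembled as
	 * (brpe << 6 | brp) + 1 this yields the requested 65 again.
	 */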
	ten_bit_brp = bt->brp - 1;
	brp = ten_bit_brp & BTR_BRP_MASK;
	brpe = ten_bit_brp >> 6;

	sjw = bt->sjw - 1;
	tseg1 = bt->prop_seg + bt->phase_seg1 - 1;
	tseg2 = bt->phase_seg2 - 1;
	reg_btr = brp | (sjw << BTR_SJW_SHIFT) | (tseg1 << BTR_TSEG1_SHIFT) |
			(tseg2 << BTR_TSEG2_SHIFT);
	reg_brpe = brpe & BRP_EXT_BRPE_MASK;

	netdev_info(dev,
		"setting BTR=%04x BRPE=%04x\n", reg_btr, reg_brpe);

	ctrl_save = priv->read_reg(priv, C_CAN_CTRL_REG);
	ctrl_save &= ~CONTROL_INIT;
	priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_CCE | CONTROL_INIT);
	res = c_can_wait_for_ctrl_init(dev, priv, CONTROL_INIT);
	if (res)
		return res;

	priv->write_reg(priv, C_CAN_BTR_REG, reg_btr);
	priv->write_reg(priv, C_CAN_BRPEXT_REG, reg_brpe);
	priv->write_reg(priv, C_CAN_CTRL_REG, ctrl_save);

	return c_can_wait_for_ctrl_init(dev, priv, 0);
}

/*
 * Configure C_CAN message objects for Tx and Rx purposes:
 * C_CAN provides a total of 32 message objects that can be configured
 * either for Tx or Rx purposes. Here the first 16 message objects are used as
 * a reception FIFO. The end of the reception FIFO is signified by the EoB bit
 * being SET. The remaining 16 message objects are kept aside for Tx purposes.
 * See the user guide document for further details on configuring message
 * objects.
 */
static void c_can_configure_msg_objects(struct net_device *dev)
{
	int i;

	/* first invalidate all message objects */
	for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_NO_OF_OBJECTS; i++)
		c_can_inval_msg_object(dev, IF_RX, i);

	/* setup receive message objects */
	for (i = C_CAN_MSG_OBJ_RX_FIRST; i < C_CAN_MSG_OBJ_RX_LAST; i++)
		c_can_setup_receive_object(dev, IF_RX, i, 0, 0, IF_MCONT_RCV);

	c_can_setup_receive_object(dev, IF_RX, C_CAN_MSG_OBJ_RX_LAST, 0, 0,
				   IF_MCONT_RCV_EOB);
}

/*
 * Configure C_CAN chip:
 * - enable/disable auto-retransmission
 * - set operating mode
 * - configure message objects
 */
static int c_can_chip_config(struct net_device *dev)
{
	struct c_can_priv *priv = netdev_priv(dev);

	/* enable automatic retransmission */
	priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_ENABLE_AR);

	if ((priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) &&
	    (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)) {
		/* loopback + silent mode : useful for hot self-test */
		priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_TEST);
		priv->write_reg(priv, C_CAN_TEST_REG, TEST_LBACK | TEST_SILENT);
	} else if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
		/* loopback mode : useful for self-test function */
		priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_TEST);
		priv->write_reg(priv, C_CAN_TEST_REG, TEST_LBACK);
	} else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) {
		/* silent mode : bus-monitoring mode */
		priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_TEST);
		priv->write_reg(priv, C_CAN_TEST_REG, TEST_SILENT);
	}

	/* configure message objects */
	c_can_configure_msg_objects(dev);

	/* set a `lec` value so that we can check for updates later */
	priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);

	/* set bittiming params */
	return c_can_set_bittiming(dev);
}

static int c_can_start(struct net_device *dev)
{
	struct c_can_priv *priv = netdev_priv(dev);
	int err;

	/* basic c_can configuration */
	err = c_can_chip_config(dev);
	if (err)
		return err;

	/* Setup the command for new messages */
	priv->comm_rcv_high = priv->type != BOSCH_D_CAN ?
		IF_COMM_RCV_LOW : IF_COMM_RCV_HIGH;

	priv->can.state = CAN_STATE_ERROR_ACTIVE;

	/* reset tx helper pointers and the rx mask */
	priv->tx_next = priv->tx_echo = 0;
	priv->rxmasked = 0;

	return 0;
}

static void c_can_stop(struct net_device *dev)
{
	struct c_can_priv *priv = netdev_priv(dev);

	c_can_irq_control(priv, false);
	priv->can.state = CAN_STATE_STOPPED;
}

static int c_can_set_mode(struct net_device *dev, enum can_mode mode)
{
	struct c_can_priv *priv = netdev_priv(dev);
	int err;

	switch (mode) {
	case CAN_MODE_START:
		err = c_can_start(dev);
		if (err)
			return err;
		netif_wake_queue(dev);
		c_can_irq_control(priv, true);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

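/*
 * As encoded by the ERR_CNT_* masks above, the ERR_CNT register packs
 * both counters: TEC in bits [7:0], REC in bits [14:8] and the RP
 * (receive error passive) flag in bit 15.
 */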
static int __c_can_get_berr_counter(const struct net_device *dev,
				    struct can_berr_counter *bec)
{
	unsigned int reg_err_counter;
	struct c_can_priv *priv = netdev_priv(dev);

	reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG);
	bec->rxerr = (reg_err_counter & ERR_CNT_REC_MASK) >>
				ERR_CNT_REC_SHIFT;
	bec->txerr = reg_err_counter & ERR_CNT_TEC_MASK;

	return 0;
}

static int c_can_get_berr_counter(const struct net_device *dev,
				  struct can_berr_counter *bec)
{
	struct c_can_priv *priv = netdev_priv(dev);
	int err;

	c_can_pm_runtime_get_sync(priv);
	err = __c_can_get_berr_counter(dev, bec);
	c_can_pm_runtime_put_sync(priv);

	return err;
}

/*
 * priv->tx_echo holds the number of the oldest can_frame put for
 * transmission into the hardware, but not yet ACKed by the CAN tx
 * complete IRQ.
 *
 * We iterate from priv->tx_echo to priv->tx_next and, for each packet
 * that has been transmitted, echo it back to the CAN framework.
 * If we discover a not yet transmitted packet, stop looking for more.
 */
static void c_can_do_tx(struct net_device *dev)
{
	struct c_can_priv *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	u32 val, obj, pkts = 0, bytes = 0;

	spin_lock_bh(&priv->xmit_lock);

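	/*
	 * A TXRQST bit that is still set means the frame has not gone out
	 * yet; stop at the first such object to keep the echo order intact.
	 */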
	for (; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
		obj = get_tx_echo_msg_obj(priv->tx_echo);
		val = c_can_read_reg32(priv, C_CAN_TXRQST1_REG);

		if (val & (1 << (obj - 1)))
			break;

		can_get_echo_skb(dev, obj - C_CAN_MSG_OBJ_TX_FIRST);
		bytes += priv->dlc[obj - C_CAN_MSG_OBJ_TX_FIRST];
		pkts++;
		c_can_inval_msg_object(dev, IF_TX, obj);
	}

	/* restart the queue on wrap-around or if it stalled on the last packet */
	if (((priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) != 0) ||
	    ((priv->tx_echo & C_CAN_NEXT_MSG_OBJ_MASK) == 0))
		netif_wake_queue(dev);

	spin_unlock_bh(&priv->xmit_lock);

	if (pkts) {
		stats->tx_bytes += bytes;
		stats->tx_packets += pkts;
		can_led_event(dev, CAN_LED_EVENT_TX);
	}
}

/*
 * If we have a gap in the pending bits, that means we either
 * raced with the hardware or failed to read out all upper
 * objects in the last run due to the quota limit.
 */
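/*
 * Worked example (just the arithmetic): pend = 0xe3, i.e. objects
 * 1, 2, 6, 7 and 8 pending, gives hweight32() = 5 but fls() = 8, so
 * there is a gap; walking back from bit 8 returns 0xe0, i.e. only
 * the objects above the gap.
 */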
static u32 c_can_adjust_pending(u32 pend)
{
	u32 weight, lasts;

	if (pend == RECEIVE_OBJECT_BITS)
		return pend;

	/*
	 * If the last set bit is larger than the number of pending
	 * bits we have a gap.
	 */
	weight = hweight32(pend);
	lasts = fls(pend);

	/* If the bits are linear, nothing to do */
	if (lasts == weight)
		return pend;

	/*
	 * Find the first set bit after the gap. We walk backwards
	 * from the last set bit.
	 */
	for (lasts--; pend & (1 << (lasts - 1)); lasts--);

	return pend & ~((1 << lasts) - 1);
}

static inline void c_can_rx_object_get(struct net_device *dev,
				       struct c_can_priv *priv, u32 obj)
{
#ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING
	if (obj < C_CAN_MSG_RX_LOW_LAST)
		c_can_object_get(dev, IF_RX, obj, IF_COMM_RCV_LOW);
	else
#endif
		c_can_object_get(dev, IF_RX, obj, priv->comm_rcv_high);
}

static inline void c_can_rx_finalize(struct net_device *dev,
				     struct c_can_priv *priv, u32 obj)
{
#ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING
	if (obj < C_CAN_MSG_RX_LOW_LAST)
		priv->rxmasked |= BIT(obj - 1);
	else if (obj == C_CAN_MSG_RX_LOW_LAST) {
		priv->rxmasked = 0;
		/* activate all lower message objects */
		c_can_activate_all_lower_rx_msg_obj(dev, IF_RX);
	}
#endif
	if (priv->type != BOSCH_D_CAN)
		c_can_object_get(dev, IF_RX, obj, IF_COMM_CLR_NEWDAT);
}

static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv,
			      u32 pend, int quota)
{
	u32 pkts = 0, ctrl, obj;

	while ((obj = ffs(pend)) && quota > 0) {
		pend &= ~BIT(obj - 1);

		c_can_rx_object_get(dev, priv, obj);
		ctrl = priv->read_reg(priv, C_CAN_IFACE(MSGCTRL_REG, IF_RX));

		if (ctrl & IF_MCONT_MSGLST) {
			int n = c_can_handle_lost_msg_obj(dev, IF_RX, obj, ctrl);

			pkts += n;
			quota -= n;
			continue;
		}

		/*
		 * This really should not happen, but this covers some
		 * odd HW behaviour. Do not remove this check unless you
		 * want to brick your machine.
		 */
		if (!(ctrl & IF_MCONT_NEWDAT))
			continue;

		/* read the data from the message object */
		c_can_read_msg_object(dev, IF_RX, ctrl);

		c_can_rx_finalize(dev, priv, obj);

		pkts++;
		quota--;
	}

	return pkts;
}

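/*
 * NEWDAT1 reports all objects with new data; in strict ordering mode the
 * low-group objects already handled in this cycle are masked out via
 * priv->rxmasked until the whole low group is reactivated.
 */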
static inline u32 c_can_get_pending(struct c_can_priv *priv)
{
	u32 pend = priv->read_reg(priv, C_CAN_NEWDAT1_REG);

#ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING
	pend &= ~priv->rxmasked;
#endif
	return pend;
}

/*
 * theory of operation:
 *
 * The c_can core saves a received CAN message into the first free message
 * object it finds (starting with the lowest). Bits NEWDAT and
 * INTPND are set for this message object, indicating that a new message
 * has arrived. Because of this fill order, frames can be delivered out of
 * order; to deal with that we keep two groups of message objects whose
 * partitioning is defined by C_CAN_MSG_OBJ_RX_SPLIT.
 *
 * If CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING = y
 *
 * To ensure in-order frame reception we use the following
 * approach while re-activating a message object to receive further
 * frames:
 * - if the current message object number is lower than
 *   C_CAN_MSG_RX_LOW_LAST, do not clear the NEWDAT bit while clearing
 *   the INTPND bit.
 * - if the current message object number is equal to
 *   C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of all lower
 *   receive message objects.
 * - if the current message object number is greater than
 *   C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of
 *   only this message object.
 *
 * This can cause packet loss!
 *
 * If CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING = n
 *
 * We clear the NEWDAT bit right away.
 *
 * This can result in packet reordering when the readout is slow.
 */
static int c_can_do_rx_poll(struct net_device *dev, int quota)
{
	struct c_can_priv *priv = netdev_priv(dev);
	u32 pkts = 0, pend = 0, toread, n;

	/*
	 * It is faster to read only one 16-bit register. This is only
	 * possible for a maximum number of 16 objects.
	 */
	BUILD_BUG_ON_MSG(C_CAN_MSG_OBJ_RX_LAST > 16,
			"Implementation does not support more message objects than 16");

	while (quota > 0) {
		if (!pend) {
			pend = c_can_get_pending(priv);
			if (!pend)
				break;
			/*
			 * If the pending field has a gap, handle the
			 * bits above the gap first.
			 */
			toread = c_can_adjust_pending(pend);
		} else {
			toread = pend;
		}
		/* Remove the bits from pend */
		pend &= ~toread;
		/* Read the objects */
		n = c_can_read_objects(dev, priv, toread, quota);
		pkts += n;
		quota -= n;
	}

	if (pkts)
		can_led_event(dev, CAN_LED_EVENT_RX);

	return pkts;
}

static int c_can_handle_state_change(struct net_device *dev,
				     enum c_can_bus_error_types error_type)
{
	unsigned int reg_err_counter;
	unsigned int rx_err_passive;
	struct c_can_priv *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;
	struct can_berr_counter bec;

	switch (error_type) {
	case C_CAN_ERROR_WARNING:
		/* error warning state */
		priv->can.can_stats.error_warning++;
		priv->can.state = CAN_STATE_ERROR_WARNING;
		break;
	case C_CAN_ERROR_PASSIVE:
		/* error passive state */
		priv->can.can_stats.error_passive++;
		priv->can.state = CAN_STATE_ERROR_PASSIVE;
		break;
	case C_CAN_BUS_OFF:
		/* bus-off state */
		priv->can.state = CAN_STATE_BUS_OFF;
		can_bus_off(dev);
		break;
	default:
		break;
	}

	/* propagate the error condition to the CAN stack */
	skb = alloc_can_err_skb(dev, &cf);
	if (unlikely(!skb))
		return 0;

	__c_can_get_berr_counter(dev, &bec);
	reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG);
	rx_err_passive = (reg_err_counter & ERR_CNT_RP_MASK) >>
				ERR_CNT_RP_SHIFT;

	switch (error_type) {
	case C_CAN_ERROR_WARNING:
		/* error warning state */
		cf->can_id |= CAN_ERR_CRTL;
		cf->data[1] = (bec.txerr > bec.rxerr) ?
			CAN_ERR_CRTL_TX_WARNING :
			CAN_ERR_CRTL_RX_WARNING;
		cf->data[6] = bec.txerr;
		cf->data[7] = bec.rxerr;

		break;
	case C_CAN_ERROR_PASSIVE:
		/* error passive state */
		cf->can_id |= CAN_ERR_CRTL;
		if (rx_err_passive)
			cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
		if (bec.txerr > 127)
			cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;

		cf->data[6] = bec.txerr;
		cf->data[7] = bec.rxerr;
		break;
	case C_CAN_BUS_OFF:
		/* bus-off state */
		cf->can_id |= CAN_ERR_BUSOFF;
		can_bus_off(dev);
		break;
	default:
		break;
	}

	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;
	netif_receive_skb(skb);

	return 1;
}

static int c_can_handle_bus_err(struct net_device *dev,
				enum c_can_lec_type lec_type)
{
	struct c_can_priv *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;

	/*
	 * early exit if no lec update or no error.
	 * no lec update means that no CAN bus event has been detected
	 * since the CPU wrote the 0x7 value to the status reg.
	 */
	if (lec_type == LEC_UNUSED || lec_type == LEC_NO_ERROR)
		return 0;

	if (!(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING))
		return 0;

	/* common for all types of bus errors */
	priv->can.can_stats.bus_error++;
	stats->rx_errors++;

	/* propagate the error condition to the CAN stack */
	skb = alloc_can_err_skb(dev, &cf);
	if (unlikely(!skb))
		return 0;

	/*
	 * check for 'last error code' which tells us the
	 * type of the last error to occur on the CAN bus
	 */
	cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
	cf->data[2] |= CAN_ERR_PROT_UNSPEC;

	switch (lec_type) {
	case LEC_STUFF_ERROR:
		netdev_dbg(dev, "stuff error\n");
		cf->data[2] |= CAN_ERR_PROT_STUFF;
		break;
	case LEC_FORM_ERROR:
		netdev_dbg(dev, "form error\n");
		cf->data[2] |= CAN_ERR_PROT_FORM;
		break;
	case LEC_ACK_ERROR:
		netdev_dbg(dev, "ack error\n");
		cf->data[3] |= (CAN_ERR_PROT_LOC_ACK |
				CAN_ERR_PROT_LOC_ACK_DEL);
		break;
	case LEC_BIT1_ERROR:
		netdev_dbg(dev, "bit1 error\n");
		cf->data[2] |= CAN_ERR_PROT_BIT1;
		break;
	case LEC_BIT0_ERROR:
		netdev_dbg(dev, "bit0 error\n");
		cf->data[2] |= CAN_ERR_PROT_BIT0;
		break;
	case LEC_CRC_ERROR:
		netdev_dbg(dev, "CRC error\n");
		cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ |
				CAN_ERR_PROT_LOC_CRC_DEL);
		break;
	default:
		break;
	}

	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;
	netif_receive_skb(skb);
	return 1;
}

static int c_can_poll(struct napi_struct *napi, int quota)
{
	struct net_device *dev = napi->dev;
	struct c_can_priv *priv = netdev_priv(dev);
	u16 curr, last = priv->last_status;
	int work_done = 0;

	priv->last_status = curr = priv->read_reg(priv, C_CAN_STS_REG);
	/* Ack status on C_CAN. D_CAN is self clearing */
	if (priv->type != BOSCH_D_CAN)
		priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);

	/* handle state changes */
	if ((curr & STATUS_EWARN) && (!(last & STATUS_EWARN))) {
		netdev_dbg(dev, "entered error warning state\n");
		work_done += c_can_handle_state_change(dev, C_CAN_ERROR_WARNING);
	}

	if ((curr & STATUS_EPASS) && (!(last & STATUS_EPASS))) {
		netdev_dbg(dev, "entered error passive state\n");
		work_done += c_can_handle_state_change(dev, C_CAN_ERROR_PASSIVE);
	}

	if ((curr & STATUS_BOFF) && (!(last & STATUS_BOFF))) {
		netdev_dbg(dev, "entered bus off state\n");
		work_done += c_can_handle_state_change(dev, C_CAN_BUS_OFF);
		goto end;
	}

	/* handle bus recovery events */
	if ((!(curr & STATUS_BOFF)) && (last & STATUS_BOFF)) {
		netdev_dbg(dev, "left bus off state\n");
		priv->can.state = CAN_STATE_ERROR_ACTIVE;
	}
	if ((!(curr & STATUS_EPASS)) && (last & STATUS_EPASS)) {
		netdev_dbg(dev, "left error passive state\n");
		priv->can.state = CAN_STATE_ERROR_ACTIVE;
	}

	/* handle lec errors on the bus */
	work_done += c_can_handle_bus_err(dev, curr & LEC_MASK);

	/* Handle Tx/Rx events. We do this unconditionally */
	work_done += c_can_do_rx_poll(dev, (quota - work_done));
	c_can_do_tx(dev);

end:
	if (work_done < quota) {
		napi_complete(napi);
		/* enable all IRQs if we are not in bus off state */
		if (priv->can.state != CAN_STATE_BUS_OFF)
			c_can_irq_control(priv, true);
	}

	return work_done;
}

static irqreturn_t c_can_isr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct c_can_priv *priv = netdev_priv(dev);

	if (!priv->read_reg(priv, C_CAN_INT_REG))
		return IRQ_NONE;

	/* disable all interrupts and schedule the NAPI */
	c_can_irq_control(priv, false);
	napi_schedule(&priv->napi);

	return IRQ_HANDLED;
}

static int c_can_open(struct net_device *dev)
{
	int err;
	struct c_can_priv *priv = netdev_priv(dev);

	c_can_pm_runtime_get_sync(priv);
	c_can_reset_ram(priv, true);

	/* open the can device */
	err = open_candev(dev);
	if (err) {
		netdev_err(dev, "failed to open can device\n");
		goto exit_open_fail;
	}

	/* register interrupt handler */
	err = request_irq(dev->irq, &c_can_isr, IRQF_SHARED, dev->name,
				dev);
	if (err < 0) {
		netdev_err(dev, "failed to request interrupt\n");
		goto exit_irq_fail;
	}

	/* start the c_can controller */
	err = c_can_start(dev);
	if (err)
		goto exit_start_fail;

	can_led_event(dev, CAN_LED_EVENT_OPEN);

	napi_enable(&priv->napi);
	/* enable status change, error and module interrupts */
	c_can_irq_control(priv, true);
	netif_start_queue(dev);

	return 0;

exit_start_fail:
	free_irq(dev->irq, dev);
exit_irq_fail:
	close_candev(dev);
exit_open_fail:
	c_can_reset_ram(priv, false);
	c_can_pm_runtime_put_sync(priv);
	return err;
}

static int c_can_close(struct net_device *dev)
{
	struct c_can_priv *priv = netdev_priv(dev);

	netif_stop_queue(dev);
	napi_disable(&priv->napi);
	c_can_stop(dev);
	free_irq(dev->irq, dev);
	close_candev(dev);

	c_can_reset_ram(priv, false);
	c_can_pm_runtime_put_sync(priv);

	can_led_event(dev, CAN_LED_EVENT_STOP);

	return 0;
}

struct net_device *alloc_c_can_dev(void)
{
	struct net_device *dev;
	struct c_can_priv *priv;

	dev = alloc_candev(sizeof(struct c_can_priv), C_CAN_MSG_OBJ_TX_NUM);
	if (!dev)
		return NULL;

	priv = netdev_priv(dev);
	spin_lock_init(&priv->xmit_lock);
	netif_napi_add(dev, &priv->napi, c_can_poll, C_CAN_NAPI_WEIGHT);

	priv->dev = dev;
	priv->can.bittiming_const = &c_can_bittiming_const;
	priv->can.do_set_mode = c_can_set_mode;
	priv->can.do_get_berr_counter = c_can_get_berr_counter;
	priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
					CAN_CTRLMODE_LISTENONLY |
					CAN_CTRLMODE_BERR_REPORTING;

	return dev;
}
EXPORT_SYMBOL_GPL(alloc_c_can_dev);

#ifdef CONFIG_PM
int c_can_power_down(struct net_device *dev)
{
	u32 val;
	unsigned long time_out;
	struct c_can_priv *priv = netdev_priv(dev);

	if (!(dev->flags & IFF_UP))
		return 0;

	WARN_ON(priv->type != BOSCH_D_CAN);

	/* set PDR value so the device goes to power down mode */
	val = priv->read_reg(priv, C_CAN_CTRL_EX_REG);
	val |= CONTROL_EX_PDR;
	priv->write_reg(priv, C_CAN_CTRL_EX_REG, val);

	/* Wait for the PDA bit to get set */
	time_out = jiffies + msecs_to_jiffies(INIT_WAIT_MS);
	while (!(priv->read_reg(priv, C_CAN_STS_REG) & STATUS_PDA) &&
				time_after(time_out, jiffies))
		cpu_relax();

	if (time_after(jiffies, time_out))
		return -ETIMEDOUT;

	c_can_stop(dev);

	c_can_reset_ram(priv, false);
	c_can_pm_runtime_put_sync(priv);

	return 0;
}
EXPORT_SYMBOL_GPL(c_can_power_down);

int c_can_power_up(struct net_device *dev)
{
	u32 val;
	unsigned long time_out;
	struct c_can_priv *priv = netdev_priv(dev);
	int ret;

	if (!(dev->flags & IFF_UP))
		return 0;

	WARN_ON(priv->type != BOSCH_D_CAN);

	c_can_pm_runtime_get_sync(priv);
	c_can_reset_ram(priv, true);

	/* Clear PDR and INIT bits */
	val = priv->read_reg(priv, C_CAN_CTRL_EX_REG);
	val &= ~CONTROL_EX_PDR;
	priv->write_reg(priv, C_CAN_CTRL_EX_REG, val);
	val = priv->read_reg(priv, C_CAN_CTRL_REG);
	val &= ~CONTROL_INIT;
	priv->write_reg(priv, C_CAN_CTRL_REG, val);

	/* Wait for the PDA bit to get clear */
	time_out = jiffies + msecs_to_jiffies(INIT_WAIT_MS);
	while ((priv->read_reg(priv, C_CAN_STS_REG) & STATUS_PDA) &&
				time_after(time_out, jiffies))
		cpu_relax();

	if (time_after(jiffies, time_out))
		return -ETIMEDOUT;

	ret = c_can_start(dev);
	if (!ret)
		c_can_irq_control(priv, true);

	return ret;
}
EXPORT_SYMBOL_GPL(c_can_power_up);
#endif

void free_c_can_dev(struct net_device *dev)
{
	struct c_can_priv *priv = netdev_priv(dev);

	netif_napi_del(&priv->napi);
	free_candev(dev);
}
EXPORT_SYMBOL_GPL(free_c_can_dev);

static const struct net_device_ops c_can_netdev_ops = {
	.ndo_open = c_can_open,
	.ndo_stop = c_can_close,
	.ndo_start_xmit = c_can_start_xmit,
	.ndo_change_mtu = can_change_mtu,
};

int register_c_can_dev(struct net_device *dev)
{
	struct c_can_priv *priv = netdev_priv(dev);
	int err;

	c_can_pm_runtime_enable(priv);

	dev->flags |= IFF_ECHO;	/* we support local echo */
	dev->netdev_ops = &c_can_netdev_ops;

	err = register_candev(dev);
	if (err)
		c_can_pm_runtime_disable(priv);
	else
		devm_can_led_init(dev);

	return err;
}
EXPORT_SYMBOL_GPL(register_c_can_dev);

void unregister_c_can_dev(struct net_device *dev)
{
	struct c_can_priv *priv = netdev_priv(dev);

	unregister_candev(dev);

	c_can_pm_runtime_disable(priv);
}
EXPORT_SYMBOL_GPL(unregister_c_can_dev);

MODULE_AUTHOR("Bhupesh Sharma <bhupesh.sharma@st.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("CAN bus driver for Bosch C_CAN controller");