/*
 * CAN bus driver for Bosch C_CAN controller
 *
 * Copyright (C) 2010 ST Microelectronics
 * Bhupesh Sharma <bhupesh.sharma@st.com>
 *
 * Borrowed heavily from the C_CAN driver originally written by:
 * Copyright (C) 2007
 * - Sascha Hauer, Marc Kleine-Budde, Pengutronix <s.hauer@pengutronix.de>
 * - Simon Kallweit, intefo AG <simon.kallweit@intefo.ch>
 *
 * TX and RX NAPI implementation has been borrowed from at91 CAN driver
 * written by:
 * Copyright
 * (C) 2007 by Hans J. Koch <hjk@hansjkoch.de>
 * (C) 2008, 2009 by Marc Kleine-Budde <kernel@pengutronix.de>
 *
 * Bosch C_CAN controller is compliant to CAN protocol version 2.0 part A and B.
 * Bosch C_CAN user manual can be obtained from:
 * http://www.semiconductors.bosch.de/media/en/pdf/ipmodules_1/c_can/
 * users_manual_c_can.pdf
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>

#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
#include <linux/can/led.h>

#include "c_can.h"

/* Number of interface registers */
#define IF_ENUM_REG_LEN		11
#define C_CAN_IFACE(reg, iface)	(C_CAN_IF1_##reg + (iface) * IF_ENUM_REG_LEN)

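/*
 * Illustrative note (not from the Bosch manual): C_CAN_IFACE() picks one of
 * the two register interfaces by offsetting into the register enum from
 * c_can.h. For example, C_CAN_IFACE(ARB1_REG, 1) expands to
 * C_CAN_IF1_ARB1_REG + 11, which is assumed to be the matching IF2 register
 * because each interface block is expected to span IF_ENUM_REG_LEN enum
 * entries.
 */
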
/* control extension register D_CAN specific */
#define CONTROL_EX_PDR		BIT(8)

/* control register */
#define CONTROL_TEST		BIT(7)
#define CONTROL_CCE		BIT(6)
#define CONTROL_DISABLE_AR	BIT(5)
#define CONTROL_ENABLE_AR	(0 << 5)
#define CONTROL_EIE		BIT(3)
#define CONTROL_SIE		BIT(2)
#define CONTROL_IE		BIT(1)
#define CONTROL_INIT		BIT(0)

#define CONTROL_IRQMSK		(CONTROL_EIE | CONTROL_IE | CONTROL_SIE)

/* test register */
#define TEST_RX			BIT(7)
#define TEST_TX1		BIT(6)
#define TEST_TX2		BIT(5)
#define TEST_LBACK		BIT(4)
#define TEST_SILENT		BIT(3)
#define TEST_BASIC		BIT(2)

/* status register */
#define STATUS_PDA		BIT(10)
#define STATUS_BOFF		BIT(7)
#define STATUS_EWARN		BIT(6)
#define STATUS_EPASS		BIT(5)
#define STATUS_RXOK		BIT(4)
#define STATUS_TXOK		BIT(3)

/* error counter register */
#define ERR_CNT_TEC_MASK	0xff
#define ERR_CNT_TEC_SHIFT	0
#define ERR_CNT_REC_SHIFT	8
#define ERR_CNT_REC_MASK	(0x7f << ERR_CNT_REC_SHIFT)
#define ERR_CNT_RP_SHIFT	15
#define ERR_CNT_RP_MASK		(0x1 << ERR_CNT_RP_SHIFT)

/* bit-timing register */
#define BTR_BRP_MASK		0x3f
#define BTR_BRP_SHIFT		0
#define BTR_SJW_SHIFT		6
#define BTR_SJW_MASK		(0x3 << BTR_SJW_SHIFT)
#define BTR_TSEG1_SHIFT		8
#define BTR_TSEG1_MASK		(0xf << BTR_TSEG1_SHIFT)
#define BTR_TSEG2_SHIFT		12
#define BTR_TSEG2_MASK		(0x7 << BTR_TSEG2_SHIFT)

/* brp extension register */
#define BRP_EXT_BRPE_MASK	0x0f
#define BRP_EXT_BRPE_SHIFT	0

/* IFx command request */
#define IF_COMR_BUSY		BIT(15)

/* IFx command mask */
#define IF_COMM_WR		BIT(7)
#define IF_COMM_MASK		BIT(6)
#define IF_COMM_ARB		BIT(5)
#define IF_COMM_CONTROL		BIT(4)
#define IF_COMM_CLR_INT_PND	BIT(3)
#define IF_COMM_TXRQST		BIT(2)
#define IF_COMM_CLR_NEWDAT	IF_COMM_TXRQST
#define IF_COMM_DATAA		BIT(1)
#define IF_COMM_DATAB		BIT(0)
#define IF_COMM_ALL		(IF_COMM_MASK | IF_COMM_ARB | \
				IF_COMM_CONTROL | IF_COMM_TXRQST | \
				IF_COMM_DATAA | IF_COMM_DATAB)

/* For the low buffers we clear the interrupt bit, but keep newdat */
#define IF_COMM_RCV_LOW		(IF_COMM_MASK | IF_COMM_ARB | \
				IF_COMM_CONTROL | IF_COMM_CLR_INT_PND | \
				IF_COMM_DATAA | IF_COMM_DATAB)

/* For the high buffers we clear the interrupt bit and newdat */
#define IF_COMM_RCV_HIGH	(IF_COMM_RCV_LOW | IF_COMM_CLR_NEWDAT)


/* Receive setup of message objects */
#define IF_COMM_RCV_SETUP	(IF_COMM_MASK | IF_COMM_ARB | IF_COMM_CONTROL)

/* Invalidation of message objects */
#define IF_COMM_INVAL		(IF_COMM_ARB | IF_COMM_CONTROL)

/* IFx arbitration */
#define IF_ARB_MSGVAL		BIT(15)
#define IF_ARB_MSGXTD		BIT(14)
#define IF_ARB_TRANSMIT		BIT(13)

/* IFx message control */
#define IF_MCONT_NEWDAT		BIT(15)
#define IF_MCONT_MSGLST		BIT(14)
#define IF_MCONT_INTPND		BIT(13)
#define IF_MCONT_UMASK		BIT(12)
#define IF_MCONT_TXIE		BIT(11)
#define IF_MCONT_RXIE		BIT(10)
#define IF_MCONT_RMTEN		BIT(9)
#define IF_MCONT_TXRQST		BIT(8)
#define IF_MCONT_EOB		BIT(7)
#define IF_MCONT_DLC_MASK	0xf

#define IF_MCONT_RCV		(IF_MCONT_RXIE | IF_MCONT_UMASK)
#define IF_MCONT_RCV_EOB	(IF_MCONT_RCV | IF_MCONT_EOB)

/*
 * Use IF1 for RX and IF2 for TX
 */
#define IF_RX			0
#define IF_TX			1

/* minimum timeout for checking BUSY status */
#define MIN_TIMEOUT_VALUE	6

/* Wait for ~1 sec for INIT bit */
#define INIT_WAIT_MS		1000

/* napi related */
#define C_CAN_NAPI_WEIGHT	C_CAN_MSG_OBJ_RX_NUM

/* c_can lec values */
enum c_can_lec_type {
	LEC_NO_ERROR = 0,
	LEC_STUFF_ERROR,
	LEC_FORM_ERROR,
	LEC_ACK_ERROR,
	LEC_BIT1_ERROR,
	LEC_BIT0_ERROR,
	LEC_CRC_ERROR,
	LEC_UNUSED,
	LEC_MASK = LEC_UNUSED,
};

/*
 * c_can error types:
 * Bus errors (BUS_OFF, ERROR_WARNING, ERROR_PASSIVE) are supported
 */
enum c_can_bus_error_types {
	C_CAN_NO_ERROR = 0,
	C_CAN_BUS_OFF,
	C_CAN_ERROR_WARNING,
	C_CAN_ERROR_PASSIVE,
};

static const struct can_bittiming_const c_can_bittiming_const = {
	.name = KBUILD_MODNAME,
	.tseg1_min = 2,		/* Time segment 1 = prop_seg + phase_seg1 */
	.tseg1_max = 16,
	.tseg2_min = 1,		/* Time segment 2 = phase_seg2 */
	.tseg2_max = 8,
	.sjw_max = 4,
	.brp_min = 1,
	.brp_max = 1024,	/* 6-bit BRP field + 4-bit BRPE field */
	.brp_inc = 1,
};

static inline void c_can_pm_runtime_enable(const struct c_can_priv *priv)
{
	if (priv->device)
		pm_runtime_enable(priv->device);
}

static inline void c_can_pm_runtime_disable(const struct c_can_priv *priv)
{
	if (priv->device)
		pm_runtime_disable(priv->device);
}

static inline void c_can_pm_runtime_get_sync(const struct c_can_priv *priv)
{
	if (priv->device)
		pm_runtime_get_sync(priv->device);
}

static inline void c_can_pm_runtime_put_sync(const struct c_can_priv *priv)
{
	if (priv->device)
		pm_runtime_put_sync(priv->device);
}

static inline void c_can_reset_ram(const struct c_can_priv *priv, bool enable)
{
	if (priv->raminit)
		priv->raminit(priv, enable);
}

static inline int get_tx_next_msg_obj(const struct c_can_priv *priv)
{
	return (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) +
			C_CAN_MSG_OBJ_TX_FIRST;
}

static inline int get_tx_echo_msg_obj(int txecho)
{
	return (txecho & C_CAN_NEXT_MSG_OBJ_MASK) + C_CAN_MSG_OBJ_TX_FIRST;
}

static u32 c_can_read_reg32(struct c_can_priv *priv, enum reg index)
{
	u32 val = priv->read_reg(priv, index);
	val |= ((u32) priv->read_reg(priv, index + 1)) << 16;
	return val;
}

static void c_can_irq_control(struct c_can_priv *priv, bool enable)
{
	u32 ctrl = priv->read_reg(priv, C_CAN_CTRL_REG) & ~CONTROL_IRQMSK;

	if (enable)
		ctrl |= CONTROL_IRQMSK;

	priv->write_reg(priv, C_CAN_CTRL_REG, ctrl);
}

static void c_can_obj_update(struct net_device *dev, int iface, u32 cmd, u32 obj)
{
	struct c_can_priv *priv = netdev_priv(dev);
	int cnt, reg = C_CAN_IFACE(COMREQ_REG, iface);

	priv->write_reg(priv, reg + 1, cmd);
	priv->write_reg(priv, reg, obj);

	for (cnt = MIN_TIMEOUT_VALUE; cnt; cnt--) {
		if (!(priv->read_reg(priv, reg) & IF_COMR_BUSY))
			return;
		udelay(1);
	}
	netdev_err(dev, "Updating object timed out\n");

}

static inline void c_can_object_get(struct net_device *dev, int iface,
				    u32 obj, u32 cmd)
{
	c_can_obj_update(dev, iface, cmd, obj);
}

static inline void c_can_object_put(struct net_device *dev, int iface,
				    u32 obj, u32 cmd)
{
	c_can_obj_update(dev, iface, cmd | IF_COMM_WR, obj);
}

static void c_can_write_msg_object(struct net_device *dev,
			int iface, struct can_frame *frame, int objno)
{
	int i;
	u16 flags = 0;
	unsigned int id;
	struct c_can_priv *priv = netdev_priv(dev);

	if (!(frame->can_id & CAN_RTR_FLAG))
		flags |= IF_ARB_TRANSMIT;

	if (frame->can_id & CAN_EFF_FLAG) {
		id = frame->can_id & CAN_EFF_MASK;
		flags |= IF_ARB_MSGXTD;
	} else
		id = ((frame->can_id & CAN_SFF_MASK) << 18);
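	/*
	 * Standard 11-bit identifiers sit in bits 28..18 of the arbitration
	 * registers, hence the shift by 18 above; extended 29-bit IDs use
	 * bits 28..0 directly (this mirrors the (arb >> 18) decode in
	 * c_can_read_msg_object() below).
	 */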

	flags |= IF_ARB_MSGVAL;

	priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface),
				IFX_WRITE_LOW_16BIT(id));
	priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), flags |
				IFX_WRITE_HIGH_16BIT(id));

	for (i = 0; i < frame->can_dlc; i += 2) {
		priv->write_reg(priv, C_CAN_IFACE(DATA1_REG, iface) + i / 2,
				frame->data[i] | (frame->data[i + 1] << 8));
	}

	/* enable interrupt for this message object */
	priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface),
			IF_MCONT_TXIE | IF_MCONT_TXRQST | IF_MCONT_EOB |
			frame->can_dlc);
	c_can_object_put(dev, iface, objno, IF_COMM_ALL);
}

static inline void c_can_activate_all_lower_rx_msg_obj(struct net_device *dev,
							int iface)
{
	int i;

	for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_MSG_RX_LOW_LAST; i++)
		c_can_object_get(dev, iface, i, IF_COMM_CLR_NEWDAT);
}

static int c_can_handle_lost_msg_obj(struct net_device *dev,
				     int iface, int objno, u32 ctrl)
{
	struct net_device_stats *stats = &dev->stats;
	struct c_can_priv *priv = netdev_priv(dev);
	struct can_frame *frame;
	struct sk_buff *skb;

	ctrl &= ~(IF_MCONT_MSGLST | IF_MCONT_INTPND | IF_MCONT_NEWDAT);
	priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), ctrl);
	c_can_object_put(dev, iface, objno, IF_COMM_CONTROL);

	stats->rx_errors++;
	stats->rx_over_errors++;

	/* create an error msg */
	skb = alloc_can_err_skb(dev, &frame);
	if (unlikely(!skb))
		return 0;

	frame->can_id |= CAN_ERR_CRTL;
	frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;

	netif_receive_skb(skb);
	return 1;
}

static int c_can_read_msg_object(struct net_device *dev, int iface, u32 ctrl)
{
	struct net_device_stats *stats = &dev->stats;
	struct c_can_priv *priv = netdev_priv(dev);
	struct can_frame *frame;
	struct sk_buff *skb;
	u32 arb, data;

	skb = alloc_can_skb(dev, &frame);
	if (!skb) {
		stats->rx_dropped++;
		return -ENOMEM;
	}

	frame->can_dlc = get_can_dlc(ctrl & 0x0F);

	arb = priv->read_reg(priv, C_CAN_IFACE(ARB1_REG, iface));
	arb |= priv->read_reg(priv, C_CAN_IFACE(ARB2_REG, iface)) << 16;

	if (arb & (IF_ARB_MSGXTD << 16))
		frame->can_id = (arb & CAN_EFF_MASK) | CAN_EFF_FLAG;
	else
		frame->can_id = (arb >> 18) & CAN_SFF_MASK;

	if (arb & (IF_ARB_TRANSMIT << 16)) {
		frame->can_id |= CAN_RTR_FLAG;
	} else {
		int i, dreg = C_CAN_IFACE(DATA1_REG, iface);

		for (i = 0; i < frame->can_dlc; i += 2, dreg++) {
			data = priv->read_reg(priv, dreg);
			frame->data[i] = data;
			frame->data[i + 1] = data >> 8;
		}
	}

	stats->rx_packets++;
	stats->rx_bytes += frame->can_dlc;

	netif_receive_skb(skb);
	return 0;
}

static void c_can_setup_receive_object(struct net_device *dev, int iface,
				       u32 obj, u32 mask, u32 id, u32 mcont)
{
	struct c_can_priv *priv = netdev_priv(dev);

	mask |= BIT(29);
	priv->write_reg(priv, C_CAN_IFACE(MASK1_REG, iface), mask);
	priv->write_reg(priv, C_CAN_IFACE(MASK2_REG, iface), mask >> 16);

	id |= IF_ARB_MSGVAL << 16;
	priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), id);
	priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), id >> 16);

	priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), mcont);
	c_can_object_put(dev, iface, obj, IF_COMM_RCV_SETUP);
}

static void c_can_inval_msg_object(struct net_device *dev, int iface, int obj)
{
	struct c_can_priv *priv = netdev_priv(dev);

	priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), 0);
	priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), 0);
	priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), 0);

	c_can_object_put(dev, iface, obj, IF_COMM_INVAL);
}

static inline int c_can_is_next_tx_obj_busy(struct c_can_priv *priv, int objno)
{
	int val = c_can_read_reg32(priv, C_CAN_TXRQST1_REG);

	/*
	 * as transmission request register's bit n-1 corresponds to
	 * message object n, we need to handle the same properly.
	 */
	if (val & (1 << (objno - 1)))
		return 1;

	return 0;
}

static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
					struct net_device *dev)
{
	u32 msg_obj_no;
	struct c_can_priv *priv = netdev_priv(dev);
	struct can_frame *frame = (struct can_frame *)skb->data;

	if (can_dropped_invalid_skb(dev, skb))
		return NETDEV_TX_OK;

	spin_lock_bh(&priv->xmit_lock);
	msg_obj_no = get_tx_next_msg_obj(priv);

	/* prepare message object for transmission */
	c_can_write_msg_object(dev, IF_TX, frame, msg_obj_no);
	priv->dlc[msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST] = frame->can_dlc;
	can_put_echo_skb(skb, dev, msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);

	/*
	 * we have to stop the queue in case of a wrap around or
	 * if the next TX message object is still in use
	 */
	priv->tx_next++;
	if (c_can_is_next_tx_obj_busy(priv, get_tx_next_msg_obj(priv)) ||
			(priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) == 0)
		netif_stop_queue(dev);
	spin_unlock_bh(&priv->xmit_lock);

	return NETDEV_TX_OK;
}

static int c_can_wait_for_ctrl_init(struct net_device *dev,
				    struct c_can_priv *priv, u32 init)
{
	int retry = 0;

	while (init != (priv->read_reg(priv, C_CAN_CTRL_REG) & CONTROL_INIT)) {
		udelay(10);
		if (retry++ > 1000) {
			netdev_err(dev, "CCTRL: set CONTROL_INIT failed\n");
			return -EIO;
		}
	}
	return 0;
}

static int c_can_set_bittiming(struct net_device *dev)
{
	unsigned int reg_btr, reg_brpe, ctrl_save;
	u8 brp, brpe, sjw, tseg1, tseg2;
	u32 ten_bit_brp;
	struct c_can_priv *priv = netdev_priv(dev);
	const struct can_bittiming *bt = &priv->can.bittiming;
	int res;

	/* c_can provides a 6-bit brp and 4-bit brpe fields */
	ten_bit_brp = bt->brp - 1;
	brp = ten_bit_brp & BTR_BRP_MASK;
	brpe = ten_bit_brp >> 6;
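	/*
	 * Worked example (illustrative values only): bt->brp = 100 gives
	 * ten_bit_brp = 99 = 0b0001100011, so brp = 0x23 goes into BTR and
	 * brpe = 0x1 into the BRP extension register, for an effective
	 * prescaler of ((brpe << 6) | brp) + 1 = 100.
	 */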

	sjw = bt->sjw - 1;
	tseg1 = bt->prop_seg + bt->phase_seg1 - 1;
	tseg2 = bt->phase_seg2 - 1;
	reg_btr = brp | (sjw << BTR_SJW_SHIFT) | (tseg1 << BTR_TSEG1_SHIFT) |
			(tseg2 << BTR_TSEG2_SHIFT);
	reg_brpe = brpe & BRP_EXT_BRPE_MASK;

	netdev_info(dev,
		"setting BTR=%04x BRPE=%04x\n", reg_btr, reg_brpe);

	ctrl_save = priv->read_reg(priv, C_CAN_CTRL_REG);
	ctrl_save &= ~CONTROL_INIT;
	priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_CCE | CONTROL_INIT);
	res = c_can_wait_for_ctrl_init(dev, priv, CONTROL_INIT);
	if (res)
		return res;

	priv->write_reg(priv, C_CAN_BTR_REG, reg_btr);
	priv->write_reg(priv, C_CAN_BRPEXT_REG, reg_brpe);
	priv->write_reg(priv, C_CAN_CTRL_REG, ctrl_save);

	return c_can_wait_for_ctrl_init(dev, priv, 0);
}

/*
 * Configure C_CAN message objects for Tx and Rx purposes:
 * C_CAN provides a total of 32 message objects that can be configured
 * either for Tx or Rx purposes. Here the first 16 message objects are used as
 * a reception FIFO. The end of reception FIFO is signified by the EoB bit
 * being SET. The remaining 16 message objects are kept aside for Tx purposes.
 * See user guide document for further details on configuring message
 * objects.
 */
static void c_can_configure_msg_objects(struct net_device *dev)
{
	int i;

	/* first invalidate all message objects */
	for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_NO_OF_OBJECTS; i++)
		c_can_inval_msg_object(dev, IF_RX, i);

	/* setup receive message objects */
	for (i = C_CAN_MSG_OBJ_RX_FIRST; i < C_CAN_MSG_OBJ_RX_LAST; i++)
		c_can_setup_receive_object(dev, IF_RX, i, 0, 0, IF_MCONT_RCV);

	c_can_setup_receive_object(dev, IF_RX, C_CAN_MSG_OBJ_RX_LAST, 0, 0,
				   IF_MCONT_RCV_EOB);
}

/*
 * Configure C_CAN chip:
 * - enable/disable auto-retransmission
 * - set operating mode
 * - configure message objects
 */
static int c_can_chip_config(struct net_device *dev)
{
	struct c_can_priv *priv = netdev_priv(dev);

	/* enable automatic retransmission */
	priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_ENABLE_AR);

	if ((priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) &&
	    (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)) {
		/* loopback + silent mode : useful for hot self-test */
		priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_TEST);
		priv->write_reg(priv, C_CAN_TEST_REG, TEST_LBACK | TEST_SILENT);
	} else if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
		/* loopback mode : useful for self-test function */
		priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_TEST);
		priv->write_reg(priv, C_CAN_TEST_REG, TEST_LBACK);
	} else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) {
		/* silent mode : bus-monitoring mode */
		priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_TEST);
		priv->write_reg(priv, C_CAN_TEST_REG, TEST_SILENT);
	}

	/* configure message objects */
	c_can_configure_msg_objects(dev);

	/* set a `lec` value so that we can check for updates later */
	priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);

	/* set bittiming params */
	return c_can_set_bittiming(dev);
}

static int c_can_start(struct net_device *dev)
{
	struct c_can_priv *priv = netdev_priv(dev);
	int err;

	/* basic c_can configuration */
	err = c_can_chip_config(dev);
	if (err)
		return err;

	/* Setup the command for new messages */
	priv->comm_rcv_high = priv->type != BOSCH_D_CAN ?
		IF_COMM_RCV_LOW : IF_COMM_RCV_HIGH;

	priv->can.state = CAN_STATE_ERROR_ACTIVE;

	/* reset tx helper pointers and the rx mask */
	priv->tx_next = priv->tx_echo = 0;
	priv->rxmasked = 0;

	return 0;
}

static void c_can_stop(struct net_device *dev)
{
	struct c_can_priv *priv = netdev_priv(dev);

	c_can_irq_control(priv, false);
	priv->can.state = CAN_STATE_STOPPED;
}

static int c_can_set_mode(struct net_device *dev, enum can_mode mode)
{
	struct c_can_priv *priv = netdev_priv(dev);
	int err;

	switch (mode) {
	case CAN_MODE_START:
		err = c_can_start(dev);
		if (err)
			return err;
		netif_wake_queue(dev);
		c_can_irq_control(priv, true);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int __c_can_get_berr_counter(const struct net_device *dev,
				    struct can_berr_counter *bec)
{
	unsigned int reg_err_counter;
	struct c_can_priv *priv = netdev_priv(dev);

	reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG);
	bec->rxerr = (reg_err_counter & ERR_CNT_REC_MASK) >>
				ERR_CNT_REC_SHIFT;
	bec->txerr = reg_err_counter & ERR_CNT_TEC_MASK;

	return 0;
}

static int c_can_get_berr_counter(const struct net_device *dev,
				  struct can_berr_counter *bec)
{
	struct c_can_priv *priv = netdev_priv(dev);
	int err;

	c_can_pm_runtime_get_sync(priv);
	err = __c_can_get_berr_counter(dev, bec);
	c_can_pm_runtime_put_sync(priv);

	return err;
}

/*
 * priv->tx_echo holds the number of the oldest can_frame put for
 * transmission into the hardware, but not yet ACKed by the CAN tx
 * complete IRQ.
 *
 * We iterate from priv->tx_echo to priv->tx_next and check if the
 * packet has been transmitted, echo it back to the CAN framework.
 * If we discover a not yet transmitted packet, stop looking for more.
 */
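/*
 * Note: tx_next and tx_echo are free-running counters; their difference
 * (see the loop condition below) is the number of frames still queued in
 * the hardware.
 */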
static void c_can_do_tx(struct net_device *dev)
{
	struct c_can_priv *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	u32 val, obj, pkts = 0, bytes = 0;

	spin_lock_bh(&priv->xmit_lock);

	for (; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
		obj = get_tx_echo_msg_obj(priv->tx_echo);
		val = c_can_read_reg32(priv, C_CAN_TXRQST1_REG);

		if (val & (1 << (obj - 1)))
			break;

		can_get_echo_skb(dev, obj - C_CAN_MSG_OBJ_TX_FIRST);
		bytes += priv->dlc[obj - C_CAN_MSG_OBJ_TX_FIRST];
		pkts++;
		c_can_inval_msg_object(dev, IF_TX, obj);
	}

	/* restart queue if wrap-up or if queue stalled on last pkt */
	if (((priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) != 0) ||
			((priv->tx_echo & C_CAN_NEXT_MSG_OBJ_MASK) == 0))
		netif_wake_queue(dev);

	spin_unlock_bh(&priv->xmit_lock);

	if (pkts) {
		stats->tx_bytes += bytes;
		stats->tx_packets += pkts;
		can_led_event(dev, CAN_LED_EVENT_TX);
	}
}

/*
 * If we have a gap in the pending bits, that means we either
 * raced with the hardware or failed to readout all upper
 * objects in the last run due to quota limit.
 */
static u32 c_can_adjust_pending(u32 pend)
{
	u32 weight, lasts;

	if (pend == RECEIVE_OBJECT_BITS)
		return pend;

	/*
	 * If the last set bit is larger than the number of pending
	 * bits we have a gap.
	 */
	weight = hweight32(pend);
	lasts = fls(pend);

	/* If the bits are linear, nothing to do */
	if (lasts == weight)
		return pend;

	/*
	 * Find the first set bit after the gap. We walk backwards
	 * from the last set bit.
	 */
	for (lasts--; pend & (1 << (lasts - 1)); lasts--);

	return pend & ~((1 << lasts) - 1);
}
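/*
 * Illustrative example: pend = 0xc1 (objects 1, 7 and 8 pending) has
 * hweight32() == 3 but fls() == 8, i.e. a gap. The backward walk above
 * stops at the gap, so c_can_adjust_pending() returns 0xc0 and the upper
 * objects are read first; object 1 is picked up on the next iteration of
 * the caller's loop.
 */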

static inline void c_can_rx_object_get(struct net_device *dev,
				       struct c_can_priv *priv, u32 obj)
{
#ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING
	if (obj < C_CAN_MSG_RX_LOW_LAST)
		c_can_object_get(dev, IF_RX, obj, IF_COMM_RCV_LOW);
	else
#endif
		c_can_object_get(dev, IF_RX, obj, priv->comm_rcv_high);
}

static inline void c_can_rx_finalize(struct net_device *dev,
				     struct c_can_priv *priv, u32 obj)
{
#ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING
	if (obj < C_CAN_MSG_RX_LOW_LAST)
		priv->rxmasked |= BIT(obj - 1);
	else if (obj == C_CAN_MSG_RX_LOW_LAST) {
		priv->rxmasked = 0;
		/* activate all lower message objects */
		c_can_activate_all_lower_rx_msg_obj(dev, IF_RX);
	}
#endif
	if (priv->type != BOSCH_D_CAN)
		c_can_object_get(dev, IF_RX, obj, IF_COMM_CLR_NEWDAT);
}

static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv,
			      u32 pend, int quota)
{
	u32 pkts = 0, ctrl, obj;

	while ((obj = ffs(pend)) && quota > 0) {
		pend &= ~BIT(obj - 1);

		c_can_rx_object_get(dev, priv, obj);
		ctrl = priv->read_reg(priv, C_CAN_IFACE(MSGCTRL_REG, IF_RX));

		if (ctrl & IF_MCONT_MSGLST) {
			int n = c_can_handle_lost_msg_obj(dev, IF_RX, obj, ctrl);

			pkts += n;
			quota -= n;
			continue;
		}

		/*
		 * This really should not happen, but this covers some
		 * odd HW behaviour. Do not remove that unless you
		 * want to brick your machine.
		 */
		if (!(ctrl & IF_MCONT_NEWDAT))
			continue;

		/* read the data from the message object */
		c_can_read_msg_object(dev, IF_RX, ctrl);

		c_can_rx_finalize(dev, priv, obj);

		pkts++;
		quota--;
	}

	return pkts;
}

static inline u32 c_can_get_pending(struct c_can_priv *priv)
{
	u32 pend = priv->read_reg(priv, C_CAN_NEWDAT1_REG);

#ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING
	pend &= ~priv->rxmasked;
#endif
	return pend;
}

/*
 * theory of operation:
 *
 * c_can core saves a received CAN message into the first free message
 * object it finds free (starting with the lowest). Bits NEWDAT and
 * INTPND are set for this message object indicating that a new message
 * has arrived. To work-around this issue, we keep two groups of message
 * objects whose partitioning is defined by C_CAN_MSG_OBJ_RX_SPLIT.
 *
 * If CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING = y
 *
 * To ensure in-order frame reception we use the following
 * approach while re-activating a message object to receive further
 * frames:
 * - if the current message object number is lower than
 *   C_CAN_MSG_RX_LOW_LAST, do not clear the NEWDAT bit while clearing
 *   the INTPND bit.
 * - if the current message object number is equal to
 *   C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of all lower
 *   receive message objects.
 * - if the current message object number is greater than
 *   C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of
 *   only this message object.
 *
 * This can cause packet loss!
 *
 * If CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING = n
 *
 * We clear the newdat bit right away.
 *
 * This can result in packet reordering when the readout is slow.
 */
static int c_can_do_rx_poll(struct net_device *dev, int quota)
{
	struct c_can_priv *priv = netdev_priv(dev);
	u32 pkts = 0, pend = 0, toread, n;

	/*
	 * It is faster to read only one 16bit register. This is only possible
	 * for a maximum number of 16 objects.
	 */
	BUILD_BUG_ON_MSG(C_CAN_MSG_OBJ_RX_LAST > 16,
			"Implementation does not support more message objects than 16");

	while (quota > 0) {
		if (!pend) {
			pend = c_can_get_pending(priv);
			if (!pend)
				break;
			/*
			 * If the pending field has a gap, handle the
			 * bits above the gap first.
			 */
			toread = c_can_adjust_pending(pend);
		} else {
			toread = pend;
		}
		/* Remove the bits from pend */
		pend &= ~toread;
		/* Read the objects */
		n = c_can_read_objects(dev, priv, toread, quota);
		pkts += n;
		quota -= n;
	}

	if (pkts)
		can_led_event(dev, CAN_LED_EVENT_RX);

	return pkts;
}

static int c_can_handle_state_change(struct net_device *dev,
				enum c_can_bus_error_types error_type)
{
	unsigned int reg_err_counter;
	unsigned int rx_err_passive;
	struct c_can_priv *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;
	struct can_berr_counter bec;

	switch (error_type) {
	case C_CAN_ERROR_WARNING:
		/* error warning state */
		priv->can.can_stats.error_warning++;
		priv->can.state = CAN_STATE_ERROR_WARNING;
		break;
	case C_CAN_ERROR_PASSIVE:
		/* error passive state */
		priv->can.can_stats.error_passive++;
		priv->can.state = CAN_STATE_ERROR_PASSIVE;
		break;
	case C_CAN_BUS_OFF:
		/* bus-off state */
		priv->can.state = CAN_STATE_BUS_OFF;
		can_bus_off(dev);
		break;
	default:
		break;
	}

	/* propagate the error condition to the CAN stack */
	skb = alloc_can_err_skb(dev, &cf);
	if (unlikely(!skb))
		return 0;

	__c_can_get_berr_counter(dev, &bec);
	reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG);
	rx_err_passive = (reg_err_counter & ERR_CNT_RP_MASK) >>
				ERR_CNT_RP_SHIFT;

	switch (error_type) {
	case C_CAN_ERROR_WARNING:
		/* error warning state */
		cf->can_id |= CAN_ERR_CRTL;
		cf->data[1] = (bec.txerr > bec.rxerr) ?
			CAN_ERR_CRTL_TX_WARNING :
			CAN_ERR_CRTL_RX_WARNING;
		cf->data[6] = bec.txerr;
		cf->data[7] = bec.rxerr;

		break;
	case C_CAN_ERROR_PASSIVE:
		/* error passive state */
		cf->can_id |= CAN_ERR_CRTL;
		if (rx_err_passive)
			cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
		if (bec.txerr > 127)
			cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;

		cf->data[6] = bec.txerr;
		cf->data[7] = bec.rxerr;
		break;
	case C_CAN_BUS_OFF:
		/* bus-off state */
		cf->can_id |= CAN_ERR_BUSOFF;
		can_bus_off(dev);
		break;
	default:
		break;
	}

	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;
	netif_receive_skb(skb);

	return 1;
}

static int c_can_handle_bus_err(struct net_device *dev,
				enum c_can_lec_type lec_type)
{
	struct c_can_priv *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;

	/*
	 * early exit if no lec update or no error.
	 * no lec update means that no CAN bus event has been detected
	 * since CPU wrote 0x7 value to status reg.
	 */
	if (lec_type == LEC_UNUSED || lec_type == LEC_NO_ERROR)
		return 0;

	if (!(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING))
		return 0;

	/* common for all type of bus errors */
	priv->can.can_stats.bus_error++;
	stats->rx_errors++;

	/* propagate the error condition to the CAN stack */
	skb = alloc_can_err_skb(dev, &cf);
	if (unlikely(!skb))
		return 0;

	/*
	 * check for 'last error code' which tells us the
	 * type of the last error to occur on the CAN bus
	 */
	cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
	cf->data[2] |= CAN_ERR_PROT_UNSPEC;

	switch (lec_type) {
	case LEC_STUFF_ERROR:
		netdev_dbg(dev, "stuff error\n");
		cf->data[2] |= CAN_ERR_PROT_STUFF;
		break;
	case LEC_FORM_ERROR:
		netdev_dbg(dev, "form error\n");
		cf->data[2] |= CAN_ERR_PROT_FORM;
		break;
	case LEC_ACK_ERROR:
		netdev_dbg(dev, "ack error\n");
		cf->data[3] |= (CAN_ERR_PROT_LOC_ACK |
				CAN_ERR_PROT_LOC_ACK_DEL);
		break;
	case LEC_BIT1_ERROR:
		netdev_dbg(dev, "bit1 error\n");
		cf->data[2] |= CAN_ERR_PROT_BIT1;
		break;
	case LEC_BIT0_ERROR:
		netdev_dbg(dev, "bit0 error\n");
		cf->data[2] |= CAN_ERR_PROT_BIT0;
		break;
	case LEC_CRC_ERROR:
		netdev_dbg(dev, "CRC error\n");
		cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ |
				CAN_ERR_PROT_LOC_CRC_DEL);
		break;
	default:
		break;
	}

	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;
	netif_receive_skb(skb);
	return 1;
}

static int c_can_poll(struct napi_struct *napi, int quota)
{
	struct net_device *dev = napi->dev;
	struct c_can_priv *priv = netdev_priv(dev);
	u16 curr, last = priv->last_status;
	int work_done = 0;

	priv->last_status = curr = priv->read_reg(priv, C_CAN_STS_REG);
	/* Ack status on C_CAN. D_CAN is self clearing */
	if (priv->type != BOSCH_D_CAN)
		priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);

	/* handle state changes */
	if ((curr & STATUS_EWARN) && (!(last & STATUS_EWARN))) {
		netdev_dbg(dev, "entered error warning state\n");
		work_done += c_can_handle_state_change(dev, C_CAN_ERROR_WARNING);
	}

	if ((curr & STATUS_EPASS) && (!(last & STATUS_EPASS))) {
		netdev_dbg(dev, "entered error passive state\n");
		work_done += c_can_handle_state_change(dev, C_CAN_ERROR_PASSIVE);
	}

	if ((curr & STATUS_BOFF) && (!(last & STATUS_BOFF))) {
		netdev_dbg(dev, "entered bus off state\n");
		work_done += c_can_handle_state_change(dev, C_CAN_BUS_OFF);
		goto end;
	}

	/* handle bus recovery events */
	if ((!(curr & STATUS_BOFF)) && (last & STATUS_BOFF)) {
		netdev_dbg(dev, "left bus off state\n");
		priv->can.state = CAN_STATE_ERROR_ACTIVE;
	}
	if ((!(curr & STATUS_EPASS)) && (last & STATUS_EPASS)) {
		netdev_dbg(dev, "left error passive state\n");
		priv->can.state = CAN_STATE_ERROR_ACTIVE;
	}

	/* handle lec errors on the bus */
	work_done += c_can_handle_bus_err(dev, curr & LEC_MASK);

	/* Handle Tx/Rx events. We do this unconditionally */
	work_done += c_can_do_rx_poll(dev, (quota - work_done));
	c_can_do_tx(dev);

end:
	if (work_done < quota) {
		napi_complete(napi);
		/* enable all IRQs if we are not in bus off state */
		if (priv->can.state != CAN_STATE_BUS_OFF)
			c_can_irq_control(priv, true);
	}

	return work_done;
}

static irqreturn_t c_can_isr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct c_can_priv *priv = netdev_priv(dev);

	if (!priv->read_reg(priv, C_CAN_INT_REG))
		return IRQ_NONE;

	/* disable all interrupts and schedule the NAPI */
	c_can_irq_control(priv, false);
	napi_schedule(&priv->napi);

	return IRQ_HANDLED;
}

static int c_can_open(struct net_device *dev)
{
	int err;
	struct c_can_priv *priv = netdev_priv(dev);

	c_can_pm_runtime_get_sync(priv);
	c_can_reset_ram(priv, true);

	/* open the can device */
	err = open_candev(dev);
	if (err) {
		netdev_err(dev, "failed to open can device\n");
		goto exit_open_fail;
	}

	/* register interrupt handler */
	err = request_irq(dev->irq, &c_can_isr, IRQF_SHARED, dev->name,
				dev);
	if (err < 0) {
		netdev_err(dev, "failed to request interrupt\n");
		goto exit_irq_fail;
	}

	/* start the c_can controller */
	err = c_can_start(dev);
	if (err)
		goto exit_start_fail;

	can_led_event(dev, CAN_LED_EVENT_OPEN);

	napi_enable(&priv->napi);
	/* enable status change, error and module interrupts */
	c_can_irq_control(priv, true);
	netif_start_queue(dev);

	return 0;

exit_start_fail:
	free_irq(dev->irq, dev);
exit_irq_fail:
	close_candev(dev);
exit_open_fail:
	c_can_reset_ram(priv, false);
	c_can_pm_runtime_put_sync(priv);
	return err;
}

static int c_can_close(struct net_device *dev)
{
	struct c_can_priv *priv = netdev_priv(dev);

	netif_stop_queue(dev);
	napi_disable(&priv->napi);
	c_can_stop(dev);
	free_irq(dev->irq, dev);
	close_candev(dev);

	c_can_reset_ram(priv, false);
	c_can_pm_runtime_put_sync(priv);

	can_led_event(dev, CAN_LED_EVENT_STOP);

	return 0;
}

struct net_device *alloc_c_can_dev(void)
{
	struct net_device *dev;
	struct c_can_priv *priv;

	dev = alloc_candev(sizeof(struct c_can_priv), C_CAN_MSG_OBJ_TX_NUM);
	if (!dev)
		return NULL;

	priv = netdev_priv(dev);
	spin_lock_init(&priv->xmit_lock);
	netif_napi_add(dev, &priv->napi, c_can_poll, C_CAN_NAPI_WEIGHT);

	priv->dev = dev;
	priv->can.bittiming_const = &c_can_bittiming_const;
	priv->can.do_set_mode = c_can_set_mode;
	priv->can.do_get_berr_counter = c_can_get_berr_counter;
	priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
					CAN_CTRLMODE_LISTENONLY |
					CAN_CTRLMODE_BERR_REPORTING;

	return dev;
}
EXPORT_SYMBOL_GPL(alloc_c_can_dev);

#ifdef CONFIG_PM
int c_can_power_down(struct net_device *dev)
{
	u32 val;
	unsigned long time_out;
	struct c_can_priv *priv = netdev_priv(dev);

	if (!(dev->flags & IFF_UP))
		return 0;

	WARN_ON(priv->type != BOSCH_D_CAN);

	/* set PDR value so the device goes to power down mode */
	val = priv->read_reg(priv, C_CAN_CTRL_EX_REG);
	val |= CONTROL_EX_PDR;
	priv->write_reg(priv, C_CAN_CTRL_EX_REG, val);

	/* Wait for the PDA bit to get set */
	time_out = jiffies + msecs_to_jiffies(INIT_WAIT_MS);
	while (!(priv->read_reg(priv, C_CAN_STS_REG) & STATUS_PDA) &&
				time_after(time_out, jiffies))
		cpu_relax();

	if (time_after(jiffies, time_out))
		return -ETIMEDOUT;

	c_can_stop(dev);

	c_can_reset_ram(priv, false);
	c_can_pm_runtime_put_sync(priv);

	return 0;
}
EXPORT_SYMBOL_GPL(c_can_power_down);

int c_can_power_up(struct net_device *dev)
{
	u32 val;
	unsigned long time_out;
	struct c_can_priv *priv = netdev_priv(dev);
	int ret;

	if (!(dev->flags & IFF_UP))
		return 0;

	WARN_ON(priv->type != BOSCH_D_CAN);

	c_can_pm_runtime_get_sync(priv);
	c_can_reset_ram(priv, true);

	/* Clear PDR and INIT bits */
	val = priv->read_reg(priv, C_CAN_CTRL_EX_REG);
	val &= ~CONTROL_EX_PDR;
	priv->write_reg(priv, C_CAN_CTRL_EX_REG, val);
	val = priv->read_reg(priv, C_CAN_CTRL_REG);
	val &= ~CONTROL_INIT;
	priv->write_reg(priv, C_CAN_CTRL_REG, val);

	/* Wait for the PDA bit to get clear */
	time_out = jiffies + msecs_to_jiffies(INIT_WAIT_MS);
	while ((priv->read_reg(priv, C_CAN_STS_REG) & STATUS_PDA) &&
				time_after(time_out, jiffies))
		cpu_relax();

	if (time_after(jiffies, time_out))
		return -ETIMEDOUT;

	ret = c_can_start(dev);
	if (!ret)
		c_can_irq_control(priv, true);

	return ret;
}
EXPORT_SYMBOL_GPL(c_can_power_up);
#endif

void free_c_can_dev(struct net_device *dev)
{
	struct c_can_priv *priv = netdev_priv(dev);

	netif_napi_del(&priv->napi);
	free_candev(dev);
}
EXPORT_SYMBOL_GPL(free_c_can_dev);

static const struct net_device_ops c_can_netdev_ops = {
	.ndo_open = c_can_open,
	.ndo_stop = c_can_close,
	.ndo_start_xmit = c_can_start_xmit,
	.ndo_change_mtu = can_change_mtu,
};

int register_c_can_dev(struct net_device *dev)
{
	struct c_can_priv *priv = netdev_priv(dev);
	int err;

	c_can_pm_runtime_enable(priv);

	dev->flags |= IFF_ECHO;	/* we support local echo */
	dev->netdev_ops = &c_can_netdev_ops;

	err = register_candev(dev);
	if (err)
		c_can_pm_runtime_disable(priv);
	else
		devm_can_led_init(dev);

	return err;
}
EXPORT_SYMBOL_GPL(register_c_can_dev);

void unregister_c_can_dev(struct net_device *dev)
{
	struct c_can_priv *priv = netdev_priv(dev);

	unregister_candev(dev);

	c_can_pm_runtime_disable(priv);
}
EXPORT_SYMBOL_GPL(unregister_c_can_dev);

MODULE_AUTHOR("Bhupesh Sharma <bhupesh.sharma@st.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("CAN bus driver for Bosch C_CAN controller");