/*
 * CAN bus driver for Bosch C_CAN controller
 *
 * Copyright (C) 2010 ST Microelectronics
 * Bhupesh Sharma <bhupesh.sharma@st.com>
 *
 * Borrowed heavily from the C_CAN driver originally written by:
 * Copyright (C) 2007
 * - Sascha Hauer, Marc Kleine-Budde, Pengutronix <s.hauer@pengutronix.de>
 * - Simon Kallweit, intefo AG <simon.kallweit@intefo.ch>
 *
 * TX and RX NAPI implementation has been borrowed from at91 CAN driver
 * written by:
 * Copyright
 * (C) 2007 by Hans J. Koch <hjk@hansjkoch.de>
 * (C) 2008, 2009 by Marc Kleine-Budde <kernel@pengutronix.de>
 *
 * Bosch C_CAN controller is compliant to CAN protocol version 2.0 part A and B.
 * Bosch C_CAN user manual can be obtained from:
 * http://www.semiconductors.bosch.de/media/en/pdf/ipmodules_1/c_can/
 * users_manual_c_can.pdf
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/pinctrl/consumer.h>

#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
#include <linux/can/led.h>

#include "c_can.h"

/* Number of interface registers */
#define IF_ENUM_REG_LEN		11
#define C_CAN_IFACE(reg, iface)	(C_CAN_IF1_##reg + (iface) * IF_ENUM_REG_LEN)

/* control extension register D_CAN specific */
#define CONTROL_EX_PDR		BIT(8)

/* control register */
#define CONTROL_TEST		BIT(7)
#define CONTROL_CCE		BIT(6)
#define CONTROL_DISABLE_AR	BIT(5)
#define CONTROL_ENABLE_AR	(0 << 5)
#define CONTROL_EIE		BIT(3)
#define CONTROL_SIE		BIT(2)
#define CONTROL_IE		BIT(1)
#define CONTROL_INIT		BIT(0)

#define CONTROL_IRQMSK		(CONTROL_EIE | CONTROL_IE | CONTROL_SIE)

/* test register */
#define TEST_RX			BIT(7)
#define TEST_TX1		BIT(6)
#define TEST_TX2		BIT(5)
#define TEST_LBACK		BIT(4)
#define TEST_SILENT		BIT(3)
#define TEST_BASIC		BIT(2)

/* status register */
#define STATUS_PDA		BIT(10)
#define STATUS_BOFF		BIT(7)
#define STATUS_EWARN		BIT(6)
#define STATUS_EPASS		BIT(5)
#define STATUS_RXOK		BIT(4)
#define STATUS_TXOK		BIT(3)

/* error counter register */
#define ERR_CNT_TEC_MASK	0xff
#define ERR_CNT_TEC_SHIFT	0
#define ERR_CNT_REC_SHIFT	8
#define ERR_CNT_REC_MASK	(0x7f << ERR_CNT_REC_SHIFT)
#define ERR_CNT_RP_SHIFT	15
#define ERR_CNT_RP_MASK		(0x1 << ERR_CNT_RP_SHIFT)

/* bit-timing register */
#define BTR_BRP_MASK		0x3f
#define BTR_BRP_SHIFT		0
#define BTR_SJW_SHIFT		6
#define BTR_SJW_MASK		(0x3 << BTR_SJW_SHIFT)
#define BTR_TSEG1_SHIFT		8
#define BTR_TSEG1_MASK		(0xf << BTR_TSEG1_SHIFT)
#define BTR_TSEG2_SHIFT		12
#define BTR_TSEG2_MASK		(0x7 << BTR_TSEG2_SHIFT)

/* brp extension register */
#define BRP_EXT_BRPE_MASK	0x0f
#define BRP_EXT_BRPE_SHIFT	0

/* IFx command request */
#define IF_COMR_BUSY		BIT(15)

/* IFx command mask */
#define IF_COMM_WR		BIT(7)
#define IF_COMM_MASK		BIT(6)
#define IF_COMM_ARB		BIT(5)
#define IF_COMM_CONTROL		BIT(4)
#define IF_COMM_CLR_INT_PND	BIT(3)
#define IF_COMM_TXRQST		BIT(2)
#define IF_COMM_CLR_NEWDAT	IF_COMM_TXRQST
#define IF_COMM_DATAA		BIT(1)
#define IF_COMM_DATAB		BIT(0)

/* TX buffer setup */
#define IF_COMM_TX		(IF_COMM_ARB | IF_COMM_CONTROL | \
				 IF_COMM_TXRQST | \
				 IF_COMM_DATAA | IF_COMM_DATAB)

/* For the low buffers we clear the interrupt bit, but keep newdat */
#define IF_COMM_RCV_LOW		(IF_COMM_MASK | IF_COMM_ARB | \
				 IF_COMM_CONTROL | IF_COMM_CLR_INT_PND | \
				 IF_COMM_DATAA | IF_COMM_DATAB)

/* For the high buffers we clear the interrupt bit and newdat */
#define IF_COMM_RCV_HIGH	(IF_COMM_RCV_LOW | IF_COMM_CLR_NEWDAT)

/* Receive setup of message objects */
#define IF_COMM_RCV_SETUP	(IF_COMM_MASK | IF_COMM_ARB | IF_COMM_CONTROL)

/* Invalidation of message objects */
#define IF_COMM_INVAL		(IF_COMM_ARB | IF_COMM_CONTROL)

/* IFx arbitration */
#define IF_ARB_MSGVAL		BIT(31)
#define IF_ARB_MSGXTD		BIT(30)
#define IF_ARB_TRANSMIT		BIT(29)

/* IFx message control */
#define IF_MCONT_NEWDAT		BIT(15)
#define IF_MCONT_MSGLST		BIT(14)
#define IF_MCONT_INTPND		BIT(13)
#define IF_MCONT_UMASK		BIT(12)
#define IF_MCONT_TXIE		BIT(11)
#define IF_MCONT_RXIE		BIT(10)
#define IF_MCONT_RMTEN		BIT(9)
#define IF_MCONT_TXRQST		BIT(8)
#define IF_MCONT_EOB		BIT(7)
#define IF_MCONT_DLC_MASK	0xf

#define IF_MCONT_RCV		(IF_MCONT_RXIE | IF_MCONT_UMASK)
#define IF_MCONT_RCV_EOB	(IF_MCONT_RCV | IF_MCONT_EOB)

#define IF_MCONT_TX		(IF_MCONT_TXIE | IF_MCONT_EOB)

/*
 * Use IF1 for RX and IF2 for TX
 */
#define IF_RX			0
#define IF_TX			1

/* minimum timeout for checking BUSY status */
#define MIN_TIMEOUT_VALUE	6

/* Wait for ~1 sec for INIT bit */
#define INIT_WAIT_MS		1000

/* napi related */
#define C_CAN_NAPI_WEIGHT	C_CAN_MSG_OBJ_RX_NUM

/* c_can lec values */
enum c_can_lec_type {
	LEC_NO_ERROR = 0,
	LEC_STUFF_ERROR,
	LEC_FORM_ERROR,
	LEC_ACK_ERROR,
	LEC_BIT1_ERROR,
	LEC_BIT0_ERROR,
	LEC_CRC_ERROR,
	LEC_UNUSED,
	LEC_MASK = LEC_UNUSED,
};

/*
 * c_can error types:
 * Bus errors (BUS_OFF, ERROR_WARNING, ERROR_PASSIVE) are supported
 */
enum c_can_bus_error_types {
	C_CAN_NO_ERROR = 0,
	C_CAN_BUS_OFF,
	C_CAN_ERROR_WARNING,
	C_CAN_ERROR_PASSIVE,
};

static const struct can_bittiming_const c_can_bittiming_const = {
	.name = KBUILD_MODNAME,
	.tseg1_min = 2,		/* Time segment 1 = prop_seg + phase_seg1 */
	.tseg1_max = 16,
	.tseg2_min = 1,		/* Time segment 2 = phase_seg2 */
	.tseg2_max = 8,
	.sjw_max = 4,
	.brp_min = 1,
	.brp_max = 1024,	/* 6-bit BRP field + 4-bit BRPE field */
	.brp_inc = 1,
};

static inline void c_can_pm_runtime_enable(const struct c_can_priv *priv)
{
	if (priv->device)
		pm_runtime_enable(priv->device);
}

static inline void c_can_pm_runtime_disable(const struct c_can_priv *priv)
{
	if (priv->device)
		pm_runtime_disable(priv->device);
}

static inline void c_can_pm_runtime_get_sync(const struct c_can_priv *priv)
{
	if (priv->device)
		pm_runtime_get_sync(priv->device);
}

static inline void c_can_pm_runtime_put_sync(const struct c_can_priv *priv)
{
	if (priv->device)
		pm_runtime_put_sync(priv->device);
}

static inline void c_can_reset_ram(const struct c_can_priv *priv, bool enable)
{
	if (priv->raminit)
		priv->raminit(priv, enable);
}

static void c_can_irq_control(struct c_can_priv *priv, bool enable)
{
	u32 ctrl = priv->read_reg(priv, C_CAN_CTRL_REG) & ~CONTROL_IRQMSK;

	if (enable)
		ctrl |= CONTROL_IRQMSK;

	priv->write_reg(priv, C_CAN_CTRL_REG, ctrl);
}

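/*
 * Issue an IFx command request (a transfer between the interface registers
 * and the message RAM) and busy-wait briefly until the interface clears its
 * BUSY flag.
 */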
static void c_can_obj_update(struct net_device *dev, int iface, u32 cmd, u32 obj)
{
	struct c_can_priv *priv = netdev_priv(dev);
	int cnt, reg = C_CAN_IFACE(COMREQ_REG, iface);

	priv->write_reg32(priv, reg, (cmd << 16) | obj);

	for (cnt = MIN_TIMEOUT_VALUE; cnt; cnt--) {
		if (!(priv->read_reg(priv, reg) & IF_COMR_BUSY))
			return;
		udelay(1);
	}
	netdev_err(dev, "Updating object timed out\n");
}

static inline void c_can_object_get(struct net_device *dev, int iface,
				    u32 obj, u32 cmd)
{
	c_can_obj_update(dev, iface, cmd, obj);
}

static inline void c_can_object_put(struct net_device *dev, int iface,
				    u32 obj, u32 cmd)
{
	c_can_obj_update(dev, iface, cmd | IF_COMM_WR, obj);
}

/*
 * Note: According to documentation clearing TXIE while MSGVAL is set
 * is not allowed, but works nicely on C/DCAN. And that lowers the I/O
 * load significantly.
 */
static void c_can_inval_tx_object(struct net_device *dev, int iface, int obj)
{
	struct c_can_priv *priv = netdev_priv(dev);

	priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), 0);
	c_can_object_put(dev, iface, obj, IF_COMM_INVAL);
}

static void c_can_inval_msg_object(struct net_device *dev, int iface, int obj)
{
	struct c_can_priv *priv = netdev_priv(dev);

	priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), 0);
	priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), 0);
	c_can_inval_tx_object(dev, iface, obj);
}

static void c_can_setup_tx_object(struct net_device *dev, int iface,
				  struct can_frame *frame, int idx)
{
	struct c_can_priv *priv = netdev_priv(dev);
	u16 ctrl = IF_MCONT_TX | frame->can_dlc;
	bool rtr = frame->can_id & CAN_RTR_FLAG;
	u32 arb = IF_ARB_MSGVAL;
	int i;

	if (frame->can_id & CAN_EFF_FLAG) {
		arb |= frame->can_id & CAN_EFF_MASK;
		arb |= IF_ARB_MSGXTD;
	} else {
		arb |= (frame->can_id & CAN_SFF_MASK) << 18;
	}

	if (!rtr)
		arb |= IF_ARB_TRANSMIT;

	/*
	 * If we change the DIR bit, we need to invalidate the buffer
	 * first, i.e. clear the MSGVAL flag in the arbiter.
	 */
	if (rtr != (bool)test_bit(idx, &priv->tx_dir)) {
		u32 obj = idx + C_CAN_MSG_OBJ_TX_FIRST;

		c_can_inval_msg_object(dev, iface, obj);
		change_bit(idx, &priv->tx_dir);
	}

	priv->write_reg32(priv, C_CAN_IFACE(ARB1_REG, iface), arb);

	priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), ctrl);

	for (i = 0; i < frame->can_dlc; i += 2) {
		priv->write_reg(priv, C_CAN_IFACE(DATA1_REG, iface) + i / 2,
				frame->data[i] | (frame->data[i + 1] << 8));
	}
}

static inline void c_can_activate_all_lower_rx_msg_obj(struct net_device *dev,
							int iface)
{
	int i;

	for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_MSG_RX_LOW_LAST; i++)
		c_can_object_get(dev, iface, i, IF_COMM_CLR_NEWDAT);
}

static int c_can_handle_lost_msg_obj(struct net_device *dev,
				     int iface, int objno, u32 ctrl)
{
	struct net_device_stats *stats = &dev->stats;
	struct c_can_priv *priv = netdev_priv(dev);
	struct can_frame *frame;
	struct sk_buff *skb;

	ctrl &= ~(IF_MCONT_MSGLST | IF_MCONT_INTPND | IF_MCONT_NEWDAT);
	priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), ctrl);
	c_can_object_put(dev, iface, objno, IF_COMM_CONTROL);

	stats->rx_errors++;
	stats->rx_over_errors++;

	/* create an error msg */
	skb = alloc_can_err_skb(dev, &frame);
	if (unlikely(!skb))
		return 0;

	frame->can_id |= CAN_ERR_CRTL;
	frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;

	netif_receive_skb(skb);
	return 1;
}

static int c_can_read_msg_object(struct net_device *dev, int iface, u32 ctrl)
{
	struct net_device_stats *stats = &dev->stats;
	struct c_can_priv *priv = netdev_priv(dev);
	struct can_frame *frame;
	struct sk_buff *skb;
	u32 arb, data;

	skb = alloc_can_skb(dev, &frame);
	if (!skb) {
		stats->rx_dropped++;
		return -ENOMEM;
	}

	frame->can_dlc = get_can_dlc(ctrl & 0x0F);

	arb = priv->read_reg32(priv, C_CAN_IFACE(ARB1_REG, iface));

	if (arb & IF_ARB_MSGXTD)
		frame->can_id = (arb & CAN_EFF_MASK) | CAN_EFF_FLAG;
	else
		frame->can_id = (arb >> 18) & CAN_SFF_MASK;

	if (arb & IF_ARB_TRANSMIT) {
		frame->can_id |= CAN_RTR_FLAG;
	} else {
		int i, dreg = C_CAN_IFACE(DATA1_REG, iface);

		for (i = 0; i < frame->can_dlc; i += 2, dreg++) {
			data = priv->read_reg(priv, dreg);
			frame->data[i] = data;
			frame->data[i + 1] = data >> 8;
		}
	}

	stats->rx_packets++;
	stats->rx_bytes += frame->can_dlc;

	netif_receive_skb(skb);
	return 0;
}

static void c_can_setup_receive_object(struct net_device *dev, int iface,
				       u32 obj, u32 mask, u32 id, u32 mcont)
{
	struct c_can_priv *priv = netdev_priv(dev);

	mask |= BIT(29);
	priv->write_reg32(priv, C_CAN_IFACE(MASK1_REG, iface), mask);

	id |= IF_ARB_MSGVAL;
	priv->write_reg32(priv, C_CAN_IFACE(ARB1_REG, iface), id);

	priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), mcont);
	c_can_object_put(dev, iface, obj, IF_COMM_RCV_SETUP);
}

static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct can_frame *frame = (struct can_frame *)skb->data;
	struct c_can_priv *priv = netdev_priv(dev);
	u32 idx, obj;

	if (can_dropped_invalid_skb(dev, skb))
		return NETDEV_TX_OK;
	/*
	 * This is not a FIFO. C/D_CAN sends out the buffers
	 * prioritized. The lowest buffer number wins.
	 */
	idx = fls(atomic_read(&priv->tx_active));
	obj = idx + C_CAN_MSG_OBJ_TX_FIRST;

	/* If this is the last buffer, stop the xmit queue */
	if (idx == C_CAN_MSG_OBJ_TX_NUM - 1)
		netif_stop_queue(dev);
	/*
	 * Store the message in the interface so we can call
	 * can_put_echo_skb(). We must do this before we enable
	 * transmit as we might race against do_tx().
	 */
	c_can_setup_tx_object(dev, IF_TX, frame, idx);
	priv->dlc[idx] = frame->can_dlc;
	can_put_echo_skb(skb, dev, idx);

	/* Update the active bits */
	atomic_add((1 << idx), &priv->tx_active);
	/* Start transmission */
	c_can_object_put(dev, IF_TX, obj, IF_COMM_TX);

	return NETDEV_TX_OK;
}

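/* Busy-wait (bounded) until the INIT bit reaches the requested state */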
static int c_can_wait_for_ctrl_init(struct net_device *dev,
				    struct c_can_priv *priv, u32 init)
{
	int retry = 0;

	while (init != (priv->read_reg(priv, C_CAN_CTRL_REG) & CONTROL_INIT)) {
		udelay(10);
		if (retry++ > 1000) {
			netdev_err(dev, "CCTRL: set CONTROL_INIT failed\n");
			return -EIO;
		}
	}
	return 0;
}

static int c_can_set_bittiming(struct net_device *dev)
{
	unsigned int reg_btr, reg_brpe, ctrl_save;
	u8 brp, brpe, sjw, tseg1, tseg2;
	u32 ten_bit_brp;
	struct c_can_priv *priv = netdev_priv(dev);
	const struct can_bittiming *bt = &priv->can.bittiming;
	int res;

	/* c_can provides a 6-bit brp and 4-bit brpe fields */
	ten_bit_brp = bt->brp - 1;
	brp = ten_bit_brp & BTR_BRP_MASK;
	brpe = ten_bit_brp >> 6;

	sjw = bt->sjw - 1;
	tseg1 = bt->prop_seg + bt->phase_seg1 - 1;
	tseg2 = bt->phase_seg2 - 1;
	reg_btr = brp | (sjw << BTR_SJW_SHIFT) | (tseg1 << BTR_TSEG1_SHIFT) |
		  (tseg2 << BTR_TSEG2_SHIFT);
	reg_brpe = brpe & BRP_EXT_BRPE_MASK;

	netdev_info(dev,
		    "setting BTR=%04x BRPE=%04x\n", reg_btr, reg_brpe);

	ctrl_save = priv->read_reg(priv, C_CAN_CTRL_REG);
	ctrl_save &= ~CONTROL_INIT;
	priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_CCE | CONTROL_INIT);
	res = c_can_wait_for_ctrl_init(dev, priv, CONTROL_INIT);
	if (res)
		return res;

	priv->write_reg(priv, C_CAN_BTR_REG, reg_btr);
	priv->write_reg(priv, C_CAN_BRPEXT_REG, reg_brpe);
	priv->write_reg(priv, C_CAN_CTRL_REG, ctrl_save);

	return c_can_wait_for_ctrl_init(dev, priv, 0);
}

/*
 * Configure C_CAN message objects for Tx and Rx purposes:
 * C_CAN provides a total of 32 message objects that can be configured
 * either for Tx or Rx purposes. Here the first 16 message objects are used as
 * a reception FIFO. The end of reception FIFO is signified by the EoB bit
 * being SET. The remaining 16 message objects are kept aside for Tx purposes.
 * See user guide document for further details on configuring message
 * objects.
 */
static void c_can_configure_msg_objects(struct net_device *dev)
{
	int i;

	/* first invalidate all message objects */
	for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_NO_OF_OBJECTS; i++)
		c_can_inval_msg_object(dev, IF_RX, i);

	/* setup receive message objects */
	for (i = C_CAN_MSG_OBJ_RX_FIRST; i < C_CAN_MSG_OBJ_RX_LAST; i++)
		c_can_setup_receive_object(dev, IF_RX, i, 0, 0, IF_MCONT_RCV);

	c_can_setup_receive_object(dev, IF_RX, C_CAN_MSG_OBJ_RX_LAST, 0, 0,
				   IF_MCONT_RCV_EOB);
}

/*
 * Configure C_CAN chip:
 * - enable/disable auto-retransmission
 * - set operating mode
 * - configure message objects
 */
static int c_can_chip_config(struct net_device *dev)
{
	struct c_can_priv *priv = netdev_priv(dev);

	/* enable automatic retransmission */
	priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_ENABLE_AR);

	if ((priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) &&
	    (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)) {
		/* loopback + silent mode : useful for hot self-test */
		priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_TEST);
		priv->write_reg(priv, C_CAN_TEST_REG, TEST_LBACK | TEST_SILENT);
	} else if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
		/* loopback mode : useful for self-test function */
		priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_TEST);
		priv->write_reg(priv, C_CAN_TEST_REG, TEST_LBACK);
	} else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) {
		/* silent mode : bus-monitoring mode */
		priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_TEST);
		priv->write_reg(priv, C_CAN_TEST_REG, TEST_SILENT);
	}

	/* configure message objects */
	c_can_configure_msg_objects(dev);

	/* set a `lec` value so that we can check for updates later */
	priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);

	/* Clear all internal status */
	atomic_set(&priv->tx_active, 0);
	priv->rxmasked = 0;
	priv->tx_dir = 0;

	/* set bittiming params */
	return c_can_set_bittiming(dev);
}

static int c_can_start(struct net_device *dev)
{
	struct c_can_priv *priv = netdev_priv(dev);
	int err;

	/* basic c_can configuration */
	err = c_can_chip_config(dev);
	if (err)
		return err;

	/* Setup the command for new messages */
	priv->comm_rcv_high = priv->type != BOSCH_D_CAN ?
		IF_COMM_RCV_LOW : IF_COMM_RCV_HIGH;

	priv->can.state = CAN_STATE_ERROR_ACTIVE;

	/* activate pins */
	pinctrl_pm_select_default_state(dev->dev.parent);
	return 0;
}

static void c_can_stop(struct net_device *dev)
{
	struct c_can_priv *priv = netdev_priv(dev);

	c_can_irq_control(priv, false);

	/* put ctrl to init on stop to end ongoing transmission */
	priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_INIT);

	/* deactivate pins */
	pinctrl_pm_select_sleep_state(dev->dev.parent);
	priv->can.state = CAN_STATE_STOPPED;
}

static int c_can_set_mode(struct net_device *dev, enum can_mode mode)
{
	struct c_can_priv *priv = netdev_priv(dev);
	int err;

	switch (mode) {
	case CAN_MODE_START:
		err = c_can_start(dev);
		if (err)
			return err;
		netif_wake_queue(dev);
		c_can_irq_control(priv, true);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

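/*
 * Read the TX/RX error counters. The __ variant expects the device to be
 * runtime-PM active already; the wrapper below takes care of that.
 */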
static int __c_can_get_berr_counter(const struct net_device *dev,
				    struct can_berr_counter *bec)
{
	unsigned int reg_err_counter;
	struct c_can_priv *priv = netdev_priv(dev);

	reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG);
	bec->rxerr = (reg_err_counter & ERR_CNT_REC_MASK) >>
			ERR_CNT_REC_SHIFT;
	bec->txerr = reg_err_counter & ERR_CNT_TEC_MASK;

	return 0;
}

static int c_can_get_berr_counter(const struct net_device *dev,
				  struct can_berr_counter *bec)
{
	struct c_can_priv *priv = netdev_priv(dev);
	int err;

	c_can_pm_runtime_get_sync(priv);
	err = __c_can_get_berr_counter(dev, bec);
	c_can_pm_runtime_put_sync(priv);

	return err;
}

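/*
 * TX completion handling: release the echo skb and invalidate the message
 * object for every transmitted buffer, then wake the queue if the
 * highest-numbered TX object (the one that caused the queue to be stopped)
 * has completed.
 */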
static void c_can_do_tx(struct net_device *dev)
{
	struct c_can_priv *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	u32 idx, obj, pkts = 0, bytes = 0, pend, clr;

	clr = pend = priv->read_reg(priv, C_CAN_INTPND2_REG);

	while ((idx = ffs(pend))) {
		idx--;
		pend &= ~(1 << idx);
		obj = idx + C_CAN_MSG_OBJ_TX_FIRST;
		c_can_inval_tx_object(dev, IF_RX, obj);
		can_get_echo_skb(dev, idx);
		bytes += priv->dlc[idx];
		pkts++;
	}

	/* Clear the bits in the tx_active mask */
	atomic_sub(clr, &priv->tx_active);

	if (clr & (1 << (C_CAN_MSG_OBJ_TX_NUM - 1)))
		netif_wake_queue(dev);

	if (pkts) {
		stats->tx_bytes += bytes;
		stats->tx_packets += pkts;
		can_led_event(dev, CAN_LED_EVENT_TX);
	}
}

/*
 * If we have a gap in the pending bits, that means we either
 * raced with the hardware or failed to readout all upper
 * objects in the last run due to quota limit.
 */
static u32 c_can_adjust_pending(u32 pend)
{
	u32 weight, lasts;

	if (pend == RECEIVE_OBJECT_BITS)
		return pend;

	/*
	 * If the last set bit is larger than the number of pending
	 * bits we have a gap.
	 */
	weight = hweight32(pend);
	lasts = fls(pend);

	/* If the bits are linear, nothing to do */
	if (lasts == weight)
		return pend;

	/*
	 * Find the first set bit after the gap. We walk backwards
	 * from the last set bit.
	 */
	for (lasts--; pend & (1 << (lasts - 1)); lasts--);

	return pend & ~((1 << lasts) - 1);
}

static inline void c_can_rx_object_get(struct net_device *dev,
				       struct c_can_priv *priv, u32 obj)
{
	c_can_object_get(dev, IF_RX, obj, priv->comm_rcv_high);
}

static inline void c_can_rx_finalize(struct net_device *dev,
				     struct c_can_priv *priv, u32 obj)
{
	if (priv->type != BOSCH_D_CAN)
		c_can_object_get(dev, IF_RX, obj, IF_COMM_CLR_NEWDAT);
}

static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv,
			      u32 pend, int quota)
{
	u32 pkts = 0, ctrl, obj;

	while ((obj = ffs(pend)) && quota > 0) {
		pend &= ~BIT(obj - 1);

		c_can_rx_object_get(dev, priv, obj);
		ctrl = priv->read_reg(priv, C_CAN_IFACE(MSGCTRL_REG, IF_RX));

		if (ctrl & IF_MCONT_MSGLST) {
			int n = c_can_handle_lost_msg_obj(dev, IF_RX, obj, ctrl);

			pkts += n;
			quota -= n;
			continue;
		}

		/*
		 * This really should not happen, but this covers some
		 * odd HW behaviour. Do not remove that unless you
		 * want to brick your machine.
		 */
		if (!(ctrl & IF_MCONT_NEWDAT))
			continue;

		/* read the data from the message object */
		c_can_read_msg_object(dev, IF_RX, ctrl);

		c_can_rx_finalize(dev, priv, obj);

		pkts++;
		quota--;
	}

	return pkts;
}

static inline u32 c_can_get_pending(struct c_can_priv *priv)
{
	u32 pend = priv->read_reg(priv, C_CAN_NEWDAT1_REG);

	return pend;
}

/*
 * theory of operation:
 *
 * c_can core saves a received CAN message into the first free message
 * object it finds free (starting with the lowest). Bits NEWDAT and
 * INTPND are set for this message object indicating that a new message
 * has arrived. To work-around this issue, we keep two groups of message
 * objects whose partitioning is defined by C_CAN_MSG_OBJ_RX_SPLIT.
 *
 * We clear the newdat bit right away.
 *
 * This can result in packet reordering when the readout is slow.
 */
static int c_can_do_rx_poll(struct net_device *dev, int quota)
{
	struct c_can_priv *priv = netdev_priv(dev);
	u32 pkts = 0, pend = 0, toread, n;

	/*
	 * It is faster to read only one 16bit register. This is only possible
	 * for a maximum number of 16 objects.
	 */
	BUILD_BUG_ON_MSG(C_CAN_MSG_OBJ_RX_LAST > 16,
			"Implementation does not support more message objects than 16");

	while (quota > 0) {
		if (!pend) {
			pend = c_can_get_pending(priv);
			if (!pend)
				break;
			/*
			 * If the pending field has a gap, handle the
			 * bits above the gap first.
			 */
			toread = c_can_adjust_pending(pend);
		} else {
			toread = pend;
		}
		/* Remove the bits from pend */
		pend &= ~toread;
		/* Read the objects */
		n = c_can_read_objects(dev, priv, toread, quota);
		pkts += n;
		quota -= n;
	}

	if (pkts)
		can_led_event(dev, CAN_LED_EVENT_RX);

	return pkts;
}

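/*
 * Handle a controller state change (warning, passive or bus-off): update
 * the CAN state and statistics and forward a matching error frame to the
 * stack.
 */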
static int c_can_handle_state_change(struct net_device *dev,
				     enum c_can_bus_error_types error_type)
{
	unsigned int reg_err_counter;
	unsigned int rx_err_passive;
	struct c_can_priv *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;
	struct can_berr_counter bec;

	switch (error_type) {
	case C_CAN_ERROR_WARNING:
		/* error warning state */
		priv->can.can_stats.error_warning++;
		priv->can.state = CAN_STATE_ERROR_WARNING;
		break;
	case C_CAN_ERROR_PASSIVE:
		/* error passive state */
		priv->can.can_stats.error_passive++;
		priv->can.state = CAN_STATE_ERROR_PASSIVE;
		break;
	case C_CAN_BUS_OFF:
		/* bus-off state */
		priv->can.state = CAN_STATE_BUS_OFF;
		priv->can.can_stats.bus_off++;
		break;
	default:
		break;
	}

	/* propagate the error condition to the CAN stack */
	skb = alloc_can_err_skb(dev, &cf);
	if (unlikely(!skb))
		return 0;

	__c_can_get_berr_counter(dev, &bec);
	reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG);
	rx_err_passive = (reg_err_counter & ERR_CNT_RP_MASK) >>
				ERR_CNT_RP_SHIFT;

	switch (error_type) {
	case C_CAN_ERROR_WARNING:
		/* error warning state */
		cf->can_id |= CAN_ERR_CRTL;
		cf->data[1] = (bec.txerr > bec.rxerr) ?
			CAN_ERR_CRTL_TX_WARNING :
			CAN_ERR_CRTL_RX_WARNING;
		cf->data[6] = bec.txerr;
		cf->data[7] = bec.rxerr;

		break;
	case C_CAN_ERROR_PASSIVE:
		/* error passive state */
		cf->can_id |= CAN_ERR_CRTL;
		if (rx_err_passive)
			cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
		if (bec.txerr > 127)
			cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;

		cf->data[6] = bec.txerr;
		cf->data[7] = bec.rxerr;
		break;
	case C_CAN_BUS_OFF:
		/* bus-off state */
		cf->can_id |= CAN_ERR_BUSOFF;
		can_bus_off(dev);
		break;
	default:
		break;
	}

	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;
	netif_receive_skb(skb);

	return 1;
}

static int c_can_handle_bus_err(struct net_device *dev,
				enum c_can_lec_type lec_type)
{
	struct c_can_priv *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;

	/*
	 * early exit if no lec update or no error.
	 * no lec update means that no CAN bus event has been detected
	 * since CPU wrote 0x7 value to status reg.
	 */
	if (lec_type == LEC_UNUSED || lec_type == LEC_NO_ERROR)
		return 0;

	if (!(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING))
		return 0;

	/* common for all type of bus errors */
	priv->can.can_stats.bus_error++;
	stats->rx_errors++;

	/* propagate the error condition to the CAN stack */
	skb = alloc_can_err_skb(dev, &cf);
	if (unlikely(!skb))
		return 0;

	/*
	 * check for 'last error code' which tells us the
	 * type of the last error to occur on the CAN bus
	 */
	cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
	cf->data[2] |= CAN_ERR_PROT_UNSPEC;

	switch (lec_type) {
	case LEC_STUFF_ERROR:
		netdev_dbg(dev, "stuff error\n");
		cf->data[2] |= CAN_ERR_PROT_STUFF;
		break;
	case LEC_FORM_ERROR:
		netdev_dbg(dev, "form error\n");
		cf->data[2] |= CAN_ERR_PROT_FORM;
		break;
	case LEC_ACK_ERROR:
		netdev_dbg(dev, "ack error\n");
		cf->data[3] |= (CAN_ERR_PROT_LOC_ACK |
				CAN_ERR_PROT_LOC_ACK_DEL);
		break;
	case LEC_BIT1_ERROR:
		netdev_dbg(dev, "bit1 error\n");
		cf->data[2] |= CAN_ERR_PROT_BIT1;
		break;
	case LEC_BIT0_ERROR:
		netdev_dbg(dev, "bit0 error\n");
		cf->data[2] |= CAN_ERR_PROT_BIT0;
		break;
	case LEC_CRC_ERROR:
		netdev_dbg(dev, "CRC error\n");
		cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ |
				CAN_ERR_PROT_LOC_CRC_DEL);
		break;
	default:
		break;
	}

	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;
	netif_receive_skb(skb);
	return 1;
}

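/*
 * NAPI poll handler: latch (and on C_CAN acknowledge) the status register,
 * process state changes, bus errors and RX/TX work, and re-enable the
 * interrupts when the quota is not exhausted.
 */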
static int c_can_poll(struct napi_struct *napi, int quota)
{
	struct net_device *dev = napi->dev;
	struct c_can_priv *priv = netdev_priv(dev);
	u16 curr, last = priv->last_status;
	int work_done = 0;

	priv->last_status = curr = priv->read_reg(priv, C_CAN_STS_REG);
	/* Ack status on C_CAN. D_CAN is self clearing */
	if (priv->type != BOSCH_D_CAN)
		priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);

	/* handle state changes */
	if ((curr & STATUS_EWARN) && (!(last & STATUS_EWARN))) {
		netdev_dbg(dev, "entered error warning state\n");
		work_done += c_can_handle_state_change(dev, C_CAN_ERROR_WARNING);
	}

	if ((curr & STATUS_EPASS) && (!(last & STATUS_EPASS))) {
		netdev_dbg(dev, "entered error passive state\n");
		work_done += c_can_handle_state_change(dev, C_CAN_ERROR_PASSIVE);
	}

	if ((curr & STATUS_BOFF) && (!(last & STATUS_BOFF))) {
		netdev_dbg(dev, "entered bus off state\n");
		work_done += c_can_handle_state_change(dev, C_CAN_BUS_OFF);
		goto end;
	}

	/* handle bus recovery events */
	if ((!(curr & STATUS_BOFF)) && (last & STATUS_BOFF)) {
		netdev_dbg(dev, "left bus off state\n");
		priv->can.state = CAN_STATE_ERROR_ACTIVE;
	}
	if ((!(curr & STATUS_EPASS)) && (last & STATUS_EPASS)) {
		netdev_dbg(dev, "left error passive state\n");
		priv->can.state = CAN_STATE_ERROR_ACTIVE;
	}

	/* handle lec errors on the bus */
	work_done += c_can_handle_bus_err(dev, curr & LEC_MASK);

	/* Handle Tx/Rx events. We do this unconditionally */
	work_done += c_can_do_rx_poll(dev, (quota - work_done));
	c_can_do_tx(dev);

end:
	if (work_done < quota) {
		napi_complete(napi);
		/* enable all IRQs if we are not in bus off state */
		if (priv->can.state != CAN_STATE_BUS_OFF)
			c_can_irq_control(priv, true);
	}

	return work_done;
}

static irqreturn_t c_can_isr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct c_can_priv *priv = netdev_priv(dev);

	if (!priv->read_reg(priv, C_CAN_INT_REG))
		return IRQ_NONE;

	/* disable all interrupts and schedule the NAPI */
	c_can_irq_control(priv, false);
	napi_schedule(&priv->napi);

	return IRQ_HANDLED;
}

static int c_can_open(struct net_device *dev)
{
	int err;
	struct c_can_priv *priv = netdev_priv(dev);

	c_can_pm_runtime_get_sync(priv);
	c_can_reset_ram(priv, true);

	/* open the can device */
	err = open_candev(dev);
	if (err) {
		netdev_err(dev, "failed to open can device\n");
		goto exit_open_fail;
	}

	/* register interrupt handler */
	err = request_irq(dev->irq, &c_can_isr, IRQF_SHARED, dev->name,
			  dev);
	if (err < 0) {
		netdev_err(dev, "failed to request interrupt\n");
		goto exit_irq_fail;
	}

	/* start the c_can controller */
	err = c_can_start(dev);
	if (err)
		goto exit_start_fail;

	can_led_event(dev, CAN_LED_EVENT_OPEN);

	napi_enable(&priv->napi);
	/* enable status change, error and module interrupts */
	c_can_irq_control(priv, true);
	netif_start_queue(dev);

	return 0;

exit_start_fail:
	free_irq(dev->irq, dev);
exit_irq_fail:
	close_candev(dev);
exit_open_fail:
	c_can_reset_ram(priv, false);
	c_can_pm_runtime_put_sync(priv);
	return err;
}

static int c_can_close(struct net_device *dev)
{
	struct c_can_priv *priv = netdev_priv(dev);

	netif_stop_queue(dev);
	napi_disable(&priv->napi);
	c_can_stop(dev);
	free_irq(dev->irq, dev);
	close_candev(dev);

	c_can_reset_ram(priv, false);
	c_can_pm_runtime_put_sync(priv);

	can_led_event(dev, CAN_LED_EVENT_STOP);

	return 0;
}

struct net_device *alloc_c_can_dev(void)
{
	struct net_device *dev;
	struct c_can_priv *priv;

	dev = alloc_candev(sizeof(struct c_can_priv), C_CAN_MSG_OBJ_TX_NUM);
	if (!dev)
		return NULL;

	priv = netdev_priv(dev);
	netif_napi_add(dev, &priv->napi, c_can_poll, C_CAN_NAPI_WEIGHT);

	priv->dev = dev;
	priv->can.bittiming_const = &c_can_bittiming_const;
	priv->can.do_set_mode = c_can_set_mode;
	priv->can.do_get_berr_counter = c_can_get_berr_counter;
	priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
					CAN_CTRLMODE_LISTENONLY |
					CAN_CTRLMODE_BERR_REPORTING;

	return dev;
}
EXPORT_SYMBOL_GPL(alloc_c_can_dev);

#ifdef CONFIG_PM
int c_can_power_down(struct net_device *dev)
{
	u32 val;
	unsigned long time_out;
	struct c_can_priv *priv = netdev_priv(dev);

	if (!(dev->flags & IFF_UP))
		return 0;

	WARN_ON(priv->type != BOSCH_D_CAN);

	/* set PDR value so the device goes to power down mode */
	val = priv->read_reg(priv, C_CAN_CTRL_EX_REG);
	val |= CONTROL_EX_PDR;
	priv->write_reg(priv, C_CAN_CTRL_EX_REG, val);

	/* Wait for the PDA bit to get set */
	time_out = jiffies + msecs_to_jiffies(INIT_WAIT_MS);
	while (!(priv->read_reg(priv, C_CAN_STS_REG) & STATUS_PDA) &&
				time_after(time_out, jiffies))
		cpu_relax();

	if (time_after(jiffies, time_out))
		return -ETIMEDOUT;

	c_can_stop(dev);

	c_can_reset_ram(priv, false);
	c_can_pm_runtime_put_sync(priv);

	return 0;
}
EXPORT_SYMBOL_GPL(c_can_power_down);

int c_can_power_up(struct net_device *dev)
{
	u32 val;
	unsigned long time_out;
	struct c_can_priv *priv = netdev_priv(dev);
	int ret;

	if (!(dev->flags & IFF_UP))
		return 0;

	WARN_ON(priv->type != BOSCH_D_CAN);

	c_can_pm_runtime_get_sync(priv);
	c_can_reset_ram(priv, true);

	/* Clear PDR and INIT bits */
	val = priv->read_reg(priv, C_CAN_CTRL_EX_REG);
	val &= ~CONTROL_EX_PDR;
	priv->write_reg(priv, C_CAN_CTRL_EX_REG, val);
	val = priv->read_reg(priv, C_CAN_CTRL_REG);
	val &= ~CONTROL_INIT;
	priv->write_reg(priv, C_CAN_CTRL_REG, val);

	/* Wait for the PDA bit to get clear */
	time_out = jiffies + msecs_to_jiffies(INIT_WAIT_MS);
	while ((priv->read_reg(priv, C_CAN_STS_REG) & STATUS_PDA) &&
				time_after(time_out, jiffies))
		cpu_relax();

	if (time_after(jiffies, time_out))
		return -ETIMEDOUT;

	ret = c_can_start(dev);
	if (!ret)
		c_can_irq_control(priv, true);

	return ret;
}
EXPORT_SYMBOL_GPL(c_can_power_up);
#endif

void free_c_can_dev(struct net_device *dev)
{
	struct c_can_priv *priv = netdev_priv(dev);

	netif_napi_del(&priv->napi);
	free_candev(dev);
}
EXPORT_SYMBOL_GPL(free_c_can_dev);

static const struct net_device_ops c_can_netdev_ops = {
	.ndo_open = c_can_open,
	.ndo_stop = c_can_close,
	.ndo_start_xmit = c_can_start_xmit,
	.ndo_change_mtu = can_change_mtu,
};

int register_c_can_dev(struct net_device *dev)
{
	struct c_can_priv *priv = netdev_priv(dev);
	int err;

	/* Deactivate pins to prevent DRA7 DCAN IP from being
	 * stuck in transition when module is disabled.
	 * Pins are activated in c_can_start() and deactivated
	 * in c_can_stop()
	 */
	pinctrl_pm_select_sleep_state(dev->dev.parent);

	c_can_pm_runtime_enable(priv);

	dev->flags |= IFF_ECHO;	/* we support local echo */
	dev->netdev_ops = &c_can_netdev_ops;

	err = register_candev(dev);
	if (err)
		c_can_pm_runtime_disable(priv);
	else
		devm_can_led_init(dev);

	return err;
}
EXPORT_SYMBOL_GPL(register_c_can_dev);

void unregister_c_can_dev(struct net_device *dev)
{
	struct c_can_priv *priv = netdev_priv(dev);

	unregister_candev(dev);

	c_can_pm_runtime_disable(priv);
}
EXPORT_SYMBOL_GPL(unregister_c_can_dev);

MODULE_AUTHOR("Bhupesh Sharma <bhupesh.sharma@st.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("CAN bus driver for Bosch C_CAN controller");