/*
 * CAN bus driver for Bosch C_CAN controller
 *
 * Copyright (C) 2010 ST Microelectronics
 * Bhupesh Sharma <bhupesh.sharma@st.com>
 *
 * Borrowed heavily from the C_CAN driver originally written by:
 * Copyright (C) 2007
 * - Sascha Hauer, Marc Kleine-Budde, Pengutronix <s.hauer@pengutronix.de>
 * - Simon Kallweit, intefo AG <simon.kallweit@intefo.ch>
 *
 * TX and RX NAPI implementation has been borrowed from the at91 CAN driver
 * written by:
 * Copyright
 * (C) 2007 by Hans J. Koch <hjk@hansjkoch.de>
 * (C) 2008, 2009 by Marc Kleine-Budde <kernel@pengutronix.de>
 *
 * Bosch C_CAN controller is compliant with CAN protocol version 2.0 part A and B.
 * Bosch C_CAN user manual can be obtained from:
 * http://www.semiconductors.bosch.de/media/en/pdf/ipmodules_1/c_can/
 * users_manual_c_can.pdf
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/pinctrl/consumer.h>

#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
#include <linux/can/led.h>

#include "c_can.h"

/* Number of interface registers */
#define IF_ENUM_REG_LEN		11
#define C_CAN_IFACE(reg, iface)	(C_CAN_IF1_##reg + (iface) * IF_ENUM_REG_LEN)
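/*
 * Illustrative note: assuming the register enum in c_can.h lists the IF1 and
 * IF2 register blocks back to back and in the same order (11 entries each),
 * C_CAN_IFACE(MSGCTRL_REG, IF_TX) resolves to
 * C_CAN_IF1_MSGCTRL_REG + 1 * IF_ENUM_REG_LEN, i.e. the corresponding IF2
 * register, so the same helpers can drive either interface.
 */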

/* control extension register D_CAN specific */
#define CONTROL_EX_PDR		BIT(8)

/* control register */
#define CONTROL_TEST		BIT(7)
#define CONTROL_CCE		BIT(6)
#define CONTROL_DISABLE_AR	BIT(5)
#define CONTROL_ENABLE_AR	(0 << 5)
#define CONTROL_EIE		BIT(3)
#define CONTROL_SIE		BIT(2)
#define CONTROL_IE		BIT(1)
#define CONTROL_INIT		BIT(0)

#define CONTROL_IRQMSK		(CONTROL_EIE | CONTROL_IE | CONTROL_SIE)

/* test register */
#define TEST_RX			BIT(7)
#define TEST_TX1		BIT(6)
#define TEST_TX2		BIT(5)
#define TEST_LBACK		BIT(4)
#define TEST_SILENT		BIT(3)
#define TEST_BASIC		BIT(2)

/* status register */
#define STATUS_PDA		BIT(10)
#define STATUS_BOFF		BIT(7)
#define STATUS_EWARN		BIT(6)
#define STATUS_EPASS		BIT(5)
#define STATUS_RXOK		BIT(4)
#define STATUS_TXOK		BIT(3)

/* error counter register */
#define ERR_CNT_TEC_MASK	0xff
#define ERR_CNT_TEC_SHIFT	0
#define ERR_CNT_REC_SHIFT	8
#define ERR_CNT_REC_MASK	(0x7f << ERR_CNT_REC_SHIFT)
#define ERR_CNT_RP_SHIFT	15
#define ERR_CNT_RP_MASK		(0x1 << ERR_CNT_RP_SHIFT)

/* bit-timing register */
#define BTR_BRP_MASK		0x3f
#define BTR_BRP_SHIFT		0
#define BTR_SJW_SHIFT		6
#define BTR_SJW_MASK		(0x3 << BTR_SJW_SHIFT)
#define BTR_TSEG1_SHIFT		8
#define BTR_TSEG1_MASK		(0xf << BTR_TSEG1_SHIFT)
#define BTR_TSEG2_SHIFT		12
#define BTR_TSEG2_MASK		(0x7 << BTR_TSEG2_SHIFT)

/* brp extension register */
#define BRP_EXT_BRPE_MASK	0x0f
#define BRP_EXT_BRPE_SHIFT	0

/* IFx command request */
#define IF_COMR_BUSY		BIT(15)

/* IFx command mask */
#define IF_COMM_WR		BIT(7)
#define IF_COMM_MASK		BIT(6)
#define IF_COMM_ARB		BIT(5)
#define IF_COMM_CONTROL		BIT(4)
#define IF_COMM_CLR_INT_PND	BIT(3)
#define IF_COMM_TXRQST		BIT(2)
#define IF_COMM_CLR_NEWDAT	IF_COMM_TXRQST
#define IF_COMM_DATAA		BIT(1)
#define IF_COMM_DATAB		BIT(0)

/* TX buffer setup */
#define IF_COMM_TX		(IF_COMM_ARB | IF_COMM_CONTROL | \
				 IF_COMM_TXRQST | \
				 IF_COMM_DATAA | IF_COMM_DATAB)

/* For the low buffers we clear the interrupt bit, but keep newdat */
#define IF_COMM_RCV_LOW		(IF_COMM_MASK | IF_COMM_ARB | \
				 IF_COMM_CONTROL | IF_COMM_CLR_INT_PND | \
				 IF_COMM_DATAA | IF_COMM_DATAB)

/* For the high buffers we clear the interrupt bit and newdat */
#define IF_COMM_RCV_HIGH	(IF_COMM_RCV_LOW | IF_COMM_CLR_NEWDAT)

/* Receive setup of message objects */
#define IF_COMM_RCV_SETUP	(IF_COMM_MASK | IF_COMM_ARB | IF_COMM_CONTROL)

/* Invalidation of message objects */
#define IF_COMM_INVAL		(IF_COMM_ARB | IF_COMM_CONTROL)

/* IFx arbitration */
#define IF_ARB_MSGVAL		BIT(31)
#define IF_ARB_MSGXTD		BIT(30)
#define IF_ARB_TRANSMIT		BIT(29)

/* IFx message control */
#define IF_MCONT_NEWDAT		BIT(15)
#define IF_MCONT_MSGLST		BIT(14)
#define IF_MCONT_INTPND		BIT(13)
#define IF_MCONT_UMASK		BIT(12)
#define IF_MCONT_TXIE		BIT(11)
#define IF_MCONT_RXIE		BIT(10)
#define IF_MCONT_RMTEN		BIT(9)
#define IF_MCONT_TXRQST		BIT(8)
#define IF_MCONT_EOB		BIT(7)
#define IF_MCONT_DLC_MASK	0xf

#define IF_MCONT_RCV		(IF_MCONT_RXIE | IF_MCONT_UMASK)
#define IF_MCONT_RCV_EOB	(IF_MCONT_RCV | IF_MCONT_EOB)

#define IF_MCONT_TX		(IF_MCONT_TXIE | IF_MCONT_EOB)

/*
 * Use IF1 for RX and IF2 for TX
 */
#define IF_RX			0
#define IF_TX			1

/* minimum timeout for checking BUSY status */
#define MIN_TIMEOUT_VALUE	6

/* Wait for ~1 sec for INIT bit */
#define INIT_WAIT_MS		1000

/* napi related */
#define C_CAN_NAPI_WEIGHT	C_CAN_MSG_OBJ_RX_NUM

/* c_can lec values */
enum c_can_lec_type {
	LEC_NO_ERROR = 0,
	LEC_STUFF_ERROR,
	LEC_FORM_ERROR,
	LEC_ACK_ERROR,
	LEC_BIT1_ERROR,
	LEC_BIT0_ERROR,
	LEC_CRC_ERROR,
	LEC_UNUSED,
	LEC_MASK = LEC_UNUSED,
};

/*
 * c_can error types:
 * Bus errors (BUS_OFF, ERROR_WARNING, ERROR_PASSIVE) are supported
 */
enum c_can_bus_error_types {
	C_CAN_NO_ERROR = 0,
	C_CAN_BUS_OFF,
	C_CAN_ERROR_WARNING,
	C_CAN_ERROR_PASSIVE,
};

static const struct can_bittiming_const c_can_bittiming_const = {
	.name = KBUILD_MODNAME,
	.tseg1_min = 2,		/* Time segment 1 = prop_seg + phase_seg1 */
	.tseg1_max = 16,
	.tseg2_min = 1,		/* Time segment 2 = phase_seg2 */
	.tseg2_max = 8,
	.sjw_max = 4,
	.brp_min = 1,
	.brp_max = 1024,	/* 6-bit BRP field + 4-bit BRPE field */
	.brp_inc = 1,
};

static inline void c_can_pm_runtime_enable(const struct c_can_priv *priv)
{
	if (priv->device)
		pm_runtime_enable(priv->device);
}

static inline void c_can_pm_runtime_disable(const struct c_can_priv *priv)
{
	if (priv->device)
		pm_runtime_disable(priv->device);
}

static inline void c_can_pm_runtime_get_sync(const struct c_can_priv *priv)
{
	if (priv->device)
		pm_runtime_get_sync(priv->device);
}

static inline void c_can_pm_runtime_put_sync(const struct c_can_priv *priv)
{
	if (priv->device)
		pm_runtime_put_sync(priv->device);
}

static inline void c_can_reset_ram(const struct c_can_priv *priv, bool enable)
{
	if (priv->raminit)
		priv->raminit(priv, enable);
}

static void c_can_irq_control(struct c_can_priv *priv, bool enable)
{
	u32 ctrl = priv->read_reg(priv, C_CAN_CTRL_REG) & ~CONTROL_IRQMSK;

	if (enable)
		ctrl |= CONTROL_IRQMSK;

	priv->write_reg(priv, C_CAN_CTRL_REG, ctrl);
}

static void c_can_obj_update(struct net_device *dev, int iface, u32 cmd, u32 obj)
{
	struct c_can_priv *priv = netdev_priv(dev);
	int cnt, reg = C_CAN_IFACE(COMREQ_REG, iface);

	priv->write_reg32(priv, reg, (cmd << 16) | obj);

	for (cnt = MIN_TIMEOUT_VALUE; cnt; cnt--) {
		if (!(priv->read_reg(priv, reg) & IF_COMR_BUSY))
			return;
		udelay(1);
	}
	netdev_err(dev, "Updating object timed out\n");
}

static inline void c_can_object_get(struct net_device *dev, int iface,
				    u32 obj, u32 cmd)
{
	c_can_obj_update(dev, iface, cmd, obj);
}

static inline void c_can_object_put(struct net_device *dev, int iface,
				    u32 obj, u32 cmd)
{
	c_can_obj_update(dev, iface, cmd | IF_COMM_WR, obj);
}

/*
 * Note: According to documentation clearing TXIE while MSGVAL is set
 * is not allowed, but works nicely on C/DCAN. And that lowers the I/O
 * load significantly.
 */
static void c_can_inval_tx_object(struct net_device *dev, int iface, int obj)
{
	struct c_can_priv *priv = netdev_priv(dev);

	priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), 0);
	c_can_object_put(dev, iface, obj, IF_COMM_INVAL);
}

static void c_can_inval_msg_object(struct net_device *dev, int iface, int obj)
{
	struct c_can_priv *priv = netdev_priv(dev);

	priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), 0);
	priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), 0);
	c_can_inval_tx_object(dev, iface, obj);
}

static void c_can_setup_tx_object(struct net_device *dev, int iface,
				  struct can_frame *frame, int idx)
{
	struct c_can_priv *priv = netdev_priv(dev);
	u16 ctrl = IF_MCONT_TX | frame->can_dlc;
	bool rtr = frame->can_id & CAN_RTR_FLAG;
	u32 arb = IF_ARB_MSGVAL;
	int i;

	if (frame->can_id & CAN_EFF_FLAG) {
		arb |= frame->can_id & CAN_EFF_MASK;
		arb |= IF_ARB_MSGXTD;
	} else {
		arb |= (frame->can_id & CAN_SFF_MASK) << 18;
	}
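	/*
	 * In the IFx arbitration register a standard 11-bit identifier
	 * occupies bits 28..18 (hence the shift by 18 above), while an
	 * extended 29-bit identifier uses bits 28..0 directly.
	 */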

	if (!rtr)
		arb |= IF_ARB_TRANSMIT;

	/*
	 * If we change the DIR bit, we need to invalidate the buffer
	 * first, i.e. clear the MSGVAL flag in the arbiter.
	 */
	if (rtr != (bool)test_bit(idx, &priv->tx_dir)) {
		u32 obj = idx + C_CAN_MSG_OBJ_TX_FIRST;

		c_can_inval_msg_object(dev, iface, obj);
		change_bit(idx, &priv->tx_dir);
	}

	priv->write_reg32(priv, C_CAN_IFACE(ARB1_REG, iface), arb);

	priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), ctrl);

	for (i = 0; i < frame->can_dlc; i += 2) {
		priv->write_reg(priv, C_CAN_IFACE(DATA1_REG, iface) + i / 2,
				frame->data[i] | (frame->data[i + 1] << 8));
	}
}

static inline void c_can_activate_all_lower_rx_msg_obj(struct net_device *dev,
							int iface)
{
	int i;

	for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_MSG_RX_LOW_LAST; i++)
		c_can_object_get(dev, iface, i, IF_COMM_CLR_NEWDAT);
}

static int c_can_handle_lost_msg_obj(struct net_device *dev,
				     int iface, int objno, u32 ctrl)
{
	struct net_device_stats *stats = &dev->stats;
	struct c_can_priv *priv = netdev_priv(dev);
	struct can_frame *frame;
	struct sk_buff *skb;

	ctrl &= ~(IF_MCONT_MSGLST | IF_MCONT_INTPND | IF_MCONT_NEWDAT);
	priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), ctrl);
	c_can_object_put(dev, iface, objno, IF_COMM_CONTROL);

	stats->rx_errors++;
	stats->rx_over_errors++;

	/* create an error msg */
	skb = alloc_can_err_skb(dev, &frame);
	if (unlikely(!skb))
		return 0;

	frame->can_id |= CAN_ERR_CRTL;
	frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;

	netif_receive_skb(skb);
	return 1;
}

static int c_can_read_msg_object(struct net_device *dev, int iface, u32 ctrl)
{
	struct net_device_stats *stats = &dev->stats;
	struct c_can_priv *priv = netdev_priv(dev);
	struct can_frame *frame;
	struct sk_buff *skb;
	u32 arb, data;

	skb = alloc_can_skb(dev, &frame);
	if (!skb) {
		stats->rx_dropped++;
		return -ENOMEM;
	}

	frame->can_dlc = get_can_dlc(ctrl & 0x0F);

	arb = priv->read_reg32(priv, C_CAN_IFACE(ARB1_REG, iface));

	if (arb & IF_ARB_MSGXTD)
		frame->can_id = (arb & CAN_EFF_MASK) | CAN_EFF_FLAG;
	else
		frame->can_id = (arb >> 18) & CAN_SFF_MASK;

	if (arb & IF_ARB_TRANSMIT) {
		frame->can_id |= CAN_RTR_FLAG;
	} else {
		int i, dreg = C_CAN_IFACE(DATA1_REG, iface);

		for (i = 0; i < frame->can_dlc; i += 2, dreg++) {
			data = priv->read_reg(priv, dreg);
			frame->data[i] = data;
			frame->data[i + 1] = data >> 8;
		}
	}

	stats->rx_packets++;
	stats->rx_bytes += frame->can_dlc;

	netif_receive_skb(skb);
	return 0;
}

static void c_can_setup_receive_object(struct net_device *dev, int iface,
				       u32 obj, u32 mask, u32 id, u32 mcont)
{
	struct c_can_priv *priv = netdev_priv(dev);

	mask |= BIT(29);
	priv->write_reg32(priv, C_CAN_IFACE(MASK1_REG, iface), mask);

	id |= IF_ARB_MSGVAL;
	priv->write_reg32(priv, C_CAN_IFACE(ARB1_REG, iface), id);

	priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), mcont);
	c_can_object_put(dev, iface, obj, IF_COMM_RCV_SETUP);
}

static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct can_frame *frame = (struct can_frame *)skb->data;
	struct c_can_priv *priv = netdev_priv(dev);
	u32 idx, obj;

	if (can_dropped_invalid_skb(dev, skb))
		return NETDEV_TX_OK;
	/*
	 * This is not a FIFO. C/D_CAN sends out the buffers
	 * prioritized. The lowest buffer number wins.
	 */
	idx = fls(atomic_read(&priv->tx_active));
	obj = idx + C_CAN_MSG_OBJ_TX_FIRST;
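	/*
	 * Illustrative: with tx_active == 0b0111 the three lowest TX buffers
	 * are in flight, fls() returns 3 and this frame is queued in the
	 * fourth buffer. Because the controller sends the lowest buffer
	 * number first, frames leave the bus in the order queued here.
	 */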

	/* If this is the last buffer, stop the xmit queue */
	if (idx == C_CAN_MSG_OBJ_TX_NUM - 1)
		netif_stop_queue(dev);
	/*
	 * Store the message in the interface so we can call
	 * can_put_echo_skb(). We must do this before we enable
	 * transmit as we might race against do_tx().
	 */
	c_can_setup_tx_object(dev, IF_TX, frame, idx);
	priv->dlc[idx] = frame->can_dlc;
	can_put_echo_skb(skb, dev, idx);

	/* Update the active bits */
	atomic_add((1 << idx), &priv->tx_active);
	/* Start transmission */
	c_can_object_put(dev, IF_TX, obj, IF_COMM_TX);

	return NETDEV_TX_OK;
}

static int c_can_wait_for_ctrl_init(struct net_device *dev,
				    struct c_can_priv *priv, u32 init)
{
	int retry = 0;

	while (init != (priv->read_reg(priv, C_CAN_CTRL_REG) & CONTROL_INIT)) {
		udelay(10);
		if (retry++ > 1000) {
			netdev_err(dev, "CCTRL: set CONTROL_INIT failed\n");
			return -EIO;
		}
	}
	return 0;
}

static int c_can_set_bittiming(struct net_device *dev)
{
	unsigned int reg_btr, reg_brpe, ctrl_save;
	u8 brp, brpe, sjw, tseg1, tseg2;
	u32 ten_bit_brp;
	struct c_can_priv *priv = netdev_priv(dev);
	const struct can_bittiming *bt = &priv->can.bittiming;
	int res;

	/* c_can provides a 6-bit brp and 4-bit brpe fields */
	ten_bit_brp = bt->brp - 1;
	brp = ten_bit_brp & BTR_BRP_MASK;
	brpe = ten_bit_brp >> 6;
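	/*
	 * Worked example: for bt->brp == 65, ten_bit_brp == 64, so the BRP
	 * field is 0 and BRPE is 1; the effective hardware prescaler is
	 * (BRPE << 6 | BRP) + 1 == 65 again.
	 */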

	sjw = bt->sjw - 1;
	tseg1 = bt->prop_seg + bt->phase_seg1 - 1;
	tseg2 = bt->phase_seg2 - 1;
	reg_btr = brp | (sjw << BTR_SJW_SHIFT) | (tseg1 << BTR_TSEG1_SHIFT) |
			(tseg2 << BTR_TSEG2_SHIFT);
	reg_brpe = brpe & BRP_EXT_BRPE_MASK;

	netdev_info(dev,
		"setting BTR=%04x BRPE=%04x\n", reg_btr, reg_brpe);

	ctrl_save = priv->read_reg(priv, C_CAN_CTRL_REG);
	ctrl_save &= ~CONTROL_INIT;
	priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_CCE | CONTROL_INIT);
	res = c_can_wait_for_ctrl_init(dev, priv, CONTROL_INIT);
	if (res)
		return res;

	priv->write_reg(priv, C_CAN_BTR_REG, reg_btr);
	priv->write_reg(priv, C_CAN_BRPEXT_REG, reg_brpe);
	priv->write_reg(priv, C_CAN_CTRL_REG, ctrl_save);

	return c_can_wait_for_ctrl_init(dev, priv, 0);
}

/*
 * Configure C_CAN message objects for Tx and Rx purposes:
 * C_CAN provides a total of 32 message objects that can be configured
 * either for Tx or Rx purposes. Here the first 16 message objects are used as
 * a reception FIFO. The end of reception FIFO is signified by the EoB bit
 * being SET. The remaining 16 message objects are kept aside for Tx purposes.
 * See user guide document for further details on configuring message
 * objects.
 */
static void c_can_configure_msg_objects(struct net_device *dev)
{
	int i;

	/* first invalidate all message objects */
	for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_NO_OF_OBJECTS; i++)
		c_can_inval_msg_object(dev, IF_RX, i);

	/* setup receive message objects */
	for (i = C_CAN_MSG_OBJ_RX_FIRST; i < C_CAN_MSG_OBJ_RX_LAST; i++)
		c_can_setup_receive_object(dev, IF_RX, i, 0, 0, IF_MCONT_RCV);

	c_can_setup_receive_object(dev, IF_RX, C_CAN_MSG_OBJ_RX_LAST, 0, 0,
				   IF_MCONT_RCV_EOB);
}

/*
 * Configure C_CAN chip:
 * - enable/disable auto-retransmission
 * - set operating mode
 * - configure message objects
 */
static int c_can_chip_config(struct net_device *dev)
{
	struct c_can_priv *priv = netdev_priv(dev);

	/* enable automatic retransmission */
	priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_ENABLE_AR);

	if ((priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) &&
	    (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)) {
		/* loopback + silent mode : useful for hot self-test */
		priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_TEST);
		priv->write_reg(priv, C_CAN_TEST_REG, TEST_LBACK | TEST_SILENT);
	} else if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
		/* loopback mode : useful for self-test function */
		priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_TEST);
		priv->write_reg(priv, C_CAN_TEST_REG, TEST_LBACK);
	} else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) {
		/* silent mode : bus-monitoring mode */
		priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_TEST);
		priv->write_reg(priv, C_CAN_TEST_REG, TEST_SILENT);
	}

	/* configure message objects */
	c_can_configure_msg_objects(dev);

	/* set a `lec` value so that we can check for updates later */
	priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);

	/* Clear all internal status */
	atomic_set(&priv->tx_active, 0);
	priv->rxmasked = 0;
	priv->tx_dir = 0;

	/* set bittiming params */
	return c_can_set_bittiming(dev);
}

static int c_can_start(struct net_device *dev)
{
	struct c_can_priv *priv = netdev_priv(dev);
	int err;
	struct pinctrl *p;

	/* basic c_can configuration */
	err = c_can_chip_config(dev);
	if (err)
		return err;

	/* Setup the command for new messages */
	priv->comm_rcv_high = priv->type != BOSCH_D_CAN ?
		IF_COMM_RCV_LOW : IF_COMM_RCV_HIGH;
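	/*
	 * On C_CAN the NEWDAT bit is left set by this command and cleared
	 * separately in c_can_rx_finalize(); on D_CAN it is cleared together
	 * with the interrupt pending bit when the object is read.
	 */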

	priv->can.state = CAN_STATE_ERROR_ACTIVE;

	/* Attempt to use "active" if available else use "default" */
	p = pinctrl_get_select(priv->device, "active");
	if (!IS_ERR(p))
		pinctrl_put(p);
	else
		pinctrl_pm_select_default_state(priv->device);

	return 0;
}

static void c_can_stop(struct net_device *dev)
{
	struct c_can_priv *priv = netdev_priv(dev);

	c_can_irq_control(priv, false);

	/* put ctrl to init on stop to end ongoing transmission */
	priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_INIT);

	/* deactivate pins */
	pinctrl_pm_select_sleep_state(dev->dev.parent);
	priv->can.state = CAN_STATE_STOPPED;
}

static int c_can_set_mode(struct net_device *dev, enum can_mode mode)
{
	struct c_can_priv *priv = netdev_priv(dev);
	int err;

	switch (mode) {
	case CAN_MODE_START:
		err = c_can_start(dev);
		if (err)
			return err;
		netif_wake_queue(dev);
		c_can_irq_control(priv, true);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int __c_can_get_berr_counter(const struct net_device *dev,
				    struct can_berr_counter *bec)
{
	unsigned int reg_err_counter;
	struct c_can_priv *priv = netdev_priv(dev);

	reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG);
	bec->rxerr = (reg_err_counter & ERR_CNT_REC_MASK) >>
				ERR_CNT_REC_SHIFT;
	bec->txerr = reg_err_counter & ERR_CNT_TEC_MASK;

	return 0;
}

static int c_can_get_berr_counter(const struct net_device *dev,
				  struct can_berr_counter *bec)
{
	struct c_can_priv *priv = netdev_priv(dev);
	int err;

	c_can_pm_runtime_get_sync(priv);
	err = __c_can_get_berr_counter(dev, bec);
	c_can_pm_runtime_put_sync(priv);

	return err;
}

static void c_can_do_tx(struct net_device *dev)
{
	struct c_can_priv *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	u32 idx, obj, pkts = 0, bytes = 0, pend, clr;

	clr = pend = priv->read_reg(priv, C_CAN_INTPND2_REG);
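	/*
	 * INTPND2 covers the upper 16 message objects, which this driver
	 * dedicates to TX; every set bit therefore marks a completed
	 * transmission whose echo skb and statistics are handled below.
	 */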

	while ((idx = ffs(pend))) {
		idx--;
		pend &= ~(1 << idx);
		obj = idx + C_CAN_MSG_OBJ_TX_FIRST;
		c_can_inval_tx_object(dev, IF_RX, obj);
		can_get_echo_skb(dev, idx);
		bytes += priv->dlc[idx];
		pkts++;
	}

	/* Clear the bits in the tx_active mask */
	atomic_sub(clr, &priv->tx_active);

	if (clr & (1 << (C_CAN_MSG_OBJ_TX_NUM - 1)))
		netif_wake_queue(dev);

	if (pkts) {
		stats->tx_bytes += bytes;
		stats->tx_packets += pkts;
		can_led_event(dev, CAN_LED_EVENT_TX);
	}
}

/*
 * If we have a gap in the pending bits, that means we either
 * raced with the hardware or failed to readout all upper
 * objects in the last run due to quota limit.
 */
static u32 c_can_adjust_pending(u32 pend)
{
	u32 weight, lasts;

	if (pend == RECEIVE_OBJECT_BITS)
		return pend;

	/*
	 * If the last set bit is larger than the number of pending
	 * bits we have a gap.
	 */
	weight = hweight32(pend);
	lasts = fls(pend);

	/* If the bits are linear, nothing to do */
	if (lasts == weight)
		return pend;

	/*
	 * Find the first set bit after the gap. We walk backwards
	 * from the last set bit.
	 */
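	/*
	 * Illustrative example: pend == 0b1100001111 has a gap above bit 3
	 * (weight == 6, fls() == 10). The loop below stops at lasts == 8 and
	 * we return 0b1100000000, so the upper block is read first and the
	 * lower bits are picked up on the next readout.
	 */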
	for (lasts--; pend & (1 << (lasts - 1)); lasts--);

	return pend & ~((1 << lasts) - 1);
}

static inline void c_can_rx_object_get(struct net_device *dev,
				       struct c_can_priv *priv, u32 obj)
{
	c_can_object_get(dev, IF_RX, obj, priv->comm_rcv_high);
}

static inline void c_can_rx_finalize(struct net_device *dev,
				     struct c_can_priv *priv, u32 obj)
{
	if (priv->type != BOSCH_D_CAN)
		c_can_object_get(dev, IF_RX, obj, IF_COMM_CLR_NEWDAT);
}

static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv,
			      u32 pend, int quota)
{
	u32 pkts = 0, ctrl, obj;

	while ((obj = ffs(pend)) && quota > 0) {
		pend &= ~BIT(obj - 1);

		c_can_rx_object_get(dev, priv, obj);
		ctrl = priv->read_reg(priv, C_CAN_IFACE(MSGCTRL_REG, IF_RX));

		if (ctrl & IF_MCONT_MSGLST) {
			int n = c_can_handle_lost_msg_obj(dev, IF_RX, obj, ctrl);

			pkts += n;
			quota -= n;
			continue;
		}

		/*
		 * This really should not happen, but this covers some
		 * odd HW behaviour. Do not remove that unless you
		 * want to brick your machine.
		 */
		if (!(ctrl & IF_MCONT_NEWDAT))
			continue;

		/* read the data from the message object */
		c_can_read_msg_object(dev, IF_RX, ctrl);

		c_can_rx_finalize(dev, priv, obj);

		pkts++;
		quota--;
	}

	return pkts;
}

static inline u32 c_can_get_pending(struct c_can_priv *priv)
{
	u32 pend = priv->read_reg(priv, C_CAN_NEWDAT1_REG);

	return pend;
}

/*
 * theory of operation:
 *
 * c_can core saves a received CAN message into the first free message
 * object it finds free (starting with the lowest). Bits NEWDAT and
 * INTPND are set for this message object indicating that a new message
 * has arrived. To work-around this issue, we keep two groups of message
 * objects whose partitioning is defined by C_CAN_MSG_OBJ_RX_SPLIT.
 *
 * We clear the newdat bit right away.
 *
 * This can result in packet reordering when the readout is slow.
 */
static int c_can_do_rx_poll(struct net_device *dev, int quota)
{
	struct c_can_priv *priv = netdev_priv(dev);
	u32 pkts = 0, pend = 0, toread, n;

	/*
	 * It is faster to read only one 16bit register. This is only possible
	 * for a maximum number of 16 objects.
	 */
	BUILD_BUG_ON_MSG(C_CAN_MSG_OBJ_RX_LAST > 16,
			"Implementation does not support more message objects than 16");

	while (quota > 0) {
		if (!pend) {
			pend = c_can_get_pending(priv);
			if (!pend)
				break;
			/*
			 * If the pending field has a gap, handle the
			 * bits above the gap first.
			 */
			toread = c_can_adjust_pending(pend);
		} else {
			toread = pend;
		}
		/* Remove the bits from pend */
		pend &= ~toread;
		/* Read the objects */
		n = c_can_read_objects(dev, priv, toread, quota);
		pkts += n;
		quota -= n;
	}

	if (pkts)
		can_led_event(dev, CAN_LED_EVENT_RX);

	return pkts;
}

static int c_can_handle_state_change(struct net_device *dev,
				enum c_can_bus_error_types error_type)
{
	unsigned int reg_err_counter;
	unsigned int rx_err_passive;
	struct c_can_priv *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;
	struct can_berr_counter bec;

	switch (error_type) {
	case C_CAN_ERROR_WARNING:
		/* error warning state */
		priv->can.can_stats.error_warning++;
		priv->can.state = CAN_STATE_ERROR_WARNING;
		break;
	case C_CAN_ERROR_PASSIVE:
		/* error passive state */
		priv->can.can_stats.error_passive++;
		priv->can.state = CAN_STATE_ERROR_PASSIVE;
		break;
	case C_CAN_BUS_OFF:
		/* bus-off state */
		priv->can.state = CAN_STATE_BUS_OFF;
		priv->can.can_stats.bus_off++;
		break;
	default:
		break;
	}

	/* propagate the error condition to the CAN stack */
	skb = alloc_can_err_skb(dev, &cf);
	if (unlikely(!skb))
		return 0;

	__c_can_get_berr_counter(dev, &bec);
	reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG);
	rx_err_passive = (reg_err_counter & ERR_CNT_RP_MASK) >>
				ERR_CNT_RP_SHIFT;

	switch (error_type) {
	case C_CAN_ERROR_WARNING:
		/* error warning state */
		cf->can_id |= CAN_ERR_CRTL;
		cf->data[1] = (bec.txerr > bec.rxerr) ?
			CAN_ERR_CRTL_TX_WARNING :
			CAN_ERR_CRTL_RX_WARNING;
		cf->data[6] = bec.txerr;
		cf->data[7] = bec.rxerr;

		break;
	case C_CAN_ERROR_PASSIVE:
		/* error passive state */
		cf->can_id |= CAN_ERR_CRTL;
		if (rx_err_passive)
			cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
		if (bec.txerr > 127)
			cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;

		cf->data[6] = bec.txerr;
		cf->data[7] = bec.rxerr;
		break;
	case C_CAN_BUS_OFF:
		/* bus-off state */
		cf->can_id |= CAN_ERR_BUSOFF;
		can_bus_off(dev);
		break;
	default:
		break;
	}

	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;
	netif_receive_skb(skb);

	return 1;
}

static int c_can_handle_bus_err(struct net_device *dev,
				enum c_can_lec_type lec_type)
{
	struct c_can_priv *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;

	/*
	 * early exit if no lec update or no error.
	 * no lec update means that no CAN bus event has been detected
	 * since CPU wrote 0x7 value to status reg.
	 */
	if (lec_type == LEC_UNUSED || lec_type == LEC_NO_ERROR)
		return 0;

	if (!(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING))
		return 0;

	/* common for all type of bus errors */
	priv->can.can_stats.bus_error++;
	stats->rx_errors++;

	/* propagate the error condition to the CAN stack */
	skb = alloc_can_err_skb(dev, &cf);
	if (unlikely(!skb))
		return 0;

	/*
	 * check for 'last error code' which tells us the
	 * type of the last error to occur on the CAN bus
	 */
	cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;

	switch (lec_type) {
	case LEC_STUFF_ERROR:
		netdev_dbg(dev, "stuff error\n");
		cf->data[2] |= CAN_ERR_PROT_STUFF;
		break;
	case LEC_FORM_ERROR:
		netdev_dbg(dev, "form error\n");
		cf->data[2] |= CAN_ERR_PROT_FORM;
		break;
	case LEC_ACK_ERROR:
		netdev_dbg(dev, "ack error\n");
		cf->data[3] = CAN_ERR_PROT_LOC_ACK;
		break;
	case LEC_BIT1_ERROR:
		netdev_dbg(dev, "bit1 error\n");
		cf->data[2] |= CAN_ERR_PROT_BIT1;
		break;
	case LEC_BIT0_ERROR:
		netdev_dbg(dev, "bit0 error\n");
		cf->data[2] |= CAN_ERR_PROT_BIT0;
		break;
	case LEC_CRC_ERROR:
		netdev_dbg(dev, "CRC error\n");
		cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
		break;
	default:
		break;
	}

	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;
	netif_receive_skb(skb);
	return 1;
}

static int c_can_poll(struct napi_struct *napi, int quota)
{
	struct net_device *dev = napi->dev;
	struct c_can_priv *priv = netdev_priv(dev);
	u16 curr, last = priv->last_status;
	int work_done = 0;

	priv->last_status = curr = priv->read_reg(priv, C_CAN_STS_REG);
	/* Ack status on C_CAN. D_CAN is self clearing */
	if (priv->type != BOSCH_D_CAN)
		priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);

	/* handle state changes */
	if ((curr & STATUS_EWARN) && (!(last & STATUS_EWARN))) {
		netdev_dbg(dev, "entered error warning state\n");
		work_done += c_can_handle_state_change(dev, C_CAN_ERROR_WARNING);
	}

	if ((curr & STATUS_EPASS) && (!(last & STATUS_EPASS))) {
		netdev_dbg(dev, "entered error passive state\n");
		work_done += c_can_handle_state_change(dev, C_CAN_ERROR_PASSIVE);
	}

	if ((curr & STATUS_BOFF) && (!(last & STATUS_BOFF))) {
		netdev_dbg(dev, "entered bus off state\n");
		work_done += c_can_handle_state_change(dev, C_CAN_BUS_OFF);
		goto end;
	}

	/* handle bus recovery events */
	if ((!(curr & STATUS_BOFF)) && (last & STATUS_BOFF)) {
		netdev_dbg(dev, "left bus off state\n");
		priv->can.state = CAN_STATE_ERROR_ACTIVE;
	}
	if ((!(curr & STATUS_EPASS)) && (last & STATUS_EPASS)) {
		netdev_dbg(dev, "left error passive state\n");
		priv->can.state = CAN_STATE_ERROR_ACTIVE;
	}

	/* handle lec errors on the bus */
	work_done += c_can_handle_bus_err(dev, curr & LEC_MASK);

	/* Handle Tx/Rx events. We do this unconditionally */
	work_done += c_can_do_rx_poll(dev, (quota - work_done));
	c_can_do_tx(dev);

end:
	if (work_done < quota) {
		napi_complete(napi);
		/* enable all IRQs if we are not in bus off state */
		if (priv->can.state != CAN_STATE_BUS_OFF)
			c_can_irq_control(priv, true);
	}

	return work_done;
}

static irqreturn_t c_can_isr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct c_can_priv *priv = netdev_priv(dev);

	if (!priv->read_reg(priv, C_CAN_INT_REG))
		return IRQ_NONE;

	/* disable all interrupts and schedule the NAPI */
	c_can_irq_control(priv, false);
	napi_schedule(&priv->napi);

	return IRQ_HANDLED;
}

static int c_can_open(struct net_device *dev)
{
	int err;
	struct c_can_priv *priv = netdev_priv(dev);

	c_can_pm_runtime_get_sync(priv);
	c_can_reset_ram(priv, true);

	/* open the can device */
	err = open_candev(dev);
	if (err) {
		netdev_err(dev, "failed to open can device\n");
		goto exit_open_fail;
	}

	/* register interrupt handler */
	err = request_irq(dev->irq, &c_can_isr, IRQF_SHARED, dev->name,
				dev);
	if (err < 0) {
		netdev_err(dev, "failed to request interrupt\n");
		goto exit_irq_fail;
	}

	/* start the c_can controller */
	err = c_can_start(dev);
	if (err)
		goto exit_start_fail;

	can_led_event(dev, CAN_LED_EVENT_OPEN);

	napi_enable(&priv->napi);
	/* enable status change, error and module interrupts */
	c_can_irq_control(priv, true);
	netif_start_queue(dev);

	return 0;

exit_start_fail:
	free_irq(dev->irq, dev);
exit_irq_fail:
	close_candev(dev);
exit_open_fail:
	c_can_reset_ram(priv, false);
	c_can_pm_runtime_put_sync(priv);
	return err;
}

static int c_can_close(struct net_device *dev)
{
	struct c_can_priv *priv = netdev_priv(dev);

	netif_stop_queue(dev);
	napi_disable(&priv->napi);
	c_can_stop(dev);
	free_irq(dev->irq, dev);
	close_candev(dev);

	c_can_reset_ram(priv, false);
	c_can_pm_runtime_put_sync(priv);

	can_led_event(dev, CAN_LED_EVENT_STOP);

	return 0;
}

struct net_device *alloc_c_can_dev(void)
{
	struct net_device *dev;
	struct c_can_priv *priv;

	dev = alloc_candev(sizeof(struct c_can_priv), C_CAN_MSG_OBJ_TX_NUM);
	if (!dev)
		return NULL;

	priv = netdev_priv(dev);
	netif_napi_add(dev, &priv->napi, c_can_poll, C_CAN_NAPI_WEIGHT);

	priv->dev = dev;
	priv->can.bittiming_const = &c_can_bittiming_const;
	priv->can.do_set_mode = c_can_set_mode;
	priv->can.do_get_berr_counter = c_can_get_berr_counter;
	priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
					CAN_CTRLMODE_LISTENONLY |
					CAN_CTRLMODE_BERR_REPORTING;

	return dev;
}
EXPORT_SYMBOL_GPL(alloc_c_can_dev);

#ifdef CONFIG_PM
int c_can_power_down(struct net_device *dev)
{
	u32 val;
	unsigned long time_out;
	struct c_can_priv *priv = netdev_priv(dev);

	if (!(dev->flags & IFF_UP))
		return 0;

	WARN_ON(priv->type != BOSCH_D_CAN);

	/* set PDR value so the device goes to power down mode */
	val = priv->read_reg(priv, C_CAN_CTRL_EX_REG);
	val |= CONTROL_EX_PDR;
	priv->write_reg(priv, C_CAN_CTRL_EX_REG, val);

	/* Wait for the PDA bit to get set */
	time_out = jiffies + msecs_to_jiffies(INIT_WAIT_MS);
	while (!(priv->read_reg(priv, C_CAN_STS_REG) & STATUS_PDA) &&
				time_after(time_out, jiffies))
		cpu_relax();

	if (time_after(jiffies, time_out))
		return -ETIMEDOUT;

	c_can_stop(dev);

	c_can_reset_ram(priv, false);
	c_can_pm_runtime_put_sync(priv);

	return 0;
}
EXPORT_SYMBOL_GPL(c_can_power_down);

int c_can_power_up(struct net_device *dev)
{
	u32 val;
	unsigned long time_out;
	struct c_can_priv *priv = netdev_priv(dev);
	int ret;

	if (!(dev->flags & IFF_UP))
		return 0;

	WARN_ON(priv->type != BOSCH_D_CAN);

	c_can_pm_runtime_get_sync(priv);
	c_can_reset_ram(priv, true);

	/* Clear PDR and INIT bits */
	val = priv->read_reg(priv, C_CAN_CTRL_EX_REG);
	val &= ~CONTROL_EX_PDR;
	priv->write_reg(priv, C_CAN_CTRL_EX_REG, val);
	val = priv->read_reg(priv, C_CAN_CTRL_REG);
	val &= ~CONTROL_INIT;
	priv->write_reg(priv, C_CAN_CTRL_REG, val);

	/* Wait for the PDA bit to get clear */
	time_out = jiffies + msecs_to_jiffies(INIT_WAIT_MS);
	while ((priv->read_reg(priv, C_CAN_STS_REG) & STATUS_PDA) &&
				time_after(time_out, jiffies))
		cpu_relax();

	if (time_after(jiffies, time_out))
		return -ETIMEDOUT;

	ret = c_can_start(dev);
	if (!ret)
		c_can_irq_control(priv, true);

	return ret;
}
EXPORT_SYMBOL_GPL(c_can_power_up);
#endif

void free_c_can_dev(struct net_device *dev)
{
	struct c_can_priv *priv = netdev_priv(dev);

	netif_napi_del(&priv->napi);
	free_candev(dev);
}
EXPORT_SYMBOL_GPL(free_c_can_dev);

static const struct net_device_ops c_can_netdev_ops = {
	.ndo_open = c_can_open,
	.ndo_stop = c_can_close,
	.ndo_start_xmit = c_can_start_xmit,
	.ndo_change_mtu = can_change_mtu,
};

int register_c_can_dev(struct net_device *dev)
{
	struct c_can_priv *priv = netdev_priv(dev);
	int err;

	/* Deactivate pins to prevent DRA7 DCAN IP from being
	 * stuck in transition when module is disabled.
	 * Pins are activated in c_can_start() and deactivated
	 * in c_can_stop()
	 */
	pinctrl_pm_select_sleep_state(dev->dev.parent);

	c_can_pm_runtime_enable(priv);

	dev->flags |= IFF_ECHO;	/* we support local echo */
	dev->netdev_ops = &c_can_netdev_ops;

	err = register_candev(dev);
	if (err)
		c_can_pm_runtime_disable(priv);
	else
		devm_can_led_init(dev);

	return err;
}
EXPORT_SYMBOL_GPL(register_c_can_dev);

void unregister_c_can_dev(struct net_device *dev)
{
	struct c_can_priv *priv = netdev_priv(dev);

	unregister_candev(dev);

	c_can_pm_runtime_disable(priv);
}
EXPORT_SYMBOL_GPL(unregister_c_can_dev);

MODULE_AUTHOR("Bhupesh Sharma <bhupesh.sharma@st.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("CAN bus driver for Bosch C_CAN controller");