1/* Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/interrupt.h>
14#include <linux/module.h>
15#include <linux/errno.h>
16#include <linux/netdevice.h>
17#include <linux/workqueue.h>
18#include <linux/spi/spi.h>
19#include <linux/can.h>
20#include <linux/can/dev.h>
21#include <linux/can/error.h>
22#include <linux/of.h>
23#include <linux/of_device.h>
24#include <linux/of_gpio.h>
25#include <linux/uaccess.h>
26#include <linux/pm.h>
27#include <asm/div64.h>
28
29#define DEBUG_QTI_CAN 0
30#if DEBUG_QTI_CAN == 1
31#define LOGDI(...) dev_info(&priv_data->spidev->dev, __VA_ARGS__)
32#define LOGNI(...) netdev_info(netdev, __VA_ARGS__)
33#else
34#define LOGDI(...) dev_dbg(&priv_data->spidev->dev, __VA_ARGS__)
35#define LOGNI(...) netdev_dbg(netdev, __VA_ARGS__)
36#endif
37#define LOGDE(...) dev_err(&priv_data->spidev->dev, __VA_ARGS__)
38#define LOGNE(...) netdev_err(netdev, __VA_ARGS__)
39
40#define MAX_TX_BUFFERS 1
41#define XFER_BUFFER_SIZE 64
42#define RX_ASSEMBLY_BUFFER_SIZE 128
43#define QTI_CAN_FW_QUERY_RETRY_COUNT 3
44#define DRIVER_MODE_RAW_FRAMES 0
45#define DRIVER_MODE_PROPERTIES 1
46#define DRIVER_MODE_AMB 2
47#define QUERY_FIRMWARE_TIMEOUT_MS 300
48#define EUPGRADE 140
49
50struct qti_can {
51 struct net_device **netdev;
52 struct spi_device *spidev;
53 struct mutex spi_lock; /* SPI device lock */
54 struct workqueue_struct *tx_wq;
55 char *tx_buf, *rx_buf;
56 int xfer_length;
57 atomic_t msg_seq;
58 char *assembly_buffer;
59 u8 assembly_buffer_size;
60 atomic_t netif_queue_stop;
61 struct completion response_completion;
62 int wait_cmd;
63 int cmd_result;
64 int driver_mode;
65 int clk_freq_mhz;
66 int max_can_channels;
67 int bits_per_word;
68 int reset_delay_msec;
69 int reset;
70 bool support_can_fd;
71 bool can_fw_cmd_timeout_req;
72 u32 rem_all_buffering_timeout_ms;
73 u32 can_fw_cmd_timeout_ms;
74 s64 time_diff;
75};
76
77struct qti_can_netdev_privdata {
78 struct can_priv can;
79 struct qti_can *qti_can;
80 u8 netdev_index;
81};
82
83struct qti_can_tx_work {
84 struct work_struct work;
85 struct sk_buff *skb;
86 struct net_device *netdev;
87};
88
89/* Message definitions */
90struct spi_mosi { /* TLV for MOSI line */
91 u8 cmd;
92 u8 len;
93 u16 seq;
94 u8 data[];
95} __packed;
96
97struct spi_miso { /* TLV for MISO line */
98 u8 cmd;
99 u8 len;
100 u16 seq; /* should match the seq field from the request, or 0 for unsolicited messages */
101 u8 data[];
102} __packed;
103
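/*
 * Wire framing (handled by qti_can_process_rx() further below): every SPI
 * transaction exchanges a fixed XFER_BUFFER_SIZE (64 byte) buffer in each
 * direction.  A buffer carries zero or more TLV messages, each a 4 byte
 * header (cmd, len, seq) followed by 'len' payload bytes; cmd values 0x00
 * and 0xFF are treated as padding and skipped.  A message that does not
 * fit in one transfer is completed from the next one via
 * priv_data->assembly_buffer.
 *
 * Illustrative sketch only, not driver code (handle() stands in for
 * whatever consumes a message), of walking one fully assembled buffer:
 *
 *	const u8 *p = rx_buf, *end = p + XFER_BUFFER_SIZE;
 *
 *	while (p + sizeof(struct spi_miso) <= end) {
 *		const struct spi_miso *m = (const struct spi_miso *)p;
 *
 *		if (m->cmd == 0x00 || m->cmd == 0xFF) {
 *			p++;			skip padding byte
 *			continue;
 *		}
 *		handle(m);			payload: m->data[0..m->len - 1]
 *		p += sizeof(*m) + m->len;
 *	}
 */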
104#define CMD_GET_FW_VERSION 0x81
105#define CMD_CAN_SEND_FRAME 0x82
106#define CMD_CAN_ADD_FILTER 0x83
107#define CMD_CAN_REMOVE_FILTER 0x84
108#define CMD_CAN_RECEIVE_FRAME 0x85
109#define CMD_CAN_CONFIG_BIT_TIMING 0x86
110#define CMD_CAN_DATA_BUFF_ADD 0x87
111#define CMD_CAN_DATA_BUFF_REMOVE 0X88
112#define CMD_CAN_RELEASE_BUFFER 0x89
113#define CMD_CAN_DATA_BUFF_REMOVE_ALL 0x8A
114#define CMD_PROPERTY_WRITE 0x8B
115#define CMD_PROPERTY_READ 0x8C
116#define CMD_GET_FW_BR_VERSION 0x95
117#define CMD_BEGIN_FIRMWARE_UPGRADE 0x96
118#define CMD_FIRMWARE_UPGRADE_DATA 0x97
119#define CMD_END_FIRMWARE_UPGRADE 0x98
120#define CMD_BEGIN_BOOT_ROM_UPGRADE 0x99
121#define CMD_BOOT_ROM_UPGRADE_DATA 0x9A
122#define CMD_END_BOOT_ROM_UPGRADE 0x9B
123#define CMD_END_FW_UPDATE_FILE 0x9C
124#define CMD_UPDATE_TIME_INFO 0x9D
125#define CMD_SUSPEND_EVENT 0x9E
126#define CMD_RESUME_EVENT 0x9F
127
128#define IOCTL_RELEASE_CAN_BUFFER (SIOCDEVPRIVATE + 0)
129#define IOCTL_ENABLE_BUFFERING (SIOCDEVPRIVATE + 1)
130#define IOCTL_ADD_FRAME_FILTER (SIOCDEVPRIVATE + 2)
131#define IOCTL_REMOVE_FRAME_FILTER (SIOCDEVPRIVATE + 3)
132#define IOCTL_DISABLE_BUFFERING (SIOCDEVPRIVATE + 5)
133#define IOCTL_DISABLE_ALL_BUFFERING (SIOCDEVPRIVATE + 6)
134#define IOCTL_GET_FW_BR_VERSION (SIOCDEVPRIVATE + 7)
135#define IOCTL_BEGIN_FIRMWARE_UPGRADE (SIOCDEVPRIVATE + 8)
136#define IOCTL_FIRMWARE_UPGRADE_DATA (SIOCDEVPRIVATE + 9)
137#define IOCTL_END_FIRMWARE_UPGRADE (SIOCDEVPRIVATE + 10)
138#define IOCTL_BEGIN_BOOT_ROM_UPGRADE (SIOCDEVPRIVATE + 11)
139#define IOCTL_BOOT_ROM_UPGRADE_DATA (SIOCDEVPRIVATE + 12)
140#define IOCTL_END_BOOT_ROM_UPGRADE (SIOCDEVPRIVATE + 13)
141#define IOCTL_END_FW_UPDATE_FILE (SIOCDEVPRIVATE + 14)
142
143#define IFR_DATA_OFFSET 0x100
144struct can_fw_resp {
145 u8 maj;
146 u8 min : 4;
147 u8 sub_min : 4;
148 u8 ver[48];
149} __packed;
150
151struct can_write_req {
152 u8 can_if;
153 u32 mid;
154 u8 dlc;
155 u8 data[8];
156} __packed;
157
158struct can_write_resp {
159 u8 err;
160} __packed;
161
162struct can_filter_req {
163 u8 can_if;
164 u32 mid;
165 u32 mask;
166} __packed;
167
168struct can_add_filter_resp {
169 u8 err;
170} __packed;
171
172struct can_receive_frame {
173 u8 can_if;
174 __le64 ts;
175 __le32 mid;
176 u8 dlc;
177 u8 data[8];
178} __packed;
179
180struct can_config_bit_timing {
181 u8 can_if;
182 u32 prop_seg;
183 u32 phase_seg1;
184 u32 phase_seg2;
185 u32 sjw;
186 u32 brp;
187} __packed;
188
189struct can_time_info {
190 __le64 time;
191} __packed;
192
193static struct can_bittiming_const rh850_bittiming_const = {
194 .name = "qti_can",
195 .tseg1_min = 1,
196 .tseg1_max = 16,
197 .tseg2_min = 1,
198 .tseg2_max = 16,
199 .sjw_max = 4,
200 .brp_min = 1,
201 .brp_max = 70,
202 .brp_inc = 1,
203};
204
205static struct can_bittiming_const flexcan_bittiming_const = {
206 .name = "qti_can",
207 .tseg1_min = 4,
208 .tseg1_max = 16,
209 .tseg2_min = 2,
210 .tseg2_max = 8,
211 .sjw_max = 4,
212 .brp_min = 1,
213 .brp_max = 256,
214 .brp_inc = 1,
215};
216
217static struct can_bittiming_const qti_can_bittiming_const;
218
219static struct can_bittiming_const qti_can_data_bittiming_const = {
220 .name = "qti_can",
221 .tseg1_min = 1,
222 .tseg1_max = 16,
223 .tseg2_min = 1,
224 .tseg2_max = 16,
225 .sjw_max = 4,
226 .brp_min = 1,
227 .brp_max = 70,
228 .brp_inc = 1,
229};
230
231struct vehicle_property {
232 int id;
233 __le64 ts;
234 int zone;
235 int val_type;
236 u32 data_len;
237 union {
238 u8 bval;
239 int val;
240 int val_arr[4];
241 float f_value;
242 float float_arr[4];
243 u8 str[36];
244 };
245} __packed;
246
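/*
 * In DRIVER_MODE_PROPERTIES / DRIVER_MODE_AMB the controller reports
 * vehicle properties instead of raw CAN frames.  qti_can_receive_property()
 * below delivers the whole packed structure to userspace as a CAN FD frame
 * on the first interface (can_if 0) with can_id 0, so a reader is expected
 * to reinterpret cfd->data as a struct vehicle_property.
 */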
247struct qti_can_release_can_buffer {
248 u8 enable;
249} __packed;
250
251struct qti_can_buffer {
252 u8 can_if;
253 u32 mid;
254 u32 mask;
255} __packed;
256
257struct can_fw_br_resp {
258 u8 maj;
259 u8 min : 4;
260 u8 sub_min : 4;
261 u8 ver[32];
262 u8 br_maj;
263 u8 br_min;
264 u8 curr_exec_mode;
265} __packed;
266
267struct qti_can_ioctl_req {
268 u8 len;
269 u8 data[64];
270} __packed;
271
272static int qti_can_rx_message(struct qti_can *priv_data);
273
274static irqreturn_t qti_can_irq(int irq, void *priv)
275{
276 struct qti_can *priv_data = priv;
277
278 LOGDI("%s\n", __func__);
279 qti_can_rx_message(priv_data);
280 return IRQ_HANDLED;
281}
282
283static void qti_can_receive_frame(struct qti_can *priv_data,
284 struct can_receive_frame *frame)
285{
286 struct can_frame *cf;
287 struct sk_buff *skb;
288 struct skb_shared_hwtstamps *skt;
289 ktime_t nsec;
290 struct net_device *netdev;
291 int i;
292 struct device *dev;
293
294 dev = &priv_data->spidev->dev;
295 if (frame->can_if >= priv_data->max_can_channels) {
296 LOGDE("qti_can rcv error. Channel is %d\n", frame->can_if);
297 return;
298 }
299
300 netdev = priv_data->netdev[frame->can_if];
301 skb = alloc_can_skb(netdev, &cf);
302 if (!skb) {
303 LOGDE("skb alloc failed. frame->can_if %d\n", frame->can_if);
304 return;
305 }
306
307 LOGDI("rcv frame %d %llu %x %d %x %x %x %x %x %x %x %x\n",
308 frame->can_if, frame->ts, frame->mid, frame->dlc,
309 frame->data[0], frame->data[1], frame->data[2], frame->data[3],
310 frame->data[4], frame->data[5], frame->data[6], frame->data[7]);
311 cf->can_id = le32_to_cpu(frame->mid);
312 cf->can_dlc = get_can_dlc(frame->dlc);
313
314 for (i = 0; i < cf->can_dlc; i++)
315 cf->data[i] = frame->data[i];
316
317 nsec = ms_to_ktime(le64_to_cpu(frame->ts)
318 + priv_data->time_diff);
319 skt = skb_hwtstamps(skb);
320 skt->hwtstamp = nsec;
321 skb->tstamp = nsec;
322 netif_rx(skb);
323 LOGDI("hwtstamp: %lld\n", ktime_to_ms(skt->hwtstamp));
324 netdev->stats.rx_packets++;
325}
326
327static void qti_can_receive_property(struct qti_can *priv_data,
328 struct vehicle_property *property)
329{
330 struct canfd_frame *cfd;
331 u8 *p;
332 struct sk_buff *skb;
333 struct skb_shared_hwtstamps *skt;
334 ktime_t nsec;
335 struct net_device *netdev;
336 struct device *dev;
337 int i;
338
339 /* can0 as the channel with properties */
340 dev = &priv_data->spidev->dev;
341 netdev = priv_data->netdev[0];
342 skb = alloc_canfd_skb(netdev, &cfd);
343 if (!skb) {
344 LOGDE("skb alloc failed. frame->can_if %d\n", 0);
345 return;
346 }
347
348 LOGDI("rcv property:0x%x data:%2x %2x %2x %2x", property->id,
349 property->str[0], property->str[1],
350 property->str[2], property->str[3]);
351 cfd->can_id = 0x00;
352 cfd->len = sizeof(struct vehicle_property);
353
354 p = (u8 *)property;
355 for (i = 0; i < cfd->len; i++)
356 cfd->data[i] = p[i];
357
358 nsec = ns_to_ktime(le64_to_cpu(property->ts));
359 skt = skb_hwtstamps(skb);
360 skt->hwtstamp = nsec;
361 LOGDI(" hwtstamp %lld\n", ktime_to_ms(skt->hwtstamp));
362 skb->tstamp = nsec;
363 netif_rx(skb);
364 netdev->stats.rx_packets++;
365}
366
367static int qti_can_process_response(struct qti_can *priv_data,
368 struct spi_miso *resp, int length)
369{
370 int ret = 0;
371 u64 mstime;
372 ktime_t ktime_now;
373
374 LOGDI("<%x %2d [%d]\n", resp->cmd, resp->len, resp->seq);
375 if (resp->cmd == CMD_CAN_RECEIVE_FRAME) {
376 struct can_receive_frame *frame =
377 (struct can_receive_frame *)&resp->data;
378 if ((resp->len - (frame->dlc + sizeof(frame->dlc))) <
379 (sizeof(*frame) - (sizeof(frame->dlc)
380 + sizeof(frame->data)))) {
381 LOGDE("len:%d, size:%d\n", resp->len, sizeof(*frame));
382 LOGDE("Check the f/w version & upgrade to latest!!\n");
383 ret = -EUPGRADE;
384 goto exit;
385 }
386 if (resp->len > length) {
387 /* Error. This should never happen */
388 LOGDE("%s error: Saving %d bytes\n", __func__, length);
389 memcpy(priv_data->assembly_buffer, (char *)resp,
390 length);
391 priv_data->assembly_buffer_size = length;
392 } else {
393 qti_can_receive_frame(priv_data, frame);
394 }
395 } else if (resp->cmd == CMD_PROPERTY_READ) {
396 struct vehicle_property *property =
397 (struct vehicle_property *)&resp->data;
398
399 if (resp->len > length) {
400 /* Error. This should never happen */
401 LOGDE("%s error: Saving %d bytes\n", __func__, length);
402 memcpy(priv_data->assembly_buffer, (char *)resp,
403 length);
404 priv_data->assembly_buffer_size = length;
405 } else {
406 qti_can_receive_property(priv_data, property);
407 }
408 } else if (resp->cmd == CMD_GET_FW_VERSION) {
409 struct can_fw_resp *fw_resp = (struct can_fw_resp *)resp->data;
410
411 dev_info(&priv_data->spidev->dev, "fw %d.%d.%d",
412 fw_resp->maj, fw_resp->min, fw_resp->sub_min);
413 dev_info(&priv_data->spidev->dev, "fw string %s",
414 fw_resp->ver);
415 } else if (resp->cmd == CMD_GET_FW_BR_VERSION) {
416 struct can_fw_br_resp *fw_resp =
417 (struct can_fw_br_resp *)resp->data;
418
419 dev_info(&priv_data->spidev->dev, "fw_can %d.%d.%d",
420 fw_resp->maj, fw_resp->min, fw_resp->sub_min);
421 dev_info(&priv_data->spidev->dev, "fw string %s",
422 fw_resp->ver);
423 dev_info(&priv_data->spidev->dev, "fw_br %d.%d exec_mode %d",
424 fw_resp->br_maj, fw_resp->br_min,
425 fw_resp->curr_exec_mode);
426 ret = fw_resp->curr_exec_mode << 28;
427 ret |= (fw_resp->br_maj & 0xF) << 24;
428 ret |= (fw_resp->br_min & 0xFF) << 16;
429 ret |= (fw_resp->maj & 0xF) << 8;
430 ret |= (fw_resp->min & 0xF) << 4;
431 ret |= (fw_resp->sub_min & 0xF);
432 } else if (resp->cmd == CMD_UPDATE_TIME_INFO) {
433 struct can_time_info *time_data =
434 (struct can_time_info *)resp->data;
435
436 ktime_now = ktime_get_boottime();
437 mstime = ktime_to_ms(ktime_now);
438 priv_data->time_diff = mstime -
439 (le64_to_cpu(time_data->time));
440 }
441
442exit:
443 if (resp->cmd == priv_data->wait_cmd) {
444 priv_data->cmd_result = ret;
445 complete(&priv_data->response_completion);
446 }
447 return ret;
448}
449
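/*
 * Timestamp correlation: CMD_UPDATE_TIME_INFO (handled above) carries the
 * controller's current time in milliseconds.  The driver records
 * time_diff = host boottime (ms) - controller time (ms) and adds it back to
 * every received frame's timestamp in qti_can_receive_frame(), so RX
 * hardware timestamps end up on the host boottime clock.
 *
 * Worked example with illustrative numbers: if host boottime is 100000 ms
 * when a time-info message reporting 2500 ms arrives, time_diff = 97500 ms;
 * a frame the controller stamps at 2600 ms is then delivered with
 * hwtstamp = 2600 + 97500 = 100100 ms.
 */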
450static int qti_can_process_rx(struct qti_can *priv_data, char *rx_buf)
451{
452 struct spi_miso *resp;
453 struct device *dev;
454 int length_processed = 0, actual_length = priv_data->xfer_length;
455 int ret = 0;
456
457 dev = &priv_data->spidev->dev;
458 while (length_processed < actual_length) {
459 int length_left = actual_length - length_processed;
460 int length = 0; /* length of consumed chunk */
461 void *data;
462
463 if (priv_data->assembly_buffer_size > 0) {
464 LOGDI("callback: Reassembling %d bytes\n",
465 priv_data->assembly_buffer_size);
466 /* should copy just 1 byte instead, since cmd should */
467 /* already have been copied as the first byte */
468 memcpy(priv_data->assembly_buffer +
469 priv_data->assembly_buffer_size,
470 rx_buf, 2);
471 data = priv_data->assembly_buffer;
472 resp = (struct spi_miso *)data;
473 length = resp->len + sizeof(*resp)
474 - priv_data->assembly_buffer_size;
475 if (length > 0)
476 memcpy(priv_data->assembly_buffer +
477 priv_data->assembly_buffer_size,
478 rx_buf, length);
479 length_left += priv_data->assembly_buffer_size;
480 priv_data->assembly_buffer_size = 0;
481 } else {
482 data = rx_buf + length_processed;
483 resp = (struct spi_miso *)data;
484 if (resp->cmd == 0x00 || resp->cmd == 0xFF) {
485 /* special case. ignore cmd==0x00, 0xFF */
486 length_processed += 1;
487 continue;
488 }
489 length = resp->len + sizeof(struct spi_miso);
490 }
491 LOGDI("processing. p %d -> l %d (t %d)\n",
492 length_processed, length_left, priv_data->xfer_length);
493 length_processed += length;
494 if (length_left >= sizeof(*resp) &&
495 resp->len + sizeof(*resp) <= length_left) {
496 struct spi_miso *resp =
497 (struct spi_miso *)data;
498 ret = qti_can_process_response(priv_data, resp,
499 length_left);
500 } else if (length_left > 0) {
501 /* Not full message. Store however much we have for */
502 /* later assembly */
503 LOGDI("callback: Storing %d bytes of response\n",
504 length_left);
505 memcpy(priv_data->assembly_buffer, data, length_left);
506 priv_data->assembly_buffer_size = length_left;
507 break;
508 }
509 }
510 return ret;
511}
512
513static int qti_can_do_spi_transaction(struct qti_can *priv_data)
514{
515 struct spi_device *spi;
516 struct spi_transfer *xfer;
517 struct spi_message *msg;
518 struct device *dev;
519 int ret;
520
521 spi = priv_data->spidev;
522 dev = &spi->dev;
523 msg = devm_kzalloc(&spi->dev, sizeof(*msg), GFP_KERNEL);
524 xfer = devm_kzalloc(&spi->dev, sizeof(*xfer), GFP_KERNEL);
525 if (!xfer || !msg)
526 return -ENOMEM;
527 LOGDI(">%x %2d [%d]\n", priv_data->tx_buf[0],
528 priv_data->tx_buf[1], priv_data->tx_buf[2]);
529 spi_message_init(msg);
530 spi_message_add_tail(xfer, msg);
531 xfer->tx_buf = priv_data->tx_buf;
532 xfer->rx_buf = priv_data->rx_buf;
533 xfer->len = priv_data->xfer_length;
534 xfer->bits_per_word = priv_data->bits_per_word;
535 ret = spi_sync(spi, msg);
536 LOGDI("spi_sync ret %d data %x %x %x %x %x %x %x %x\n", ret,
537 priv_data->rx_buf[0], priv_data->rx_buf[1],
538 priv_data->rx_buf[2], priv_data->rx_buf[3],
539 priv_data->rx_buf[4], priv_data->rx_buf[5],
540 priv_data->rx_buf[6], priv_data->rx_buf[7]);
541
542 if (ret == 0)
543 qti_can_process_rx(priv_data, priv_data->rx_buf);
544 devm_kfree(&spi->dev, msg);
545 devm_kfree(&spi->dev, xfer);
546 return ret;
547}
548
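/*
 * The link is full duplex: the RX half of every transaction above is parsed
 * by qti_can_process_rx(), so responses and unsolicited frames may arrive on
 * any command transfer, not only on the IRQ-driven empty transfer issued by
 * qti_can_rx_message() below.
 */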
549static int qti_can_rx_message(struct qti_can *priv_data)
550{
551 char *tx_buf, *rx_buf;
552 int ret;
553
554 mutex_lock(&priv_data->spi_lock);
555 tx_buf = priv_data->tx_buf;
556 rx_buf = priv_data->rx_buf;
557 memset(tx_buf, 0, XFER_BUFFER_SIZE);
558 memset(rx_buf, 0, XFER_BUFFER_SIZE);
559 priv_data->xfer_length = XFER_BUFFER_SIZE;
560
561 ret = qti_can_do_spi_transaction(priv_data);
562 mutex_unlock(&priv_data->spi_lock);
563
564 return ret;
565}
566
567static int qti_can_query_firmware_version(struct qti_can *priv_data)
568{
569 char *tx_buf, *rx_buf;
570 int ret;
571 struct spi_mosi *req;
572
573 mutex_lock(&priv_data->spi_lock);
574 tx_buf = priv_data->tx_buf;
575 rx_buf = priv_data->rx_buf;
576 memset(tx_buf, 0, XFER_BUFFER_SIZE);
577 memset(rx_buf, 0, XFER_BUFFER_SIZE);
578 priv_data->xfer_length = XFER_BUFFER_SIZE;
579
580 req = (struct spi_mosi *)tx_buf;
581 req->cmd = CMD_GET_FW_VERSION;
582 req->len = 0;
583 req->seq = atomic_inc_return(&priv_data->msg_seq);
584
585 priv_data->wait_cmd = CMD_GET_FW_VERSION;
586 priv_data->cmd_result = -1;
587 reinit_completion(&priv_data->response_completion);
588
589 ret = qti_can_do_spi_transaction(priv_data);
590 mutex_unlock(&priv_data->spi_lock);
591
592 if (ret == 0) {
593 LOGDI("waiting for completion with timeout of %lu jiffies",
594 msecs_to_jiffies(QUERY_FIRMWARE_TIMEOUT_MS));
595 wait_for_completion_interruptible_timeout(
596 &priv_data->response_completion,
597 msecs_to_jiffies(QUERY_FIRMWARE_TIMEOUT_MS));
598 LOGDI("done waiting");
599 ret = priv_data->cmd_result;
600 }
601
602 return ret;
603}
604
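/*
 * qti_can_query_firmware_version() above is the template for the driver's
 * synchronous commands: record the expected response in wait_cmd, reset
 * cmd_result and response_completion, issue the transfer, then wait with a
 * timeout; qti_can_process_response() completes the completion and stores
 * cmd_result when a response with a matching cmd arrives.
 */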
605static int qti_can_notify_power_events(struct qti_can *priv_data, u8 event_type)
606{
607 char *tx_buf, *rx_buf;
608 int ret;
609 struct spi_mosi *req;
610
611 mutex_lock(&priv_data->spi_lock);
612 tx_buf = priv_data->tx_buf;
613 rx_buf = priv_data->rx_buf;
614 memset(tx_buf, 0, XFER_BUFFER_SIZE);
615 memset(rx_buf, 0, XFER_BUFFER_SIZE);
616 priv_data->xfer_length = XFER_BUFFER_SIZE;
617
618 req = (struct spi_mosi *)tx_buf;
619 req->cmd = event_type;
620 req->len = 0;
621 req->seq = atomic_inc_return(&priv_data->msg_seq);
622
623 ret = qti_can_do_spi_transaction(priv_data);
624 mutex_unlock(&priv_data->spi_lock);
625
626 return ret;
627}
628
629static int qti_can_set_bitrate(struct net_device *netdev)
630{
631 char *tx_buf, *rx_buf;
632 int ret;
633 struct spi_mosi *req;
634 struct can_config_bit_timing *req_d;
635 struct qti_can *priv_data;
636 struct can_priv *priv = netdev_priv(netdev);
637 struct qti_can_netdev_privdata *qti_can_priv;
638
639 qti_can_priv = netdev_priv(netdev);
640 priv_data = qti_can_priv->qti_can;
641
642 netdev_info(netdev, "ch%i, bitrate setting>%i",
643 qti_can_priv->netdev_index, priv->bittiming.bitrate);
644 LOGNI("sjw>%i brp>%i ph_sg1>%i ph_sg2>%i smpl_pt>%i tq>%i pr_seg>%i",
645 priv->bittiming.sjw, priv->bittiming.brp,
646 priv->bittiming.phase_seg1,
647 priv->bittiming.phase_seg2,
648 priv->bittiming.sample_point,
649 priv->bittiming.tq, priv->bittiming.prop_seg);
650
651 mutex_lock(&priv_data->spi_lock);
652 tx_buf = priv_data->tx_buf;
653 rx_buf = priv_data->rx_buf;
654 memset(tx_buf, 0, XFER_BUFFER_SIZE);
655 memset(rx_buf, 0, XFER_BUFFER_SIZE);
656 priv_data->xfer_length = XFER_BUFFER_SIZE;
657
658 req = (struct spi_mosi *)tx_buf;
659 req->cmd = CMD_CAN_CONFIG_BIT_TIMING;
660 req->len = sizeof(struct can_config_bit_timing);
661 req->seq = atomic_inc_return(&priv_data->msg_seq);
662 req_d = (struct can_config_bit_timing *)req->data;
663 req_d->can_if = qti_can_priv->netdev_index;
664 req_d->prop_seg = priv->bittiming.prop_seg;
665 req_d->phase_seg1 = priv->bittiming.phase_seg1;
666 req_d->phase_seg2 = priv->bittiming.phase_seg2;
667 req_d->sjw = priv->bittiming.sjw;
668 req_d->brp = priv->bittiming.brp;
669 ret = qti_can_do_spi_transaction(priv_data);
670 mutex_unlock(&priv_data->spi_lock);
671
672 return ret;
673}
674
675static int qti_can_write(struct qti_can *priv_data,
676 int can_channel, struct canfd_frame *cf)
677{
678 char *tx_buf, *rx_buf;
679 int ret, i;
680 struct spi_mosi *req;
681 struct can_write_req *req_d;
682 struct net_device *netdev;
683
684 if (can_channel < 0 || can_channel >= priv_data->max_can_channels) {
685 LOGDE("%s error. Channel is %d\n", __func__, can_channel);
686 return -EINVAL;
687 }
688
689 mutex_lock(&priv_data->spi_lock);
690 tx_buf = priv_data->tx_buf;
691 rx_buf = priv_data->rx_buf;
692 memset(tx_buf, 0, XFER_BUFFER_SIZE);
693 memset(rx_buf, 0, XFER_BUFFER_SIZE);
694 priv_data->xfer_length = XFER_BUFFER_SIZE;
695
696 req = (struct spi_mosi *)tx_buf;
697 if (priv_data->driver_mode == DRIVER_MODE_RAW_FRAMES) {
698 req->cmd = CMD_CAN_SEND_FRAME;
699 req->len = sizeof(struct can_write_req) + 8;
700 req->seq = atomic_inc_return(&priv_data->msg_seq);
701
702 req_d = (struct can_write_req *)req->data;
703 req_d->can_if = can_channel;
704 req_d->mid = cf->can_id;
705 req_d->dlc = cf->len;
706
707 for (i = 0; i < cf->len; i++)
708 req_d->data[i] = cf->data[i];
709 } else if (priv_data->driver_mode == DRIVER_MODE_PROPERTIES ||
710 priv_data->driver_mode == DRIVER_MODE_AMB) {
711 req->cmd = CMD_PROPERTY_WRITE;
712 req->len = sizeof(struct vehicle_property);
713 req->seq = atomic_inc_return(&priv_data->msg_seq);
714 for (i = 0; i < cf->len; i++)
715 req->data[i] = cf->data[i];
716 } else {
717 LOGDE("%s: wrong driver mode %i",
718 __func__, priv_data->driver_mode);
719 }
720
721 ret = qti_can_do_spi_transaction(priv_data);
722 netdev = priv_data->netdev[can_channel];
723 netdev->stats.tx_packets++;
724 mutex_unlock(&priv_data->spi_lock);
725
726 return ret;
727}
728
729static int qti_can_netdev_open(struct net_device *netdev)
730{
731 int err;
732
733 LOGNI("Open");
734 err = open_candev(netdev);
735 if (err)
736 return err;
737
738 netif_start_queue(netdev);
739
740 return 0;
741}
742
743static int qti_can_netdev_close(struct net_device *netdev)
744{
745 LOGNI("Close");
746
747 netif_stop_queue(netdev);
748 close_candev(netdev);
749 return 0;
750}
751
752static void qti_can_send_can_frame(struct work_struct *ws)
753{
754 struct qti_can_tx_work *tx_work;
755 struct canfd_frame *cf;
756 struct qti_can *priv_data;
757 struct net_device *netdev;
758 struct qti_can_netdev_privdata *netdev_priv_data;
759 int can_channel;
760
761 tx_work = container_of(ws, struct qti_can_tx_work, work);
762 netdev = tx_work->netdev;
763 netdev_priv_data = netdev_priv(netdev);
764 priv_data = netdev_priv_data->qti_can;
765 can_channel = netdev_priv_data->netdev_index;
766
767 LOGDI("send_can_frame ws %pK\n", ws);
768 LOGDI("send_can_frame tx %pK\n", tx_work);
769
770 cf = (struct canfd_frame *)tx_work->skb->data;
771 qti_can_write(priv_data, can_channel, cf);
772
773 dev_kfree_skb(tx_work->skb);
774 kfree(tx_work);
775}
776
777static netdev_tx_t qti_can_netdev_start_xmit(
778 struct sk_buff *skb, struct net_device *netdev)
779{
780 struct qti_can_netdev_privdata *netdev_priv_data = netdev_priv(netdev);
781 struct qti_can *priv_data = netdev_priv_data->qti_can;
782 struct qti_can_tx_work *tx_work;
783
784 LOGNI("netdev_start_xmit");
785 if (can_dropped_invalid_skb(netdev, skb)) {
786 LOGNE("Dropping invalid can frame\n");
787 return NETDEV_TX_OK;
788 }
789 tx_work = kzalloc(sizeof(*tx_work), GFP_ATOMIC);
790 if (!tx_work)
791 return NETDEV_TX_OK;
792 INIT_WORK(&tx_work->work, qti_can_send_can_frame);
793 tx_work->netdev = netdev;
794 tx_work->skb = skb;
795 queue_work(priv_data->tx_wq, &tx_work->work);
796
797 return NETDEV_TX_OK;
798}
799
800static int qti_can_send_release_can_buffer_cmd(struct net_device *netdev)
801{
802 char *tx_buf, *rx_buf;
803 int ret;
804 struct spi_mosi *req;
805 struct qti_can *priv_data;
806 struct qti_can_netdev_privdata *netdev_priv_data;
807 int *mode;
808
809 netdev_priv_data = netdev_priv(netdev);
810 priv_data = netdev_priv_data->qti_can;
811 mutex_lock(&priv_data->spi_lock);
812 tx_buf = priv_data->tx_buf;
813 rx_buf = priv_data->rx_buf;
814 memset(tx_buf, 0, XFER_BUFFER_SIZE);
815 memset(rx_buf, 0, XFER_BUFFER_SIZE);
816 priv_data->xfer_length = XFER_BUFFER_SIZE;
817
818 req = (struct spi_mosi *)tx_buf;
819 req->cmd = CMD_CAN_RELEASE_BUFFER;
820 req->len = sizeof(int);
821 req->seq = atomic_inc_return(&priv_data->msg_seq);
822 mode = (int *)req->data;
823 *mode = priv_data->driver_mode;
824
825 ret = qti_can_do_spi_transaction(priv_data);
826 mutex_unlock(&priv_data->spi_lock);
827 return ret;
828}
829
830static int qti_can_data_buffering(struct net_device *netdev,
831 struct ifreq *ifr, int cmd)
832{
833 char *tx_buf, *rx_buf;
834 int ret;
835 u32 timeout;
836 struct spi_mosi *req;
837 struct qti_can_buffer *enable_buffering;
838 struct qti_can_buffer *add_request;
839 struct qti_can *priv_data;
840 struct qti_can_netdev_privdata *netdev_priv_data;
841 struct spi_device *spi;
842
843 netdev_priv_data = netdev_priv(netdev);
844 priv_data = netdev_priv_data->qti_can;
845 spi = priv_data->spidev;
846 timeout = priv_data->can_fw_cmd_timeout_ms;
847
848 mutex_lock(&priv_data->spi_lock);
849 tx_buf = priv_data->tx_buf;
850 rx_buf = priv_data->rx_buf;
851 memset(tx_buf, 0, XFER_BUFFER_SIZE);
852 memset(rx_buf, 0, XFER_BUFFER_SIZE);
853 priv_data->xfer_length = XFER_BUFFER_SIZE;
854 if (!ifr)
855 return -EINVAL;
856 add_request = devm_kzalloc(&spi->dev,
857 sizeof(struct qti_can_buffer),
858 GFP_KERNEL);
859 if (!add_request)
860 return -ENOMEM;
861
862 if (copy_from_user(add_request, ifr->ifr_data,
863 sizeof(struct qti_can_buffer))) {
864 devm_kfree(&spi->dev, add_request);
865 return -EFAULT;
866 }
867
868 req = (struct spi_mosi *)tx_buf;
869 if (cmd == IOCTL_ENABLE_BUFFERING)
870 req->cmd = CMD_CAN_DATA_BUFF_ADD;
871 else
872 req->cmd = CMD_CAN_DATA_BUFF_REMOVE;
873 req->len = sizeof(struct qti_can_buffer);
874 req->seq = atomic_inc_return(&priv_data->msg_seq);
875
876 enable_buffering = (struct qti_can_buffer *)req->data;
877 enable_buffering->can_if = add_request->can_if;
878 enable_buffering->mid = add_request->mid;
879 enable_buffering->mask = add_request->mask;
880
881 if (priv_data->can_fw_cmd_timeout_req) {
882 priv_data->wait_cmd = req->cmd;
883 priv_data->cmd_result = -1;
884 reinit_completion(&priv_data->response_completion);
885 }
886
887 ret = qti_can_do_spi_transaction(priv_data);
888 devm_kfree(&spi->dev, add_request);
889 mutex_unlock(&priv_data->spi_lock);
890
891 if (ret == 0 && priv_data->can_fw_cmd_timeout_req) {
892 LOGDI("%s ready to wait for response\n", __func__);
893 ret = wait_for_completion_interruptible_timeout(
894 &priv_data->response_completion,
895 msecs_to_jiffies(timeout));
896 ret = priv_data->cmd_result;
897 }
898 return ret;
899}
900
901static int qti_can_remove_all_buffering(struct net_device *netdev)
902{
903 char *tx_buf, *rx_buf;
904 int ret;
905 u32 timeout;
906 struct spi_mosi *req;
907 struct qti_can *priv_data;
908 struct qti_can_netdev_privdata *netdev_priv_data;
909
910 netdev_priv_data = netdev_priv(netdev);
911 priv_data = netdev_priv_data->qti_can;
912 timeout = priv_data->rem_all_buffering_timeout_ms;
913
914 mutex_lock(&priv_data->spi_lock);
915 tx_buf = priv_data->tx_buf;
916 rx_buf = priv_data->rx_buf;
917 memset(tx_buf, 0, XFER_BUFFER_SIZE);
918 memset(rx_buf, 0, XFER_BUFFER_SIZE);
919 priv_data->xfer_length = XFER_BUFFER_SIZE;
920
921 req = (struct spi_mosi *)tx_buf;
922 req->cmd = CMD_CAN_DATA_BUFF_REMOVE_ALL;
923 req->len = 0;
924 req->seq = atomic_inc_return(&priv_data->msg_seq);
925
926 if (priv_data->can_fw_cmd_timeout_req) {
927 priv_data->wait_cmd = req->cmd;
928 priv_data->cmd_result = -1;
929 reinit_completion(&priv_data->response_completion);
930 }
931
932 ret = qti_can_do_spi_transaction(priv_data);
933 mutex_unlock(&priv_data->spi_lock);
934
935 if (ret == 0 && priv_data->can_fw_cmd_timeout_req) {
936 LOGDI("%s wait for response\n", __func__);
937 ret = wait_for_completion_interruptible_timeout(
938 &priv_data->response_completion,
939 msecs_to_jiffies(timeout));
940 ret = priv_data->cmd_result;
941 }
942
943 return ret;
944}
945
946static int qti_can_frame_filter(struct net_device *netdev,
947 struct ifreq *ifr, int cmd)
948{
949 char *tx_buf, *rx_buf;
950 int ret;
951 struct spi_mosi *req;
952 struct can_filter_req *add_filter;
953 struct can_filter_req *filter_request;
954 struct qti_can *priv_data;
955 struct qti_can_netdev_privdata *netdev_priv_data;
956 struct spi_device *spi;
957
958 netdev_priv_data = netdev_priv(netdev);
959 priv_data = netdev_priv_data->qti_can;
960 spi = priv_data->spidev;
961
962 mutex_lock(&priv_data->spi_lock);
963 tx_buf = priv_data->tx_buf;
964 rx_buf = priv_data->rx_buf;
965 memset(tx_buf, 0, XFER_BUFFER_SIZE);
966 memset(rx_buf, 0, XFER_BUFFER_SIZE);
967 priv_data->xfer_length = XFER_BUFFER_SIZE;
968
969 if (!ifr)
970 return -EINVAL;
971
972 filter_request =
973 devm_kzalloc(&spi->dev, sizeof(struct can_filter_req),
974 GFP_KERNEL);
975 if (!filter_request)
976 return -ENOMEM;
977
978 if (copy_from_user(filter_request, ifr->ifr_data,
979 sizeof(struct can_filter_req))) {
980 devm_kfree(&spi->dev, filter_request);
981 return -EFAULT;
982 }
983
984 req = (struct spi_mosi *)tx_buf;
985 if (cmd == IOCTL_ADD_FRAME_FILTER)
986 req->cmd = CMD_CAN_ADD_FILTER;
987 else
988 req->cmd = CMD_CAN_REMOVE_FILTER;
989
990 req->len = sizeof(struct can_filter_req);
991 req->seq = atomic_inc_return(&priv_data->msg_seq);
992
993 add_filter = (struct can_filter_req *)req->data;
994 add_filter->can_if = filter_request->can_if;
995 add_filter->mid = filter_request->mid;
996 add_filter->mask = filter_request->mask;
997
998 ret = qti_can_do_spi_transaction(priv_data);
999 devm_kfree(&spi->dev, filter_request);
1000 mutex_unlock(&priv_data->spi_lock);
1001 return ret;
1002}
1003
1004static int qti_can_send_spi_locked(struct qti_can *priv_data, int cmd, int len,
1005 u8 *data)
1006{
1007 char *tx_buf, *rx_buf;
1008 struct spi_mosi *req;
1009 int ret;
1010
1011 LOGDI("%s\n", __func__);
1012
1013 tx_buf = priv_data->tx_buf;
1014 rx_buf = priv_data->rx_buf;
1015 memset(tx_buf, 0, XFER_BUFFER_SIZE);
1016 memset(rx_buf, 0, XFER_BUFFER_SIZE);
1017 priv_data->xfer_length = XFER_BUFFER_SIZE;
1018
1019 req = (struct spi_mosi *)tx_buf;
1020 req->cmd = cmd;
1021 req->len = len;
1022 req->seq = atomic_inc_return(&priv_data->msg_seq);
1023
1024 if (unlikely(len > 64))
1025 return -EINVAL;
1026 memcpy(req->data, data, len);
1027
1028 ret = qti_can_do_spi_transaction(priv_data);
1029 return ret;
1030}
1031
1032static int qti_can_convert_ioctl_cmd_to_spi_cmd(int ioctl_cmd)
1033{
1034 switch (ioctl_cmd) {
1035 case IOCTL_GET_FW_BR_VERSION:
1036 return CMD_GET_FW_BR_VERSION;
1037 case IOCTL_BEGIN_FIRMWARE_UPGRADE:
1038 return CMD_BEGIN_FIRMWARE_UPGRADE;
1039 case IOCTL_FIRMWARE_UPGRADE_DATA:
1040 return CMD_FIRMWARE_UPGRADE_DATA;
1041 case IOCTL_END_FIRMWARE_UPGRADE:
1042 return CMD_END_FIRMWARE_UPGRADE;
1043 case IOCTL_BEGIN_BOOT_ROM_UPGRADE:
1044 return CMD_BEGIN_BOOT_ROM_UPGRADE;
1045 case IOCTL_BOOT_ROM_UPGRADE_DATA:
1046 return CMD_BOOT_ROM_UPGRADE_DATA;
1047 case IOCTL_END_BOOT_ROM_UPGRADE:
1048 return CMD_END_BOOT_ROM_UPGRADE;
1049 case IOCTL_END_FW_UPDATE_FILE:
1050 return CMD_END_FW_UPDATE_FILE;
1051 }
1052 return -EINVAL;
1053}
1054
1055static int qti_can_end_fwupgrade_ioctl(struct net_device *netdev,
1056 struct ifreq *ifr, int cmd)
1057{
1058 int spi_cmd, ret;
1059
1060 struct qti_can *priv_data;
1061 struct qti_can_netdev_privdata *netdev_priv_data;
1062 struct spi_device *spi;
1063 int len = 0;
1064 u8 *data = NULL;
1065
1066 netdev_priv_data = netdev_priv(netdev);
1067 priv_data = netdev_priv_data->qti_can;
1068 spi = priv_data->spidev;
1069 spi_cmd = qti_can_convert_ioctl_cmd_to_spi_cmd(cmd);
1070 LOGDI("%s spi_cmd %x\n", __func__, spi_cmd);
1071 if (spi_cmd < 0) {
1072 LOGDE("%s wrong command %d\n", __func__, cmd);
1073 return spi_cmd;
1074 }
1075
1076 if (!ifr)
1077 return -EINVAL;
1078
1079 mutex_lock(&priv_data->spi_lock);
1080 LOGDI("%s len %d\n", __func__, len);
1081
1082 ret = qti_can_send_spi_locked(priv_data, spi_cmd, len, data);
1083
1084 mutex_unlock(&priv_data->spi_lock);
1085
1086 return ret;
1087}
1088
1089static int qti_can_do_blocking_ioctl(struct net_device *netdev,
1090 struct ifreq *ifr, int cmd)
1091{
1092 int spi_cmd, ret;
1093
1094 struct qti_can *priv_data;
1095 struct qti_can_netdev_privdata *netdev_priv_data;
1096 struct qti_can_ioctl_req *ioctl_data = NULL;
1097 struct spi_device *spi;
1098 int len = 0;
1099 u8 *data = NULL;
1100
1101 netdev_priv_data = netdev_priv(netdev);
1102 priv_data = netdev_priv_data->qti_can;
1103 spi = priv_data->spidev;
1104
1105 spi_cmd = qti_can_convert_ioctl_cmd_to_spi_cmd(cmd);
1106 LOGDI("%s spi_cmd %x\n", __func__, spi_cmd);
1107 if (spi_cmd < 0) {
1108 LOGDE("%s wrong command %d\n", __func__, cmd);
1109 return spi_cmd;
1110 }
1111
1112 if (!ifr)
1113 return -EINVAL;
1114
1115 mutex_lock(&priv_data->spi_lock);
1116 if (spi_cmd == CMD_FIRMWARE_UPGRADE_DATA ||
1117 spi_cmd == CMD_BOOT_ROM_UPGRADE_DATA) {
1118 ioctl_data =
1119 devm_kzalloc(&spi->dev,
1120 sizeof(struct qti_can_ioctl_req),
1121 GFP_KERNEL);
1122 if (!ioctl_data)
1123 return -ENOMEM;
1124
1125 if (copy_from_user(ioctl_data, ifr->ifr_data,
1126 sizeof(struct qti_can_ioctl_req))) {
1127 devm_kfree(&spi->dev, ioctl_data);
1128 return -EFAULT;
1129 }
1130
1131 if (ioctl_data->len < 0) {
1132 LOGDE("ioctl_data->len is: %d\n", ioctl_data->len);
1133 return -EINVAL;
1134 }
1135
1136 /* Regular NULL check will fail here as ioctl_data is at
1137 * some offset
1138 */
1139 if ((void *)ioctl_data > (void *)0x100) {
1140 len = ioctl_data->len;
1141 data = ioctl_data->data;
1142 }
1143 }
1144 LOGDI("%s len %d\n", __func__, len);
1145
1146 if ((len > 64) || (len < 0)) {
1147 LOGDE("len value[%d] is not correct!!\n", len);
1148 return -EINVAL;
1149 }
1150
1151 priv_data->wait_cmd = spi_cmd;
1152 priv_data->cmd_result = -1;
1153 reinit_completion(&priv_data->response_completion);
1154
1155 ret = qti_can_send_spi_locked(priv_data, spi_cmd, len, data);
1156 if (ioctl_data)
1157 devm_kfree(&spi->dev, ioctl_data);
1158 mutex_unlock(&priv_data->spi_lock);
1159
1160 if (ret == 0) {
1161 LOGDI("%s ready to wait for response\n", __func__);
1162 wait_for_completion_interruptible_timeout(
1163 &priv_data->response_completion,
1164 5 * HZ);
1165 ret = priv_data->cmd_result;
1166 }
1167 return ret;
1168}
1169
1170static int qti_can_netdev_do_ioctl(struct net_device *netdev,
1171 struct ifreq *ifr, int cmd)
1172{
1173 struct qti_can *priv_data;
1174 struct qti_can_netdev_privdata *netdev_priv_data;
1175 int *mode;
1176 int ret = -EINVAL;
1177 struct spi_device *spi;
1178
1179 netdev_priv_data = netdev_priv(netdev);
1180 priv_data = netdev_priv_data->qti_can;
1181 spi = priv_data->spidev;
1182 LOGDI("%s %x\n", __func__, cmd);
1183
1184 switch (cmd) {
1185 case IOCTL_RELEASE_CAN_BUFFER:
1186 if (!ifr)
1187 return -EINVAL;
1188
1189 /* Regular NULL check will fail here as ioctl_data is at
1190 * some offset
1191 */
1192 if (ifr->ifr_data > (void __user *)IFR_DATA_OFFSET) {
1193 mutex_lock(&priv_data->spi_lock);
1194 mode = devm_kzalloc(&spi->dev, sizeof(int), GFP_KERNEL);
1195 if (!mode)
1196 return -ENOMEM;
1197 if (copy_from_user(mode, ifr->ifr_data, sizeof(int))) {
1198 devm_kfree(&spi->dev, mode);
1199 return -EFAULT;
1200 }
1201 priv_data->driver_mode = *mode;
1202 LOGDE("qti_can_driver_mode %d\n",
1203 priv_data->driver_mode);
1204 devm_kfree(&spi->dev, mode);
1205 mutex_unlock(&priv_data->spi_lock);
1206 }
1207 qti_can_send_release_can_buffer_cmd(netdev);
1208 ret = 0;
1209 break;
1210 case IOCTL_ENABLE_BUFFERING:
1211 case IOCTL_DISABLE_BUFFERING:
1212 qti_can_data_buffering(netdev, ifr, cmd);
1213 ret = 0;
1214 break;
1215 case IOCTL_DISABLE_ALL_BUFFERING:
1216 qti_can_remove_all_buffering(netdev);
1217 ret = 0;
1218 break;
1219 case IOCTL_ADD_FRAME_FILTER:
1220 case IOCTL_REMOVE_FRAME_FILTER:
1221 qti_can_frame_filter(netdev, ifr, cmd);
1222 ret = 0;
1223 break;
1224 case IOCTL_END_FIRMWARE_UPGRADE:
1225 ret = qti_can_end_fwupgrade_ioctl(netdev, ifr, cmd);
1226 break;
1227 case IOCTL_GET_FW_BR_VERSION:
1228 case IOCTL_BEGIN_FIRMWARE_UPGRADE:
1229 case IOCTL_FIRMWARE_UPGRADE_DATA:
1230 case IOCTL_BEGIN_BOOT_ROM_UPGRADE:
1231 case IOCTL_BOOT_ROM_UPGRADE_DATA:
1232 case IOCTL_END_BOOT_ROM_UPGRADE:
1233 case IOCTL_END_FW_UPDATE_FILE:
1234 ret = qti_can_do_blocking_ioctl(netdev, ifr, cmd);
1235 break;
1236 }
1237 LOGDI("%s ret %d\n", __func__, ret);
1238
1239 return ret;
1240}
1241
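/*
 * The private ioctls above are reached through the normal SIOCDEVPRIVATE
 * path: an ioctl() on a socket, with a struct ifreq naming the CAN interface
 * and ifr_data pointing at the request payload.  A minimal userspace sketch,
 * illustrative only (assumes the interface is "can0" and uses a CAN raw
 * socket, though any socket fd should work):
 *
 *	struct qti_can_buffer buf = {
 *		.can_if = 0,
 *		.mid = 0x123,
 *		.mask = 0x7FF,
 *	};
 *	struct ifreq ifr = { 0 };
 *	int fd = socket(PF_CAN, SOCK_RAW, CAN_RAW);
 *
 *	strncpy(ifr.ifr_name, "can0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&buf;
 *	ioctl(fd, SIOCDEVPRIVATE + 1, &ifr);	IOCTL_ENABLE_BUFFERING
 */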
1242static const struct net_device_ops qti_can_netdev_ops = {
1243 .ndo_open = qti_can_netdev_open,
1244 .ndo_stop = qti_can_netdev_close,
1245 .ndo_start_xmit = qti_can_netdev_start_xmit,
1246 .ndo_do_ioctl = qti_can_netdev_do_ioctl,
1247};
1248
1249static int qti_can_create_netdev(struct spi_device *spi,
1250 struct qti_can *priv_data, int index)
1251{
1252 struct net_device *netdev;
1253 struct qti_can_netdev_privdata *netdev_priv_data;
1254
1255 LOGDI("%s %d\n", __func__, index);
1256 if (index < 0 || index >= priv_data->max_can_channels) {
1257 LOGDE("%s wrong index %d\n", __func__, index);
1258 return -EINVAL;
1259 }
1260 netdev = alloc_candev(sizeof(*netdev_priv_data), MAX_TX_BUFFERS);
1261 if (!netdev) {
1262 LOGDE("Couldn't alloc candev\n");
1263 return -ENOMEM;
1264 }
1265
1266 netdev->mtu = CANFD_MTU;
1267
1268 netdev_priv_data = netdev_priv(netdev);
1269 netdev_priv_data->qti_can = priv_data;
1270 netdev_priv_data->netdev_index = index;
1271
1272 priv_data->netdev[index] = netdev;
1273
1274 netdev->netdev_ops = &qti_can_netdev_ops;
1275 SET_NETDEV_DEV(netdev, &spi->dev);
1276 netdev_priv_data->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
1277 CAN_CTRLMODE_LISTENONLY;
1278 if (priv_data->support_can_fd)
1279 netdev_priv_data->can.ctrlmode_supported |= CAN_CTRLMODE_FD;
1280 netdev_priv_data->can.bittiming_const = &qti_can_bittiming_const;
1281 netdev_priv_data->can.data_bittiming_const =
1282 &qti_can_data_bittiming_const;
1283 netdev_priv_data->can.clock.freq = priv_data->clk_freq_mhz;
1284 netdev_priv_data->can.do_set_bittiming = qti_can_set_bitrate;
1285
1286 return 0;
1287}
1288
1289static struct qti_can *qti_can_create_priv_data(struct spi_device *spi)
1290{
1291 struct qti_can *priv_data;
1292 int err;
1293 struct device *dev;
1294
1295 dev = &spi->dev;
1296 priv_data = devm_kzalloc(dev, sizeof(*priv_data), GFP_KERNEL);
1297 if (!priv_data) {
1298 err = -ENOMEM;
1299 return NULL;
1300 }
1301 spi_set_drvdata(spi, priv_data);
1302 atomic_set(&priv_data->netif_queue_stop, 0);
1303 priv_data->spidev = spi;
1304 priv_data->assembly_buffer = devm_kzalloc(dev,
1305 RX_ASSEMBLY_BUFFER_SIZE,
1306 GFP_KERNEL);
1307 if (!priv_data->assembly_buffer) {
1308 err = -ENOMEM;
1309 goto cleanup_privdata;
1310 }
1311
1312 priv_data->tx_wq = alloc_workqueue("qti_can_tx_wq", 0, 0);
1313 if (!priv_data->tx_wq) {
1314 LOGDE("Couldn't alloc workqueue\n");
1315 err = -ENOMEM;
1316 goto cleanup_privdata;
1317 }
1318
1319 priv_data->tx_buf = devm_kzalloc(dev,
1320 XFER_BUFFER_SIZE,
1321 GFP_KERNEL);
1322 priv_data->rx_buf = devm_kzalloc(dev,
1323 XFER_BUFFER_SIZE,
1324 GFP_KERNEL);
1325 if (!priv_data->tx_buf || !priv_data->rx_buf) {
1326 LOGDE("Couldn't alloc tx or rx buffers\n");
1327 err = -ENOMEM;
1328 goto cleanup_privdata;
1329 }
1330 priv_data->xfer_length = 0;
1331 priv_data->driver_mode = DRIVER_MODE_RAW_FRAMES;
1332
1333 mutex_init(&priv_data->spi_lock);
1334 atomic_set(&priv_data->msg_seq, 0);
1335 init_completion(&priv_data->response_completion);
1336 return priv_data;
1337
1338cleanup_privdata:
1339 if (priv_data) {
1340 if (priv_data->tx_wq)
1341 destroy_workqueue(priv_data->tx_wq);
1342 devm_kfree(dev, priv_data->rx_buf);
1343 devm_kfree(dev, priv_data->tx_buf);
1344 devm_kfree(dev, priv_data->assembly_buffer);
1345 devm_kfree(dev, priv_data);
1346 }
1347 return NULL;
1348}
1349
1350static const struct of_device_id qti_can_match_table[] = {
1351 { .compatible = "qcom,renesas,rh850" },
1352 { .compatible = "qcom,nxp,mpc5746c" },
1353 { }
1354};
1355
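/*
 * Sketch of a matching devicetree node.  The qcom,* property names and
 * "support-can-fd" are the ones qti_can_probe() below reads; all values and
 * the standard SPI child properties are board-specific assumptions shown
 * only for orientation:
 *
 *	can-controller@0 {
 *		compatible = "qcom,renesas,rh850";
 *		qcom,clk-freq-mhz = <40>;
 *		qcom,max-can-channels = <2>;
 *		qcom,bits-per-word = <16>;
 *		qcom,reset-delay-msec = <100>;
 *		qcom,reset-gpio = <&tlmm 40 0>;
 *		qcom,can-fw-cmd-timeout-req;
 *		qcom,can-fw-cmd-timeout-ms = <500>;
 *		qcom,rem-all-buffering-timeout-ms = <500>;
 *		support-can-fd;
 *		(plus the usual SPI child properties: reg, spi-max-frequency,
 *		 interrupts, ...)
 *	};
 */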
1356static int qti_can_probe(struct spi_device *spi)
1357{
1358 int err, retry = 0, query_err = -1, i;
1359 struct qti_can *priv_data = NULL;
1360 struct device *dev;
1361
1362 dev = &spi->dev;
1363 dev_info(dev, "%s", __func__);
1364
1365 err = spi_setup(spi);
1366 if (err) {
1367 dev_err(dev, "spi_setup failed: %d", err);
1368 return err;
1369 }
1370
1371 priv_data = qti_can_create_priv_data(spi);
1372 if (!priv_data) {
1373 dev_err(dev, "Failed to create qti_can priv_data\n");
1374 err = -ENOMEM;
1375 return err;
1376 }
1377
1378 err = of_property_read_u32(spi->dev.of_node, "qcom,clk-freq-mhz",
1379 &priv_data->clk_freq_mhz);
1380 if (err) {
1381 LOGDE("DT property: qcom,clk-freq-hz not defined\n");
1382 return err;
1383 }
1384
1385 err = of_property_read_u32(spi->dev.of_node, "qcom,max-can-channels",
1386 &priv_data->max_can_channels);
1387 if (err) {
1388 LOGDE("DT property: qcom,max-can-channels not defined\n");
1389 return err;
1390 }
1391
1392 err = of_property_read_u32(spi->dev.of_node, "qcom,bits-per-word",
1393 &priv_data->bits_per_word);
1394 if (err)
1395 priv_data->bits_per_word = 16;
1396
1397 err = of_property_read_u32(spi->dev.of_node, "qcom,reset-delay-msec",
1398 &priv_data->reset_delay_msec);
1399 if (err)
1400 priv_data->reset_delay_msec = 1;
1401
1402 priv_data->can_fw_cmd_timeout_req =
1403 of_property_read_bool(spi->dev.of_node,
1404 "qcom,can-fw-cmd-timeout-req");
1405
1406 err = of_property_read_u32(spi->dev.of_node,
1407 "qcom,can-fw-cmd-timeout-ms",
1408 &priv_data->can_fw_cmd_timeout_ms);
1409 if (err)
1410 priv_data->can_fw_cmd_timeout_ms = 0;
1411
1412 err = of_property_read_u32(spi->dev.of_node,
1413 "qcom,rem-all-buffering-timeout-ms",
1414 &priv_data->rem_all_buffering_timeout_ms);
1415 if (err)
1416 priv_data->rem_all_buffering_timeout_ms = 0;
1417
1418 priv_data->reset = of_get_named_gpio(spi->dev.of_node,
1419 "qcom,reset-gpio", 0);
1420
1421 if (gpio_is_valid(priv_data->reset)) {
1422 err = gpio_request(priv_data->reset, "qti-can-reset");
1423 if (err < 0) {
1424 LOGDE("failed to request gpio %d: %d\n",
1425 priv_data->reset, err);
1426 return err;
1427 }
1428
1429 gpio_direction_output(priv_data->reset, 0);
1430 /* delay to generate non-zero reset pulse width */
1431 udelay(1);
1432 gpio_direction_output(priv_data->reset, 1);
1433 /* wait for controller to come up after reset */
1434 msleep(priv_data->reset_delay_msec);
1435 } else {
1436 msleep(priv_data->reset_delay_msec);
1437 }
1438
1439 priv_data->support_can_fd = of_property_read_bool(spi->dev.of_node,
1440 "support-can-fd");
1441
1442 if (of_device_is_compatible(spi->dev.of_node, "qcom,nxp,mpc5746c"))
1443 qti_can_bittiming_const = flexcan_bittiming_const;
1444 else if (of_device_is_compatible(spi->dev.of_node,
1445 "qcom,renesas,rh850"))
1446 qti_can_bittiming_const = rh850_bittiming_const;
1447
1448 priv_data->netdev = devm_kcalloc(dev,
1449 priv_data->max_can_channels,
1450 sizeof(priv_data->netdev[0]),
1451 GFP_KERNEL);
1452 if (!priv_data->netdev) {
1453 err = -ENOMEM;
1454 return err;
1455 }
1456
1457 for (i = 0; i < priv_data->max_can_channels; i++) {
1458 err = qti_can_create_netdev(spi, priv_data, i);
1459 if (err) {
1460 LOGDE("Failed to create CAN device: %d", err);
1461 goto cleanup_candev;
1462 }
1463
1464 err = register_candev(priv_data->netdev[i]);
1465 if (err) {
1466 LOGDE("Failed to register CAN device: %d", err);
1467 goto unregister_candev;
1468 }
1469 }
1470
1471 err = request_threaded_irq(spi->irq, NULL, qti_can_irq,
1472 IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
1473 "qti-can", priv_data);
1474 if (err) {
1475 LOGDE("Failed to request irq: %d", err);
1476 goto unregister_candev;
1477 }
1478 dev_info(dev, "Request irq %d ret %d\n", spi->irq, err);
1479
1480 while ((query_err != 0) && (retry < QTI_CAN_FW_QUERY_RETRY_COUNT)) {
1481 LOGDI("Trying to query fw version %d", retry);
1482 query_err = qti_can_query_firmware_version(priv_data);
1483 priv_data->assembly_buffer_size = 0;
1484 retry++;
1485 }
1486
1487 if (query_err) {
1488 LOGDE("QTI CAN probe failed\n");
1489 err = -ENODEV;
1490 goto free_irq;
1491 }
1492 return 0;
1493
1494free_irq:
1495 free_irq(spi->irq, priv_data);
1496unregister_candev:
1497 for (i = 0; i < priv_data->max_can_channels; i++)
1498 unregister_candev(priv_data->netdev[i]);
1499cleanup_candev:
1500 if (priv_data) {
1501 for (i = 0; i < priv_data->max_can_channels; i++) {
1502 if (priv_data->netdev[i])
1503 free_candev(priv_data->netdev[i]);
1504 }
1505 if (priv_data->tx_wq)
1506 destroy_workqueue(priv_data->tx_wq);
1507 devm_kfree(dev, priv_data->rx_buf);
1508 devm_kfree(dev, priv_data->tx_buf);
1509 devm_kfree(dev, priv_data->assembly_buffer);
1510 devm_kfree(dev, priv_data->netdev);
1511 devm_kfree(dev, priv_data);
1512 }
1513 return err;
1514}
1515
1516static int qti_can_remove(struct spi_device *spi)
1517{
1518 struct qti_can *priv_data = spi_get_drvdata(spi);
1519 int i;
1520
1521 LOGDI("%s\n", __func__);
1522 for (i = 0; i < priv_data->max_can_channels; i++) {
1523 unregister_candev(priv_data->netdev[i]);
1524 free_candev(priv_data->netdev[i]);
1525 }
1526 destroy_workqueue(priv_data->tx_wq);
 free_irq(spi->irq, priv_data);
 /*
  * rx_buf, tx_buf, assembly_buffer, netdev[] and priv_data are all
  * devm-allocated, so they must not be kfree()d here; devm releases
  * them automatically when the SPI device detaches.
  */
1532 return 0;
1533}
1534
1535#ifdef CONFIG_PM
1536static int qti_can_suspend(struct device *dev)
1537{
1538 struct spi_device *spi = to_spi_device(dev);
1539 struct qti_can *priv_data = NULL;
1540 u8 power_event = CMD_SUSPEND_EVENT;
1541 int ret = 0;
1542
1543 if (spi) {
1544 priv_data = spi_get_drvdata(spi);
1545 enable_irq_wake(spi->irq);
1546 } else {
1547 ret = -1;
1548 }
1549
1550 if (priv_data && !(ret < 0))
1551 ret = qti_can_notify_power_events(priv_data, power_event);
1552
1553 return ret;
1554}
1555
1556static int qti_can_resume(struct device *dev)
1557{
1558 struct spi_device *spi = to_spi_device(dev);
1559 struct qti_can *priv_data = NULL;
1560 int ret = 0;
1561 u8 power_event = CMD_RESUME_EVENT;
1562
1563 if (spi) {
1564 priv_data = spi_get_drvdata(spi);
1565 disable_irq_wake(spi->irq);
1566
1567 if (priv_data)
1568 qti_can_rx_message(priv_data);
1569 else
1570 ret = -1;
1571
1572 } else {
1573 ret = -1;
1574 }
1575
1576 if (priv_data && !(ret < 0))
1577 ret = qti_can_notify_power_events(priv_data, power_event);
1578
1579 return ret;
1580}
1581
1582static const struct dev_pm_ops qti_can_dev_pm_ops = {
1583 .suspend = qti_can_suspend,
1584 .resume = qti_can_resume,
1585};
1586#endif
1587
1588static struct spi_driver qti_can_driver = {
1589 .driver = {
1590 .name = "qti-can",
1591 .of_match_table = qti_can_match_table,
1592 .owner = THIS_MODULE,
1593#ifdef CONFIG_PM
1594 .pm = &qti_can_dev_pm_ops,
1595#endif
1596 .probe_type = PROBE_PREFER_ASYNCHRONOUS,
1597 },
1598 .probe = qti_can_probe,
1599 .remove = qti_can_remove,
1600};
1601module_spi_driver(qti_can_driver);
1602
1603MODULE_DESCRIPTION("QTI CAN controller module");
1604MODULE_LICENSE("GPL v2");