1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
4 */
5
6/*
7 * Secure-Processor-Communication (SPCOM).
8 *
 9 * This driver provides communication to the Secure Processor (SP)
 10 * over the RPMSG framework.
11 *
12 * It provides interface to userspace spcomlib.
13 *
 14 * A userspace application shall use spcomlib to communicate with the SP. The
 15 * application can be either a client or a server. spcomlib uses the write()
 16 * file operation to send data and the read() file operation to read data.
17 *
18 * This driver uses RPMSG with glink-spss as a transport layer.
19 * This driver exposes "/dev/<sp-channel-name>" file node for each rpmsg logical
20 * channel.
21 * This driver exposes "/dev/spcom" file node for some debug/control command.
22 * The predefined channel "/dev/sp_kernel" is used for loading SP application
23 * from HLOS.
24 * This driver exposes "/dev/sp_ssr" file node to allow user space poll for SSR.
25 * After the remote SP App is loaded, this driver exposes a new file node
26 * "/dev/<ch-name>" for the matching HLOS App to use.
27 * The access to predefined file nodes and dynamically allocated file nodes is
28 * restricted by using unix group and SELinux.
29 *
 30 * No message routing is used; instead, the rpmsg/G-Link "multiplexing" feature
 31 * provides a dedicated logical channel for each HLOS and SP Application-Pair.
32 *
 33 * Each HLOS/SP Application can be either Client or Server or both.
 34 * Messaging is always point-to-point between 2 HLOS<=>SP applications.
 35 * Each channel is exclusively used by a single Client or Server.
36 *
37 * User Space Request & Response are synchronous.
38 * read() & write() operations are blocking until completed or terminated.
39 */
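/*
 * Illustrative userspace client flow (a minimal sketch, not part of this
 * driver): per the description above, spcomlib opens the channel node,
 * sends a request with a blocking write() and collects the response with
 * a blocking read(). The channel name, buffers and sizes below are
 * hypothetical placeholders; the actual command framing is defined in
 * uapi/linux/spcom.h and is normally built by spcomlib.
 *
 *	char req[64], resp[268];
 *	ssize_t n;
 *	int fd = open("/dev/my_sp_app", O_RDWR);	// hypothetical channel
 *
 *	if (fd < 0)
 *		return -errno;
 *	if (write(fd, req, sizeof(req)) == sizeof(req))	// blocking send
 *		n = read(fd, resp, sizeof(resp));	// blocking receive
 *	close(fd);
 */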
40#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__
41
42#include <linux/kernel.h> /* min() */
43#include <linux/module.h> /* MODULE_LICENSE */
44#include <linux/device.h> /* class_create() */
45#include <linux/slab.h> /* kzalloc() */
46#include <linux/fs.h> /* file_operations */
47#include <linux/cdev.h> /* cdev_add() */
48#include <linux/errno.h> /* EINVAL, ETIMEDOUT */
49#include <linux/printk.h> /* pr_err() */
50#include <linux/bitops.h> /* BIT(x) */
51#include <linux/completion.h> /* wait_for_completion_timeout() */
52#include <linux/poll.h> /* POLLOUT */
53#include <linux/platform_device.h>
54#include <linux/of.h> /* of_property_count_strings() */
55#include <linux/workqueue.h>
56#include <linux/delay.h> /* msleep() */
57#include <linux/dma-buf.h>
58#include <linux/limits.h>
59#include <linux/rpmsg.h>
60#include <linux/atomic.h>
61#include <linux/list.h>
62#include <uapi/linux/spcom.h>
63#include <soc/qcom/subsystem_restart.h>
64
65/**
66 * Request buffer size.
 67 * Any large data (multiple of 4KB) is provided by a temp buffer in DDR.
 68 * The request shall provide the temp buffer physical address (aligned to 4KB).
 69 * Maximum request/response size of 268 is used to accommodate APDU size.
 70 * From the kernel spcom driver perspective, a PAGE_SIZE of 4K
 71 * is the actual maximum size for a single read/write file operation.
72 */
73#define SPCOM_MAX_RESPONSE_SIZE 268
74
75/* SPCOM driver name */
76#define DEVICE_NAME "spcom"
77
78/* maximum ION buffers should be >= SPCOM_MAX_CHANNELS */
79#define SPCOM_MAX_ION_BUF_PER_CH (SPCOM_MAX_CHANNELS + 4)
80
81/* maximum ION buffer per send request/response command */
82#define SPCOM_MAX_ION_BUF_PER_CMD SPCOM_MAX_ION_BUF
83
84/* Maximum command size */
85#define SPCOM_MAX_COMMAND_SIZE (PAGE_SIZE)
86
87/* Maximum input size */
88#define SPCOM_MAX_READ_SIZE (PAGE_SIZE)
89
90/* Current Process ID */
91#define current_pid() ((u32)(current->pid))
92
93/*
94 * After both sides get CONNECTED,
 95 * there is a race between one side queueing an rx buffer and the other side
 96 * trying to call glink_tx(); this race occurs only on the 1st tx.
97 * Do tx retry with some delay to allow the other side to queue rx buffer.
98 */
99#define TX_RETRY_DELAY_MSEC 100
100
101/* SPCOM_MAX_REQUEST_SIZE-or-SPCOM_MAX_RESPONSE_SIZE + header */
102#define SPCOM_RX_BUF_SIZE 300
103
104/*
105 * Initial transaction id, use non-zero nonce for debug.
106 * Incremented by client on request, and copied back by server on response.
107 */
108#define INITIAL_TXN_ID 0x12345678
109
110/**
111 * struct spcom_msg_hdr - Request/Response message header between HLOS and SP.
112 *
 113 * This header precedes any request-specific parameters.
114 * The transaction id is used to match request with response.
 115 * Note: rpmsg API provides the rx/tx data size, so the user payload size is
 116 * calculated by subtracting the header size.
117 */
118struct spcom_msg_hdr {
119 uint32_t reserved; /* for future use */
120 uint32_t txn_id; /* transaction id */
121 char buf[0]; /* Variable buffer size, must be last field */
122} __packed;
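/*
 * For example, with the sizes defined above: sizeof(struct spcom_msg_hdr)
 * is 8 bytes (reserved + txn_id), so an rx buffer of SPCOM_RX_BUF_SIZE
 * (300) bytes carries up to 292 bytes of user payload, enough for
 * SPCOM_MAX_RESPONSE_SIZE (268):
 *
 *	payload_size = rx_size - sizeof(struct spcom_msg_hdr);
 */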
123
124/**
125 * struct spcom_client - Client handle
126 */
127struct spcom_client {
128 struct spcom_channel *ch;
129};
130
131/**
132 * struct spcom_server - Server handle
133 */
134struct spcom_server {
135 struct spcom_channel *ch;
136};
137
138/**
139 * struct spcom_channel - channel context
140 */
141struct spcom_channel {
142 char name[SPCOM_CHANNEL_NAME_SIZE];
143 struct mutex lock;
144 uint32_t txn_id; /* incrementing nonce per client request */
145 bool is_server; /* for txn_id and response_timeout_msec */
146 bool comm_role_undefined; /* is true on channel creation, before */
147 /* first tx/rx on channel */
148 uint32_t response_timeout_msec; /* for client only */
149
150 /* char dev */
151 struct cdev *cdev;
152 struct device *dev;
153 struct device_attribute attr;
154
155 /* rpmsg */
156 struct rpmsg_driver *rpdrv;
157 struct rpmsg_device *rpdev;
158
159 /* Events notification */
160 struct completion rx_done;
161 struct completion connect;
162
163 /*
164 * Only one client or server per channel.
165 * Only one rx/tx transaction at a time (request + response).
166 */
167 bool is_busy;
168
169 u32 pid; /* debug only to find user space application */
170
171 /* abort flags */
172 bool rpmsg_abort;
173
174 /* rx data info */
175 size_t actual_rx_size; /* actual data size received */
176 void *rpmsg_rx_buf;
177
178 /* shared buffer lock/unlock support */
179 int dmabuf_fd_table[SPCOM_MAX_ION_BUF_PER_CH];
180 struct dma_buf *dmabuf_handle_table[SPCOM_MAX_ION_BUF_PER_CH];
181};
182
183/**
 184 * struct rx_buff_list - holds rx rpmsg data until it is consumed by the
 185 * spcom_signal_rx_done worker; one item per rx packet
186 */
187struct rx_buff_list {
188 struct list_head list;
189
190 void *rpmsg_rx_buf;
191 int rx_buf_size;
192 struct spcom_channel *ch;
193};
194
195/**
196 * struct spcom_device - device state structure.
197 */
198struct spcom_device {
199 char predefined_ch_name[SPCOM_MAX_CHANNELS][SPCOM_CHANNEL_NAME_SIZE];
200
201 /* char device info */
202 struct cdev cdev;
203 dev_t device_no;
204 struct class *driver_class;
205 struct device *class_dev;
206 struct platform_device *pdev;
207
208 /* rpmsg channels */
209 struct spcom_channel channels[SPCOM_MAX_CHANNELS];
210 atomic_t chdev_count;
211
212 struct completion rpmsg_state_change;
213 atomic_t rpmsg_dev_count;
214
215 /* rx data path */
216 struct list_head rx_list_head;
217 spinlock_t rx_lock;
218};
219
220/* Device Driver State */
221static struct spcom_device *spcom_dev;
222
223/* static functions declaration */
224static int spcom_create_channel_chardev(const char *name);
225static struct spcom_channel *spcom_find_channel_by_name(const char *name);
226static int spcom_register_rpmsg_drv(struct spcom_channel *ch);
227static int spcom_unregister_rpmsg_drv(struct spcom_channel *ch);
228
229/**
230 * spcom_is_channel_open() - channel is open on this side.
231 *
 232 * The channel is open on this side once the rpmsg driver is registered;
 233 * it is fully connected only after the rpmsg device is also probed.
234 */
235static inline bool spcom_is_channel_open(struct spcom_channel *ch)
236{
237 return ch->rpdrv != NULL;
238}
239
240/**
241 * spcom_is_channel_connected() - channel is fully connected by both sides.
242 */
243static inline bool spcom_is_channel_connected(struct spcom_channel *ch)
244{
245 /* Channel must be open before it gets connected */
246 if (!spcom_is_channel_open(ch))
247 return false;
248
249 return ch->rpdev != NULL;
250}
251
252/**
253 * spcom_create_predefined_channels_chardev() - expose predefined channels to
254 * user space.
255 *
 256 * The predefined channels list is provided by the device tree. Typically, it is
 257 * for known servers on the remote side that are not loaded by the HLOS.
258 */
259static int spcom_create_predefined_channels_chardev(void)
260{
261 int i;
262 int ret;
263 static bool is_predefined_created;
264
265 if (is_predefined_created)
266 return 0;
267
268 for (i = 0; i < SPCOM_MAX_CHANNELS; i++) {
269 const char *name = spcom_dev->predefined_ch_name[i];
270
271 if (name[0] == 0)
272 break;
273 ret = spcom_create_channel_chardev(name);
274 if (ret) {
275 pr_err("failed to create chardev [%s], ret [%d]\n",
276 name, ret);
277 return -EFAULT;
278 }
279 }
280
281 is_predefined_created = true;
282
283 return 0;
284}
285
286/*======================================================================*/
287/* UTILITIES */
288/*======================================================================*/
289
290/**
291 * spcom_init_channel() - initialize channel state.
292 *
293 * @ch: channel state struct pointer
294 * @name: channel name
295 */
296static int spcom_init_channel(struct spcom_channel *ch, const char *name)
297{
298 if (!ch || !name || !name[0]) {
299 pr_err("invalid parameters\n");
300 return -EINVAL;
301 }
302
303 strlcpy(ch->name, name, SPCOM_CHANNEL_NAME_SIZE);
304
305 init_completion(&ch->rx_done);
306 init_completion(&ch->connect);
307
308 mutex_init(&ch->lock);
309 ch->rpdrv = NULL;
310 ch->rpdev = NULL;
311 ch->actual_rx_size = 0;
312 ch->is_busy = false;
313 ch->txn_id = INITIAL_TXN_ID; /* use non-zero nonce for debug */
314 ch->pid = 0;
315 ch->rpmsg_abort = false;
316 ch->rpmsg_rx_buf = NULL;
317 ch->comm_role_undefined = true;
318
319 return 0;
320}
321
322/**
323 * spcom_find_channel_by_name() - find a channel by name.
324 *
325 * @name: channel name
326 *
327 * Return: a channel state struct.
328 */
329static struct spcom_channel *spcom_find_channel_by_name(const char *name)
330{
331 int i;
332
333 for (i = 0 ; i < ARRAY_SIZE(spcom_dev->channels); i++) {
334 struct spcom_channel *ch = &spcom_dev->channels[i];
335
336 if (strcmp(ch->name, name) == 0)
337 return ch;
338 }
339
340 return NULL;
341}
342
343/**
344 * spcom_rx() - wait for received data until timeout, unless pending rx data is
345 * already ready
346 *
347 * @ch: channel state struct pointer
348 * @buf: buffer pointer
349 * @size: buffer size
350 *
351 * Return: size in bytes on success, negative value on failure.
352 */
353static int spcom_rx(struct spcom_channel *ch,
354 void *buf,
355 uint32_t size,
356 uint32_t timeout_msec)
357{
358 unsigned long jiffies = msecs_to_jiffies(timeout_msec);
359 long timeleft = 1;
360 int ret = 0;
361
362 mutex_lock(&ch->lock);
363
364 /* check for already pending data */
365 if (!ch->actual_rx_size) {
366 reinit_completion(&ch->rx_done);
367
368 mutex_unlock(&ch->lock); /* unlock while waiting */
369 /* wait for rx response */
370 pr_debug("wait for rx done, timeout_msec=%d\n", timeout_msec);
371 if (timeout_msec)
372 timeleft = wait_for_completion_interruptible_timeout(
373 &ch->rx_done, jiffies);
374 else
375 ret = wait_for_completion_interruptible(&ch->rx_done);
376
377 mutex_lock(&ch->lock);
378 if (timeout_msec && timeleft == 0) {
379 ch->txn_id++; /* to drop expired rx packet later */
380 pr_err("rx_done timeout expired %d ms, set txn_id=%d\n",
381 timeout_msec, ch->txn_id);
382 ret = -ETIMEDOUT;
383 goto exit_err;
384 } else if (ch->rpmsg_abort) {
385 pr_warn("rpmsg channel is closing\n");
386 ret = -ERESTART;
387 goto exit_err;
388 } else if (ret < 0 || timeleft == -ERESTARTSYS) {
389 pr_debug("wait interrupted: ret=%d, timeleft=%ld\n",
390 ret, timeleft);
391 if (timeleft == -ERESTARTSYS)
392 ret = -ERESTARTSYS;
393 goto exit_err;
394 } else if (ch->actual_rx_size) {
395 pr_debug("actual_rx_size is [%zu], txn_id %d\n",
396 ch->actual_rx_size, ch->txn_id);
397 } else {
398 pr_err("actual_rx_size is zero\n");
399 ret = -EFAULT;
400 goto exit_err;
401 }
402 } else {
 403 pr_debug("pending data size [%zu], requested size [%u], ch->txn_id %d\n",
 404 ch->actual_rx_size, size, ch->txn_id);
405 }
406 if (!ch->rpmsg_rx_buf) {
407 pr_err("invalid rpmsg_rx_buf\n");
408 ret = -ENOMEM;
409 goto exit_err;
410 }
411
412 size = min_t(size_t, ch->actual_rx_size, size);
413 memcpy(buf, ch->rpmsg_rx_buf, size);
414
415 pr_debug("copy size [%d]\n", (int) size);
416
417 memset(ch->rpmsg_rx_buf, 0, ch->actual_rx_size);
418 kfree((void *)ch->rpmsg_rx_buf);
419 ch->rpmsg_rx_buf = NULL;
420 ch->actual_rx_size = 0;
421
422 mutex_unlock(&ch->lock);
423
424 return size;
425exit_err:
426 mutex_unlock(&ch->lock);
427 return ret;
428}
429
430/**
 431 * spcom_get_next_request_size() - get the size of the next pending
 432 * request.
433 *
434 * @ch: channel state struct pointer
435 *
436 * Server needs the size of the next request to allocate a request buffer.
 437 * Initially an intent-request was used; however, this complicated the remote
 438 * side, so both sides no longer use glink_tx() with INTENT_REQ.
439 *
440 * Return: size in bytes on success, negative value on failure.
441 */
442static int spcom_get_next_request_size(struct spcom_channel *ch)
443{
444 int size = -1;
445 int ret = 0;
446
 447 /* NOTE: Remote clients might not be connected yet. */
448 mutex_lock(&ch->lock);
449 reinit_completion(&ch->rx_done);
450
451 /* check if already got it via callback */
452 if (ch->actual_rx_size) {
453 pr_debug("next-req-size already ready ch [%s] size [%zu]\n",
454 ch->name, ch->actual_rx_size);
455 ret = -EFAULT;
456 goto exit_ready;
457 }
458 mutex_unlock(&ch->lock); /* unlock while waiting */
459
460 pr_debug("Wait for Rx Done, ch [%s]\n", ch->name);
461 ret = wait_for_completion_interruptible(&ch->rx_done);
462 if (ret < 0) {
463 pr_debug("ch [%s]:interrupted wait ret=%d\n",
464 ch->name, ret);
465 goto exit_error;
466 }
467
468 mutex_lock(&ch->lock); /* re-lock after waiting */
469
470 if (ch->actual_rx_size == 0) {
471 pr_err("invalid rx size [%zu] ch [%s]\n",
472 ch->actual_rx_size, ch->name);
473 mutex_unlock(&ch->lock);
474 ret = -EFAULT;
475 goto exit_error;
476 }
477
478exit_ready:
 479 /* actual_rx_size does not exceed SPCOM_RX_BUF_SIZE */
480 size = (int)ch->actual_rx_size;
481 if (size > sizeof(struct spcom_msg_hdr)) {
482 size -= sizeof(struct spcom_msg_hdr);
483 } else {
484 pr_err("rx size [%d] too small\n", size);
485 ret = -EFAULT;
486 mutex_unlock(&ch->lock);
487 goto exit_error;
488 }
489
490 mutex_unlock(&ch->lock);
491 return size;
492
493exit_error:
494 return ret;
495}
496
497/*======================================================================*/
498/* USER SPACE commands handling */
499/*======================================================================*/
500
501/**
502 * spcom_handle_create_channel_command() - Handle Create Channel command from
503 * user space.
504 *
505 * @cmd_buf: command buffer.
506 * @cmd_size: command buffer size.
507 *
508 * Return: 0 on successful operation, negative value otherwise.
509 */
510static int spcom_handle_create_channel_command(void *cmd_buf, int cmd_size)
511{
512 int ret = 0;
513 struct spcom_user_create_channel_command *cmd = cmd_buf;
514 const char *ch_name;
515 const size_t maxlen = sizeof(cmd->ch_name);
516
517 if (cmd_size != sizeof(*cmd)) {
518 pr_err("cmd_size [%d] , expected [%d]\n",
519 (int) cmd_size, (int) sizeof(*cmd));
520 return -EINVAL;
521 }
522
523 ch_name = cmd->ch_name;
524 if (strnlen(cmd->ch_name, maxlen) == maxlen) {
525 pr_err("channel name is not NULL terminated\n");
526 return -EINVAL;
527 }
528
529 pr_debug("ch_name [%s]\n", ch_name);
530
531 ret = spcom_create_channel_chardev(ch_name);
532
533 return ret;
534}
535
536/**
537 * spcom_handle_restart_sp_command() - Handle Restart SP command from
538 * user space.
539 *
540 * Return: 0 on successful operation, negative value otherwise.
541 */
542static int spcom_handle_restart_sp_command(void)
543{
544 void *subsystem_get_retval = NULL;
545
546 pr_debug("restart - PIL FW loading process initiated\n");
547
548 subsystem_get_retval = subsystem_get("spss");
549 if (!subsystem_get_retval) {
550 pr_err("restart - unable to trigger PIL process for FW loading\n");
551 return -EINVAL;
552 }
553
554 pr_debug("restart - PIL FW loading process is complete\n");
555 return 0;
556}
557
558/**
559 * spcom_handle_send_command() - Handle send request/response from user space.
560 *
561 * @buf: command buffer.
562 * @buf_size: command buffer size.
563 *
564 * Return: 0 on successful operation, negative value otherwise.
565 */
566static int spcom_handle_send_command(struct spcom_channel *ch,
567 void *cmd_buf, int size)
568{
569 int ret = 0;
570 struct spcom_send_command *cmd = cmd_buf;
571 uint32_t buf_size;
572 void *buf;
573 struct spcom_msg_hdr *hdr;
574 void *tx_buf;
575 int tx_buf_size;
576 uint32_t timeout_msec;
577 int time_msec = 0;
578
579 pr_debug("send req/resp ch [%s] size [%d]\n", ch->name, size);
580
581 /*
582 * check that cmd buf size is at least struct size,
583 * to allow access to struct fields.
584 */
585 if (size < sizeof(*cmd)) {
586 pr_err("ch [%s] invalid cmd buf\n",
587 ch->name);
588 return -EINVAL;
589 }
590
 591 /* Check if the remote side is connected */
 592 if (!spcom_is_channel_connected(ch)) {
 593 pr_err("ch [%s] remote side not connected\n", ch->name);
594 return -ENOTCONN;
595 }
596
597 /* parse command buffer */
598 buf = &cmd->buf;
599 buf_size = cmd->buf_size;
600 timeout_msec = cmd->timeout_msec;
601
602 /* Check param validity */
603 if (buf_size > SPCOM_MAX_RESPONSE_SIZE) {
604 pr_err("ch [%s] invalid buf size [%d]\n",
605 ch->name, buf_size);
606 return -EINVAL;
607 }
608 if (size != sizeof(*cmd) + buf_size) {
609 pr_err("ch [%s] invalid cmd size [%d]\n",
610 ch->name, size);
611 return -EINVAL;
612 }
613
 614 /* Allocate buffers */
615 tx_buf_size = sizeof(*hdr) + buf_size;
616 tx_buf = kzalloc(tx_buf_size, GFP_KERNEL);
617 if (!tx_buf)
618 return -ENOMEM;
619
620 /* Prepare Tx Buf */
621 hdr = tx_buf;
622
623 mutex_lock(&ch->lock);
624 if (ch->comm_role_undefined) {
625 pr_debug("ch [%s] send first -> it is client\n", ch->name);
626 ch->comm_role_undefined = false;
627 ch->is_server = false;
628 }
629
630 if (!ch->is_server) {
631 ch->txn_id++; /* client sets the request txn_id */
632 ch->response_timeout_msec = timeout_msec;
633 }
634 hdr->txn_id = ch->txn_id;
635
636 /* user buf */
637 memcpy(hdr->buf, buf, buf_size);
638
639 time_msec = 0;
640 do {
641 if (ch->rpmsg_abort) {
642 pr_err("ch [%s] aborted\n", ch->name);
643 ret = -ECANCELED;
644 break;
645 }
646 /* may fail when RX intent not queued by SP */
647 ret = rpmsg_trysend(ch->rpdev->ept, tx_buf, tx_buf_size);
648 if (ret == 0)
649 break;
650 time_msec += TX_RETRY_DELAY_MSEC;
651 mutex_unlock(&ch->lock);
652 msleep(TX_RETRY_DELAY_MSEC);
653 mutex_lock(&ch->lock);
654 } while ((ret == -EBUSY || ret == -EAGAIN) && time_msec < timeout_msec);
655 if (ret)
656 pr_err("ch [%s] rpmsg_trysend() error (%d), timeout_msec=%d\n",
657 ch->name, ret, timeout_msec);
658 mutex_unlock(&ch->lock);
659
660 kfree(tx_buf);
661 return ret;
662}
663
664/**
665 * modify_ion_addr() - replace the ION buffer virtual address with physical
666 * address in a request or response buffer.
667 *
668 * @buf: buffer to modify
669 * @buf_size: buffer size
670 * @ion_info: ION buffer info such as FD and offset in buffer.
671 *
672 * Return: 0 on successful operation, negative value otherwise.
673 */
674static int modify_ion_addr(void *buf,
675 uint32_t buf_size,
676 struct spcom_ion_info ion_info)
677{
678 struct dma_buf *dma_buf;
679 struct dma_buf_attachment *attach;
680 struct sg_table *sg = NULL;
681 dma_addr_t phy_addr = 0;
682 int fd, ret = 0;
683 uint32_t buf_offset;
684 char *ptr = (char *)buf;
685
686 fd = ion_info.fd;
687 buf_offset = ion_info.buf_offset;
688 ptr += buf_offset;
689
690 if (fd < 0) {
691 pr_err("invalid fd [%d]\n", fd);
692 return -ENODEV;
693 }
694
695 if (buf_size < sizeof(uint64_t)) {
696 pr_err("buf size too small [%d]\n", buf_size);
697 return -ENODEV;
698 }
699
700 if (buf_offset % sizeof(uint64_t))
701 pr_debug("offset [%d] is NOT 64-bit aligned\n", buf_offset);
702 else
703 pr_debug("offset [%d] is 64-bit aligned\n", buf_offset);
704
705 if (buf_offset > buf_size - sizeof(uint64_t)) {
706 pr_err("invalid buf_offset [%d]\n", buf_offset);
707 return -ENODEV;
708 }
709
710 dma_buf = dma_buf_get(fd);
711 if (IS_ERR_OR_NULL(dma_buf)) {
712 pr_err("fail to get dma buf handle\n");
713 return -EINVAL;
714 }
715 pr_debug("dma_buf handle ok\n");
716 attach = dma_buf_attach(dma_buf, &spcom_dev->pdev->dev);
717 if (IS_ERR_OR_NULL(attach)) {
718 ret = PTR_ERR(attach);
719 pr_err("fail to attach dma buf %d\n", ret);
720 dma_buf_put(dma_buf);
721 goto mem_map_table_failed;
722 }
723
724 sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
725 if (IS_ERR_OR_NULL(sg)) {
726 ret = PTR_ERR(sg);
727 pr_err("fail to get sg table of dma buf %d\n", ret);
728 goto mem_map_table_failed;
729 }
730 if (sg->sgl) {
731 phy_addr = sg->sgl->dma_address;
732 } else {
733 pr_err("sgl is NULL\n");
734 ret = -ENOMEM;
735 goto mem_map_sg_failed;
736 }
737
738 /* Set the physical address at the buffer offset */
739 pr_debug("ion phys addr = [0x%lx]\n", (long) phy_addr);
740 memcpy(ptr, &phy_addr, sizeof(phy_addr));
741
742mem_map_sg_failed:
743 dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
744mem_map_table_failed:
745 dma_buf_detach(dma_buf, attach);
746 dma_buf_put(dma_buf);
747
748 return ret;
749}
750
751/**
752 * spcom_handle_send_modified_command() - send a request/response with ION
753 * buffer address. Modify the request/response by replacing the ION buffer
754 * virtual address with the physical address.
755 *
756 * @ch: channel pointer
757 * @cmd_buf: User space command buffer
758 * @size: size of user command buffer
759 *
760 * Return: 0 on successful operation, negative value otherwise.
761 */
762static int spcom_handle_send_modified_command(struct spcom_channel *ch,
763 void *cmd_buf, int size)
764{
765 int ret = 0;
766 struct spcom_user_send_modified_command *cmd = cmd_buf;
767 uint32_t buf_size;
768 void *buf;
769 struct spcom_msg_hdr *hdr;
770 void *tx_buf;
771 int tx_buf_size;
772 struct spcom_ion_info ion_info[SPCOM_MAX_ION_BUF_PER_CMD];
773 int i;
774 uint32_t timeout_msec;
775 int time_msec = 0;
776
777 pr_debug("send req/resp ch [%s] size [%d]\n", ch->name, size);
778
779 /*
780 * check that cmd buf size is at least struct size,
781 * to allow access to struct fields.
782 */
783 if (size < sizeof(*cmd)) {
784 pr_err("ch [%s] invalid cmd buf\n",
785 ch->name);
786 return -EINVAL;
787 }
788
 789 /* Check if the remote side is connected */
 790 if (!spcom_is_channel_connected(ch)) {
 791 pr_err("ch [%s] remote side not connected\n", ch->name);
792 return -ENOTCONN;
793 }
794
795 /* parse command buffer */
796 buf = &cmd->buf;
797 buf_size = cmd->buf_size;
798 timeout_msec = cmd->timeout_msec;
799 memcpy(ion_info, cmd->ion_info, sizeof(ion_info));
800
801 /* Check param validity */
802 if (buf_size > SPCOM_MAX_RESPONSE_SIZE) {
803 pr_err("ch [%s] invalid buf size [%d]\n",
804 ch->name, buf_size);
805 return -EINVAL;
806 }
807 if (size != sizeof(*cmd) + buf_size) {
808 pr_err("ch [%s] invalid cmd size [%d]\n",
809 ch->name, size);
810 return -EINVAL;
811 }
812
 813 /* Allocate buffers */
814 tx_buf_size = sizeof(*hdr) + buf_size;
815 tx_buf = kzalloc(tx_buf_size, GFP_KERNEL);
816 if (!tx_buf)
817 return -ENOMEM;
818
819 /* Prepare Tx Buf */
820 hdr = tx_buf;
821
822 mutex_lock(&ch->lock);
823 if (ch->comm_role_undefined) {
824 pr_debug("ch [%s] send first -> it is client\n", ch->name);
825 ch->comm_role_undefined = false;
826 ch->is_server = false;
827 }
828 if (!ch->is_server) {
829 ch->txn_id++; /* client sets the request txn_id */
830 ch->response_timeout_msec = timeout_msec;
831 }
832 hdr->txn_id = ch->txn_id;
833
834 /* user buf */
835 memcpy(hdr->buf, buf, buf_size);
836
837 for (i = 0 ; i < ARRAY_SIZE(ion_info) ; i++) {
838 if (ion_info[i].fd >= 0) {
839 ret = modify_ion_addr(hdr->buf, buf_size, ion_info[i]);
840 if (ret < 0) {
841 mutex_unlock(&ch->lock);
842 pr_err("modify_ion_addr() error [%d]\n", ret);
843 memset(tx_buf, 0, tx_buf_size);
844 kfree(tx_buf);
845 return -EFAULT;
846 }
847 }
848 }
849
850 time_msec = 0;
851 do {
852 if (ch->rpmsg_abort) {
853 pr_err("ch [%s] aborted\n", ch->name);
854 ret = -ECANCELED;
855 break;
856 }
857 /* may fail when RX intent not queued by SP */
858 ret = rpmsg_trysend(ch->rpdev->ept, tx_buf, tx_buf_size);
859 if (ret == 0)
860 break;
861 time_msec += TX_RETRY_DELAY_MSEC;
862 mutex_unlock(&ch->lock);
863 msleep(TX_RETRY_DELAY_MSEC);
864 mutex_lock(&ch->lock);
865 } while ((ret == -EBUSY || ret == -EAGAIN) && time_msec < timeout_msec);
866 if (ret)
867 pr_err("ch [%s] rpmsg_trysend() error (%d), timeout_msec=%d\n",
868 ch->name, ret, timeout_msec);
869
870 mutex_unlock(&ch->lock);
871 memset(tx_buf, 0, tx_buf_size);
872 kfree(tx_buf);
873 return ret;
874}
875
876
877/**
 878 * spcom_handle_lock_ion_buf_command() - Lock a shared buffer.
 879 *
 880 * Lock a shared buffer to prevent it from being freed if the userspace app
 881 * crashes while it is still in use by the remote subsystem.
882 */
883static int spcom_handle_lock_ion_buf_command(struct spcom_channel *ch,
884 void *cmd_buf, int size)
885{
886 struct spcom_user_command *cmd = cmd_buf;
887 int fd;
888 int i;
889 struct dma_buf *dma_buf;
890
891 if (size != sizeof(*cmd)) {
892 pr_err("cmd size [%d] , expected [%d]\n",
893 (int) size, (int) sizeof(*cmd));
894 return -EINVAL;
895 }
896
897 if (cmd->arg > (unsigned int)INT_MAX) {
 898 pr_err("int overflow [%u]\n", cmd->arg);
 899 return -EINVAL;
900 }
901 fd = cmd->arg;
902
903 dma_buf = dma_buf_get(fd);
904 if (IS_ERR_OR_NULL(dma_buf)) {
905 pr_err("fail to get dma buf handle\n");
906 return -EINVAL;
907 }
908 pr_debug("dma_buf referenced ok\n");
909
910 /* shared buf lock doesn't involve any rx/tx data to SP. */
911 mutex_lock(&ch->lock);
912
913 /* Check if this shared buffer is already locked */
914 for (i = 0 ; i < ARRAY_SIZE(ch->dmabuf_handle_table) ; i++) {
915 if (ch->dmabuf_handle_table[i] == dma_buf) {
916 pr_debug("fd [%d] shared buf is already locked\n", fd);
917 /* decrement back the ref count */
918 mutex_unlock(&ch->lock);
919 dma_buf_put(dma_buf);
920 return -EINVAL;
921 }
922 }
923
924 /* Store the dma_buf handle */
925 for (i = 0 ; i < ARRAY_SIZE(ch->dmabuf_handle_table) ; i++) {
926 if (ch->dmabuf_handle_table[i] == NULL) {
927 ch->dmabuf_handle_table[i] = dma_buf;
928 ch->dmabuf_fd_table[i] = fd;
 929 pr_debug("ch [%s] locked ion buf #%d fd [%d] dma_buf=0x%pK\n",
 930 ch->name, i,
931 ch->dmabuf_fd_table[i],
932 ch->dmabuf_handle_table[i]);
933 mutex_unlock(&ch->lock);
934 return 0;
935 }
936 }
937
938 mutex_unlock(&ch->lock);
939 /* decrement back the ref count */
940 dma_buf_put(dma_buf);
941 pr_err("no free entry to store ion handle of fd [%d]\n", fd);
942
943 return -EFAULT;
944}
945
946/**
947 * spcom_handle_unlock_ion_buf_command() - Unlock an ION buffer.
948 *
 949 * Unlock an ION buffer, allowing it to be freed once it is no longer used by
 950 * the remote subsystem.
951 */
952static int spcom_handle_unlock_ion_buf_command(struct spcom_channel *ch,
953 void *cmd_buf, int size)
954{
955 int i;
956 struct spcom_user_command *cmd = cmd_buf;
957 int fd;
958 bool found = false;
959 struct dma_buf *dma_buf;
960
961 if (size != sizeof(*cmd)) {
962 pr_err("cmd size [%d], expected [%d]\n",
963 (int)size, (int)sizeof(*cmd));
964 return -EINVAL;
965 }
966 if (cmd->arg > (unsigned int)INT_MAX) {
 967 pr_err("int overflow [%u]\n", cmd->arg);
 968 return -EINVAL;
969 }
970 fd = cmd->arg;
971
972 pr_debug("Unlock ion buf ch [%s] fd [%d]\n", ch->name, fd);
973
974 dma_buf = dma_buf_get(fd);
975 if (IS_ERR_OR_NULL(dma_buf)) {
976 pr_err("fail to get dma buf handle\n");
977 return -EINVAL;
978 }
979 dma_buf_put(dma_buf);
980 pr_debug("dma_buf referenced ok\n");
981
982 /* shared buf unlock doesn't involve any rx/tx data to SP. */
983 mutex_lock(&ch->lock);
984 if (fd == (int) SPCOM_ION_FD_UNLOCK_ALL) {
985 pr_debug("unlocked ALL ion buf ch [%s]\n", ch->name);
986 found = true;
987 /* unlock all buf */
988 for (i = 0; i < ARRAY_SIZE(ch->dmabuf_handle_table); i++) {
989 if (ch->dmabuf_handle_table[i] != NULL) {
990 pr_debug("unlocked ion buf #%d fd [%d]\n",
991 i, ch->dmabuf_fd_table[i]);
992 dma_buf_put(ch->dmabuf_handle_table[i]);
993 ch->dmabuf_handle_table[i] = NULL;
994 ch->dmabuf_fd_table[i] = -1;
995 }
996 }
997 } else {
998 /* unlock specific buf */
999 for (i = 0 ; i < ARRAY_SIZE(ch->dmabuf_handle_table) ; i++) {
1000 if (!ch->dmabuf_handle_table[i])
1001 continue;
1002 if (ch->dmabuf_handle_table[i] == dma_buf) {
 1003 pr_debug("ch [%s] unlocked ion buf #%d fd [%d] dma_buf=0x%pK\n",
 1004 ch->name, i,
1005 ch->dmabuf_fd_table[i],
1006 ch->dmabuf_handle_table[i]);
1007 dma_buf_put(ch->dmabuf_handle_table[i]);
1008 ch->dmabuf_handle_table[i] = NULL;
1009 ch->dmabuf_fd_table[i] = -1;
1010 found = true;
1011 break;
1012 }
1013 }
1014 }
1015 mutex_unlock(&ch->lock);
1016
1017 if (!found) {
1018 pr_err("ch [%s] fd [%d] was not found\n", ch->name, fd);
1019 return -ENODEV;
1020 }
1021
1022 return 0;
1023}
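/*
 * Illustrative lock/unlock flow from user space (a minimal sketch; the fd
 * values and error handling are hypothetical). The command uses the same
 * cmd_id/arg fields of struct spcom_user_command (uapi/linux/spcom.h) that
 * the handlers above parse:
 *
 *	struct spcom_user_command cmd = {
 *		.cmd_id = SPCOM_CMD_LOCK_ION_BUF,
 *		.arg = shared_buf_fd,		// dma-buf fd to keep alive
 *	};
 *
 *	write(ch_fd, &cmd, sizeof(cmd));	// pin while the SP uses it
 *	...
 *	cmd.cmd_id = SPCOM_CMD_UNLOCK_ION_BUF;	// or arg = SPCOM_ION_FD_UNLOCK_ALL
 *	write(ch_fd, &cmd, sizeof(cmd));	// release when done
 */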
1024
1025/**
1026 * spcom_handle_write() - Handle user space write commands.
1027 *
1028 * @buf: command buffer.
1029 * @buf_size: command buffer size.
1030 *
1031 * Return: 0 on successful operation, negative value otherwise.
1032 */
1033static int spcom_handle_write(struct spcom_channel *ch,
1034 void *buf,
1035 int buf_size)
1036{
1037 int ret = 0;
1038 struct spcom_user_command *cmd = NULL;
1039 int cmd_id = 0;
1040
1041 /* Minimal command should have command-id and argument */
1042 if (buf_size < sizeof(struct spcom_user_command)) {
1043 pr_err("Command buffer size [%d] too small\n", buf_size);
1044 return -EINVAL;
1045 }
1046
1047 cmd = (struct spcom_user_command *)buf;
1048 cmd_id = (int) cmd->cmd_id;
1049
1050 pr_debug("cmd_id [0x%x]\n", cmd_id);
1051
1052 if (!ch && cmd_id != SPCOM_CMD_CREATE_CHANNEL
1053 && cmd_id != SPCOM_CMD_RESTART_SP) {
1054 pr_err("channel context is null\n");
1055 return -EINVAL;
1056 }
1057
1058 switch (cmd_id) {
1059 case SPCOM_CMD_SEND:
1060 ret = spcom_handle_send_command(ch, buf, buf_size);
1061 break;
1062 case SPCOM_CMD_SEND_MODIFIED:
1063 ret = spcom_handle_send_modified_command(ch, buf, buf_size);
1064 break;
1065 case SPCOM_CMD_LOCK_ION_BUF:
1066 ret = spcom_handle_lock_ion_buf_command(ch, buf, buf_size);
1067 break;
1068 case SPCOM_CMD_UNLOCK_ION_BUF:
1069 ret = spcom_handle_unlock_ion_buf_command(ch, buf, buf_size);
1070 break;
1071 case SPCOM_CMD_CREATE_CHANNEL:
1072 ret = spcom_handle_create_channel_command(buf, buf_size);
1073 break;
1074 case SPCOM_CMD_RESTART_SP:
1075 ret = spcom_handle_restart_sp_command();
1076 break;
1077 default:
1078 pr_err("Invalid Command Id [0x%x]\n", (int) cmd->cmd_id);
1079 ret = -EINVAL;
1080 }
1081
1082 return ret;
1083}
1084
1085/**
1086 * spcom_handle_get_req_size() - Handle user space get request size command
1087 *
1088 * @ch: channel handle
1089 * @buf: command buffer.
1090 * @size: command buffer size.
1091 *
1092 * Return: size in bytes on success, negative value on failure.
1093 */
1094static int spcom_handle_get_req_size(struct spcom_channel *ch,
1095 void *buf,
1096 uint32_t size)
1097{
1098 int ret = -1;
1099 uint32_t next_req_size = 0;
1100
1101 if (size < sizeof(next_req_size)) {
1102 pr_err("buf size [%d] too small\n", (int) size);
1103 return -EINVAL;
1104 }
1105
1106 ret = spcom_get_next_request_size(ch);
1107 if (ret < 0)
1108 return ret;
1109 next_req_size = (uint32_t) ret;
1110
1111 memcpy(buf, &next_req_size, sizeof(next_req_size));
1112 pr_debug("next_req_size [%d]\n", next_req_size);
1113
1114 return sizeof(next_req_size); /* can't exceed user buffer size */
1115}
1116
1117/**
1118 * spcom_handle_read_req_resp() - Handle user space get request/response command
1119 *
1120 * @ch: channel handle
1121 * @buf: command buffer.
1122 * @size: command buffer size.
1123 *
1124 * Return: size in bytes on success, negative value on failure.
1125 */
1126static int spcom_handle_read_req_resp(struct spcom_channel *ch,
1127 void *buf,
1128 uint32_t size)
1129{
1130 int ret;
1131 struct spcom_msg_hdr *hdr;
1132 void *rx_buf;
1133 int rx_buf_size;
1134 uint32_t timeout_msec = 0; /* client only */
1135
 1136 /* Check if the remote side is connected */
 1137 if (!spcom_is_channel_connected(ch)) {
 1138 pr_err("ch [%s] remote side not connected\n", ch->name);
1139 return -ENOTCONN;
1140 }
1141
1142 /* Check param validity */
1143 if (size > SPCOM_MAX_RESPONSE_SIZE) {
1144 pr_err("ch [%s] invalid size [%d]\n",
1145 ch->name, size);
1146 return -EINVAL;
1147 }
1148
 1149 /* Allocate buffers */
1150 rx_buf_size = sizeof(*hdr) + size;
1151 rx_buf = kzalloc(rx_buf_size, GFP_KERNEL);
1152 if (!rx_buf)
1153 return -ENOMEM;
1154
1155 /*
1156 * client response timeout depends on the request
 1157 * handling time on the remote side.
1158 */
1159 if (!ch->is_server) {
1160 timeout_msec = ch->response_timeout_msec;
1161 pr_debug("response_timeout_msec = %d\n", (int) timeout_msec);
1162 }
1163
1164 ret = spcom_rx(ch, rx_buf, rx_buf_size, timeout_msec);
1165 if (ret < 0) {
1166 pr_err("rx error %d\n", ret);
1167 goto exit_err;
1168 } else {
1169 size = ret; /* actual_rx_size */
1170 }
1171
1172 hdr = rx_buf;
1173
1174 if (ch->is_server) {
1175 ch->txn_id = hdr->txn_id;
1176 pr_debug("request txn_id [0x%x]\n", ch->txn_id);
1177 }
1178
1179 /* copy data to user without the header */
1180 if (size > sizeof(*hdr)) {
1181 size -= sizeof(*hdr);
1182 memcpy(buf, hdr->buf, size);
1183 } else {
1184 pr_err("rx size [%d] too small\n", size);
1185 ret = -EFAULT;
1186 goto exit_err;
1187 }
1188
1189 kfree(rx_buf);
1190 return size;
1191exit_err:
1192 kfree(rx_buf);
1193 return ret;
1194}
1195
1196/**
1197 * spcom_handle_read() - Handle user space read request/response or
1198 * request-size command
1199 *
1200 * @ch: channel handle
1201 * @buf: command buffer.
1202 * @size: command buffer size.
1203 *
 1204 * A special size, SPCOM_GET_NEXT_REQUEST_SIZE, which is bigger than the max
 1205 * request/response size, tells the kernel that user space only needs the size.
1206 *
1207 * Return: size in bytes on success, negative value on failure.
1208 */
1209static int spcom_handle_read(struct spcom_channel *ch,
1210 void *buf,
1211 uint32_t size)
1212{
1213 int ret = -1;
1214
1215 if (size == SPCOM_GET_NEXT_REQUEST_SIZE) {
1216 pr_debug("get next request size, ch [%s]\n", ch->name);
1217 ch->is_server = true;
1218 ret = spcom_handle_get_req_size(ch, buf, size);
1219 } else {
1220 pr_debug("get request/response, ch [%s]\n", ch->name);
1221 ret = spcom_handle_read_req_resp(ch, buf, size);
1222 }
1223
1224 pr_debug("ch [%s] , size = %d\n", ch->name, size);
1225
1226 return ret;
1227}
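/*
 * Illustrative server-side flow from user space (a minimal sketch,
 * assuming the spcomlib conventions from the file header; names and error
 * handling are hypothetical, and the reply framing is normally built by
 * spcomlib per uapi/linux/spcom.h):
 *
 *	char req[300], resp[268];
 *	uint32_t next_size;
 *
 *	// a read() of exactly SPCOM_GET_NEXT_REQUEST_SIZE bytes marks this
 *	// side as the server and returns only the size of the next request
 *	read(fd, &next_size, SPCOM_GET_NEXT_REQUEST_SIZE);
 *	read(fd, req, next_size);		// fetch the request itself
 *	handle_request(req, next_size, resp);	// hypothetical app handler
 *	send_response(fd, resp);		// hypothetical helper that frames
 *						// the reply and write()s it back
 */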
1228
1229/*======================================================================*/
1230/* CHAR DEVICE USER SPACE INTERFACE */
1231/*======================================================================*/
1232
1233/**
1234 * file_to_filename() - get the filename from file pointer.
1235 *
1236 * @filp: file pointer
1237 *
1238 * it is used for debug prints.
1239 *
1240 * Return: filename string or "unknown".
1241 */
1242static char *file_to_filename(struct file *filp)
1243{
1244 struct dentry *dentry = NULL;
1245 char *filename = NULL;
1246
1247 if (!filp || !filp->f_path.dentry)
1248 return "unknown";
1249
1250 dentry = filp->f_path.dentry;
1251 filename = dentry->d_iname;
1252
1253 return filename;
1254}
1255
1256/**
1257 * spcom_device_open() - handle channel file open() from user space.
1258 *
1259 * @filp: file pointer
1260 *
1261 * The file name (without path) is the channel name.
1262 * Register rpmsg driver matching with channel name.
 1263 * Store the channel context in the file private data pointer for future
1264 * read/write/close operations.
1265 */
1266static int spcom_device_open(struct inode *inode, struct file *filp)
1267{
1268 struct spcom_channel *ch;
1269 int ret;
1270 const char *name = file_to_filename(filp);
1271 u32 pid = current_pid();
1272
1273 pr_debug("open file [%s]\n", name);
1274
1275 if (strcmp(name, "unknown") == 0) {
1276 pr_err("name is unknown\n");
1277 return -EINVAL;
1278 }
1279
1280 if (strcmp(name, DEVICE_NAME) == 0) {
1281 pr_debug("root dir skipped\n");
1282 return 0;
1283 }
1284
1285 if (strcmp(name, "sp_ssr") == 0) {
1286 pr_debug("sp_ssr dev node skipped\n");
1287 return 0;
1288 }
1289
1290 ch = spcom_find_channel_by_name(name);
1291 if (!ch) {
1292 pr_err("channel %s doesn't exist, load App first\n", name);
1293 return -ENODEV;
1294 }
1295
1296 mutex_lock(&ch->lock);
1297 if (!spcom_is_channel_open(ch)) {
1298 reinit_completion(&ch->connect);
 1299 /* channel was closed; need to register the driver again */
1300 ret = spcom_register_rpmsg_drv(ch);
1301 if (ret < 0) {
1302 pr_err("register rpmsg driver failed %d\n", ret);
1303 mutex_unlock(&ch->lock);
1304 return ret;
1305 }
1306 }
1307 /* only one client/server may use the channel */
1308 if (ch->is_busy) {
1309 pr_err("channel [%s] is BUSY, already in use by pid [%d]\n",
1310 name, ch->pid);
1311 mutex_unlock(&ch->lock);
1312 return -EBUSY;
1313 }
1314
1315 ch->is_busy = true;
1316 ch->pid = pid;
1317 mutex_unlock(&ch->lock);
1318
1319 filp->private_data = ch;
1320 return 0;
1321}
1322
1323/**
1324 * spcom_device_release() - handle channel file close() from user space.
1325 *
1326 * @filp: file pointer
1327 *
1328 * The file name (without path) is the channel name.
 1329 * Release the channel: mark it as no longer busy,
 1330 * clear the owner pid and discard any unconsumed
 1331 * rx data, so the channel can be reused by another
 1332 * process.
1333 */
1334static int spcom_device_release(struct inode *inode, struct file *filp)
1335{
1336 struct spcom_channel *ch;
1337 const char *name = file_to_filename(filp);
1338 int ret = 0;
1339
1340 if (strcmp(name, "unknown") == 0) {
1341 pr_err("name is unknown\n");
1342 return -EINVAL;
1343 }
1344
1345 if (strcmp(name, DEVICE_NAME) == 0) {
1346 pr_debug("root dir skipped\n");
1347 return 0;
1348 }
1349
1350 if (strcmp(name, "sp_ssr") == 0) {
1351 pr_debug("sp_ssr dev node skipped\n");
1352 return 0;
1353 }
1354
1355 ch = filp->private_data;
1356 if (!ch) {
1357 pr_debug("ch is NULL, file name %s\n", file_to_filename(filp));
1358 return -ENODEV;
1359 }
1360
1361 mutex_lock(&ch->lock);
1362 /* channel might be already closed or disconnected */
1363 if (!spcom_is_channel_open(ch)) {
1364 pr_debug("ch [%s] already closed\n", name);
1365 mutex_unlock(&ch->lock);
1366 return 0;
1367 }
1368
1369 ch->is_busy = false;
1370 ch->pid = 0;
1371 if (ch->rpmsg_rx_buf) {
 1372 pr_debug("ch [%s] discarding unconsumed rx packet actual_rx_size=%zd\n",
 1373 name, ch->actual_rx_size);
1374 kfree(ch->rpmsg_rx_buf);
1375 ch->rpmsg_rx_buf = NULL;
1376 }
1377 ch->actual_rx_size = 0;
1378 mutex_unlock(&ch->lock);
1379 filp->private_data = NULL;
1380
1381 return ret;
1382}
1383
1384/**
1385 * spcom_device_write() - handle channel file write() from user space.
1386 *
1387 * @filp: file pointer
1388 *
1389 * Return: On Success - same size as number of bytes to write.
1390 * On Failure - negative value.
1391 */
1392static ssize_t spcom_device_write(struct file *filp,
1393 const char __user *user_buff,
1394 size_t size, loff_t *f_pos)
1395{
1396 int ret;
1397 char *buf;
1398 struct spcom_channel *ch;
1399 const char *name = file_to_filename(filp);
1400 int buf_size = 0;
1401
1402 if (!user_buff || !f_pos || !filp) {
1403 pr_err("invalid null parameters\n");
1404 return -EINVAL;
1405 }
1406
1407 if (*f_pos != 0) {
1408 pr_err("offset should be zero, no sparse buffer\n");
1409 return -EINVAL;
1410 }
1411
1412 if (!name) {
1413 pr_err("name is NULL\n");
1414 return -EINVAL;
1415 }
1416 pr_debug("write file [%s] size [%d] pos [%d]\n",
1417 name, (int) size, (int) *f_pos);
1418
1419 if (strcmp(name, "unknown") == 0) {
1420 pr_err("name is unknown\n");
1421 return -EINVAL;
1422 }
1423
1424 ch = filp->private_data;
1425 if (!ch) {
1426 if (strcmp(name, DEVICE_NAME) != 0) {
1427 pr_err("invalid ch pointer, command not allowed\n");
1428 return -EINVAL;
1429 }
1430 pr_debug("control device - no channel context\n");
1431 } else {
 1432 /* Check if the remote side is connected */
 1433 if (!spcom_is_channel_connected(ch)) {
 1434 pr_err("ch [%s] remote side not connected\n", ch->name);
1435 return -ENOTCONN;
1436 }
1437 }
1438
1439 if (size > SPCOM_MAX_COMMAND_SIZE) {
1440 pr_err("size [%d] > max size [%d]\n",
1441 (int) size, (int) SPCOM_MAX_COMMAND_SIZE);
1442 return -EINVAL;
1443 }
1444 buf_size = size; /* explicit casting size_t to int */
1445 buf = kzalloc(size, GFP_KERNEL);
1446 if (buf == NULL)
1447 return -ENOMEM;
1448
1449 ret = copy_from_user(buf, user_buff, size);
1450 if (ret) {
1451 pr_err("Unable to copy from user (err %d)\n", ret);
1452 kfree(buf);
1453 return -EFAULT;
1454 }
1455
1456 ret = spcom_handle_write(ch, buf, buf_size);
1457 if (ret) {
1458 pr_err("handle command error [%d]\n", ret);
1459 kfree(buf);
1460 return ret;
1461 }
1462
1463 kfree(buf);
1464
1465 return size;
1466}
1467
1468/**
1469 * spcom_device_read() - handle channel file read() from user space.
1470 *
1471 * @filp: file pointer
1472 *
1473 * Return: number of bytes to read on success, negative value on
1474 * failure.
1475 */
1476static ssize_t spcom_device_read(struct file *filp, char __user *user_buff,
1477 size_t size, loff_t *f_pos)
1478{
1479 int ret = 0;
1480 int actual_size = 0;
1481 char *buf;
1482 struct spcom_channel *ch;
1483 const char *name = file_to_filename(filp);
1484 uint32_t buf_size = 0;
1485
1486 pr_debug("read file [%s], size = %d bytes\n", name, (int) size);
1487
1488 if (strcmp(name, "unknown") == 0) {
1489 pr_err("name is unknown\n");
1490 return -EINVAL;
1491 }
1492
1493 if (!user_buff || !f_pos ||
1494 (size == 0) || (size > SPCOM_MAX_READ_SIZE)) {
1495 pr_err("invalid parameters\n");
1496 return -EINVAL;
1497 }
1498 buf_size = size; /* explicit casting size_t to uint32_t */
1499
1500 ch = filp->private_data;
1501
1502 if (ch == NULL) {
1503 pr_err("invalid ch pointer, file [%s]\n", name);
1504 return -EINVAL;
1505 }
1506
1507 if (!spcom_is_channel_open(ch)) {
1508 pr_err("ch is not open, file [%s]\n", name);
1509 return -EINVAL;
1510 }
1511
1512 buf = kzalloc(size, GFP_KERNEL);
1513 if (buf == NULL)
1514 return -ENOMEM;
1515
1516 ret = spcom_handle_read(ch, buf, buf_size);
1517 if (ret < 0) {
1518 if (ret != -ERESTARTSYS)
1519 pr_err("read error [%d]\n", ret);
1520 kfree(buf);
1521 return ret;
1522 }
1523 actual_size = ret;
1524 if ((actual_size == 0) || (actual_size > size)) {
1525 pr_err("invalid actual_size [%d]\n", actual_size);
1526 kfree(buf);
1527 return -EFAULT;
1528 }
1529
1530 ret = copy_to_user(user_buff, buf, actual_size);
1531 if (ret) {
1532 pr_err("Unable to copy to user, err = %d\n", ret);
1533 kfree(buf);
1534 return -EFAULT;
1535 }
1536
1537 kfree(buf);
1538 pr_debug("ch [%s] ret [%d]\n", name, (int) actual_size);
1539
1540 return actual_size;
1541}
1542
1543/**
1544 * spcom_device_poll() - handle channel file poll() from user space.
1545 *
1546 * @filp: file pointer
1547 *
1548 * This allows user space to wait/check for channel connection,
1549 * or wait for SSR event.
1550 *
1551 * Return: event bitmask on success, set POLLERR on failure.
1552 */
1553static unsigned int spcom_device_poll(struct file *filp,
1554 struct poll_table_struct *poll_table)
1555{
1556 /*
 1557 * when the user calls with timeout -1 for blocking mode,
 1558 * at least one bit must be set in the response
1559 */
1560 unsigned int ret = SPCOM_POLL_READY_FLAG;
1561 unsigned long mask;
1562 struct spcom_channel *ch;
1563 const char *name = file_to_filename(filp);
1564 bool wait = false;
1565 bool done = false;
1566 /* Event types always implicitly polled for */
1567 unsigned long reserved = POLLERR | POLLHUP | POLLNVAL;
1568 int ready = 0;
1569
1570 if (strcmp(name, "unknown") == 0) {
1571 pr_err("name is unknown\n");
1572 return -EINVAL;
1573 }
1574
1575 if (!poll_table) {
1576 pr_err("invalid parameters\n");
1577 return -EINVAL;
1578 }
1579
1580 ch = filp->private_data;
1581 mask = poll_requested_events(poll_table);
1582
1583 pr_debug("== ch [%s] mask [0x%x] ==\n", name, (int) mask);
1584
 1585 /* the user space poll() API uses "short" events and not "long" */
1586 mask &= 0x0000FFFF;
1587
1588 wait = mask & SPCOM_POLL_WAIT_FLAG;
1589 if (wait)
1590 pr_debug("ch [%s] wait for event flag is ON\n", name);
1591
 1592 // mask will be used in output, clear the input bits
1593 mask &= (unsigned long)~SPCOM_POLL_WAIT_FLAG;
1594 mask &= (unsigned long)~SPCOM_POLL_READY_FLAG;
1595 mask &= (unsigned long)~reserved;
1596
1597 switch (mask) {
1598 case SPCOM_POLL_LINK_STATE:
1599 pr_debug("ch [%s] SPCOM_POLL_LINK_STATE\n", name);
1600 if (wait) {
1601 reinit_completion(&spcom_dev->rpmsg_state_change);
1602 ready = wait_for_completion_interruptible(
1603 &spcom_dev->rpmsg_state_change);
1604 pr_debug("ch [%s] poll LINK_STATE signaled\n", name);
1605 }
1606 done = atomic_read(&spcom_dev->rpmsg_dev_count) > 0;
1607 break;
1608 case SPCOM_POLL_CH_CONNECT:
1609 /*
1610 * ch is not expected to be NULL since user must call open()
1611 * to get FD before it can call poll().
 1612 * open() will fail if there is no ch related to the char-device.
1613 */
1614 if (ch == NULL) {
1615 pr_err("invalid ch pointer, file [%s]\n", name);
1616 return POLLERR;
1617 }
1618 pr_debug("ch [%s] SPCOM_POLL_CH_CONNECT\n", name);
1619 if (wait) {
1620 reinit_completion(&ch->connect);
1621 ready = wait_for_completion_interruptible(&ch->connect);
1622 pr_debug("ch [%s] poll CH_CONNECT signaled\n", name);
1623 }
1624 mutex_lock(&ch->lock);
 1625 done = (ch->rpdev != NULL);
 1626 pr_debug("ch [%s] reported done=%d\n", name, done);
 1627 mutex_unlock(&ch->lock);
1628 break;
1629 default:
1630 pr_err("ch [%s] poll, invalid mask [0x%x]\n",
1631 name, (int) mask);
1632 ret = POLLERR;
1633 break;
1634 }
1635
1636 if (ready < 0) { /* wait was interrupted */
1637 pr_debug("ch [%s] poll interrupted, ret [%d]\n", name, ready);
1638 ret = POLLERR | SPCOM_POLL_READY_FLAG | mask;
1639 }
1640 if (done)
1641 ret |= mask;
1642
1643 pr_debug("ch [%s] poll, mask = 0x%x, ret=0x%x\n",
1644 name, (int) mask, ret);
1645
1646 return ret;
1647}
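/*
 * Illustrative userspace wait for channel connection (a minimal sketch;
 * the fd and error handling are hypothetical). Per the mask handling
 * above, the caller ORs SPCOM_POLL_WAIT_FLAG into the requested event and
 * then checks the requested bit, ignoring results that carry POLLERR:
 *
 *	struct pollfd pfd = {
 *		.fd = ch_fd,
 *		.events = SPCOM_POLL_CH_CONNECT | SPCOM_POLL_WAIT_FLAG,
 *	};
 *
 *	poll(&pfd, 1, -1);
 *	if (!(pfd.revents & POLLERR) &&
 *	    (pfd.revents & SPCOM_POLL_CH_CONNECT)) {
 *		// channel is connected on both sides
 *	}
 */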
1648
1649/* file operation supported from user space */
1650static const struct file_operations fops = {
1651 .read = spcom_device_read,
1652 .poll = spcom_device_poll,
1653 .write = spcom_device_write,
1654 .open = spcom_device_open,
1655 .release = spcom_device_release,
1656};
1657
1658/**
1659 * spcom_create_channel_chardev() - Create a channel char-dev node file
1660 * for user space interface
1661 */
1662static int spcom_create_channel_chardev(const char *name)
1663{
1664 int ret;
1665 struct device *dev;
1666 struct spcom_channel *ch;
1667 dev_t devt;
1668 struct class *cls = spcom_dev->driver_class;
1669 struct device *parent = spcom_dev->class_dev;
1670 void *priv;
1671 struct cdev *cdev;
1672
1673 pr_debug("Add channel [%s]\n", name);
1674
1675 ch = spcom_find_channel_by_name(name);
1676 if (ch) {
1677 pr_err("channel [%s] already exist\n", name);
1678 return -EINVAL;
1679 }
1680
1681 ch = spcom_find_channel_by_name(""); /* find reserved channel */
1682 if (!ch) {
1683 pr_err("no free channel\n");
1684 return -ENODEV;
1685 }
1686
1687 ret = spcom_init_channel(ch, name);
1688 if (ret < 0) {
1689 pr_err("can't init channel %d\n", ret);
1690 return ret;
1691 }
1692
1693 ret = spcom_register_rpmsg_drv(ch);
1694 if (ret < 0) {
1695 pr_err("register rpmsg driver failed %d\n", ret);
1696 goto exit_destroy_channel;
1697 }
1698
1699 cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
1700 if (!cdev) {
1701 ret = -ENOMEM;
1702 goto exit_unregister_drv;
1703 }
1704
1705 devt = spcom_dev->device_no + atomic_read(&spcom_dev->chdev_count);
1706 priv = ch;
1707 dev = device_create(cls, parent, devt, priv, name);
1708 if (IS_ERR(dev)) {
1709 pr_err("device_create failed\n");
1710 ret = -ENODEV;
1711 goto exit_free_cdev;
1712 }
1713
1714 cdev_init(cdev, &fops);
1715 cdev->owner = THIS_MODULE;
1716
1717 ret = cdev_add(cdev, devt, 1);
1718 if (ret < 0) {
1719 pr_err("cdev_add failed %d\n", ret);
1720 ret = -ENODEV;
1721 goto exit_destroy_device;
1722 }
1723 atomic_inc(&spcom_dev->chdev_count);
1724 mutex_lock(&ch->lock);
1725 ch->cdev = cdev;
1726 ch->dev = dev;
1727 mutex_unlock(&ch->lock);
1728
1729 return 0;
1730
1731exit_destroy_device:
1732 device_destroy(spcom_dev->driver_class, devt);
1733exit_free_cdev:
1734 kfree(cdev);
1735exit_unregister_drv:
1736 ret = spcom_unregister_rpmsg_drv(ch);
1737 if (ret != 0)
1738 pr_err("can't unregister rpmsg drv %d\n", ret);
1739exit_destroy_channel:
1740 // empty channel leaves free slot for next time
1741 mutex_lock(&ch->lock);
1742 memset(ch->name, 0, SPCOM_CHANNEL_NAME_SIZE);
1743 mutex_unlock(&ch->lock);
1744 return -EFAULT;
1745}
1746
1747static int spcom_register_chardev(void)
1748{
1749 int ret;
1750 unsigned int baseminor = 0;
1751 unsigned int count = 1;
1752 void *priv = spcom_dev;
1753
1754 ret = alloc_chrdev_region(&spcom_dev->device_no, baseminor, count,
1755 DEVICE_NAME);
1756 if (ret < 0) {
1757 pr_err("alloc_chrdev_region failed %d\n", ret);
1758 return ret;
1759 }
1760
1761 spcom_dev->driver_class = class_create(THIS_MODULE, DEVICE_NAME);
1762 if (IS_ERR(spcom_dev->driver_class)) {
1763 ret = -ENOMEM;
1764 pr_err("class_create failed %d\n", ret);
1765 goto exit_unreg_chrdev_region;
1766 }
1767
1768 spcom_dev->class_dev = device_create(spcom_dev->driver_class, NULL,
1769 spcom_dev->device_no, priv,
1770 DEVICE_NAME);
1771
1772 if (IS_ERR(spcom_dev->class_dev)) {
1773 pr_err("class_device_create failed %d\n", ret);
1774 ret = -ENOMEM;
1775 goto exit_destroy_class;
1776 }
1777
1778 cdev_init(&spcom_dev->cdev, &fops);
1779 spcom_dev->cdev.owner = THIS_MODULE;
1780
1781 ret = cdev_add(&spcom_dev->cdev,
1782 MKDEV(MAJOR(spcom_dev->device_no), 0),
1783 SPCOM_MAX_CHANNELS);
1784 if (ret < 0) {
1785 pr_err("cdev_add failed %d\n", ret);
1786 goto exit_destroy_device;
1787 }
1788
1789 pr_debug("char device created\n");
1790
1791 return 0;
1792
1793exit_destroy_device:
1794 device_destroy(spcom_dev->driver_class, spcom_dev->device_no);
1795exit_destroy_class:
1796 class_destroy(spcom_dev->driver_class);
1797exit_unreg_chrdev_region:
1798 unregister_chrdev_region(spcom_dev->device_no, 1);
1799 return ret;
1800}
1801
1802static void spcom_unregister_chrdev(void)
1803{
1804 cdev_del(&spcom_dev->cdev);
1805 device_destroy(spcom_dev->driver_class, spcom_dev->device_no);
1806 class_destroy(spcom_dev->driver_class);
1807 unregister_chrdev_region(spcom_dev->device_no,
1808 atomic_read(&spcom_dev->chdev_count));
1809
1810}
1811
1812static int spcom_parse_dt(struct device_node *np)
1813{
1814 int ret;
1815 const char *propname = "qcom,spcom-ch-names";
1816 int num_ch;
1817 int i;
1818 const char *name;
1819
1820 num_ch = of_property_count_strings(np, propname);
1821 if (num_ch < 0) {
1822 pr_err("wrong format of predefined channels definition [%d]\n",
1823 num_ch);
1824 return num_ch;
1825 }
1826 if (num_ch > ARRAY_SIZE(spcom_dev->predefined_ch_name)) {
1827 pr_err("too many predefined channels [%d]\n", num_ch);
1828 return -EINVAL;
1829 }
1830
1831 pr_debug("num of predefined channels [%d]\n", num_ch);
1832 for (i = 0; i < num_ch; i++) {
1833 ret = of_property_read_string_index(np, propname, i, &name);
1834 if (ret) {
1835 pr_err("failed to read DT channel [%d] name\n", i);
1836 return -EFAULT;
1837 }
1838 strlcpy(spcom_dev->predefined_ch_name[i],
1839 name,
1840 sizeof(spcom_dev->predefined_ch_name[i]));
1841
1842 pr_debug("found ch [%s]\n", name);
1843 }
1844
1845 return num_ch;
1846}
1847
1848/*
 1849 * This function runs in system workqueue context and processes packets
 1850 * delayed by the rpmsg rx callback:
 1851 * each packet belongs to its destination spcom channel ch
1852 */
1853static void spcom_signal_rx_done(struct work_struct *ignored)
1854{
1855 struct spcom_channel *ch;
1856 struct rx_buff_list *rx_item;
1857 struct spcom_msg_hdr *hdr;
1858 unsigned long flags;
1859
1860 spin_lock_irqsave(&spcom_dev->rx_lock, flags);
1861 while (!list_empty(&spcom_dev->rx_list_head)) {
1862 /* detach last entry */
1863 rx_item = list_last_entry(&spcom_dev->rx_list_head,
1864 struct rx_buff_list, list);
1865 list_del(&rx_item->list);
1866 spin_unlock_irqrestore(&spcom_dev->rx_lock, flags);
1867
1868 if (!rx_item) {
1869 pr_err("empty entry in pending rx list\n");
1870 spin_lock_irqsave(&spcom_dev->rx_lock, flags);
1871 continue;
1872 }
1873 ch = rx_item->ch;
1874 hdr = (struct spcom_msg_hdr *)rx_item->rpmsg_rx_buf;
1875 mutex_lock(&ch->lock);
1876
1877 if (ch->comm_role_undefined) {
1878 ch->comm_role_undefined = false;
1879 ch->is_server = true;
1880 ch->txn_id = hdr->txn_id;
1881 pr_debug("ch [%s] first packet txn_id=%d, it is server\n",
1882 ch->name, ch->txn_id);
1883 }
1884
1885 if (ch->rpmsg_abort) {
1886 if (ch->rpmsg_rx_buf) {
 1887 pr_debug("ch [%s] rx aborted free %zd bytes\n",
 1888 ch->name, ch->actual_rx_size);
1889 kfree(ch->rpmsg_rx_buf);
1890 ch->actual_rx_size = 0;
1891 }
1892 goto rx_aborted;
1893 }
1894 if (ch->rpmsg_rx_buf) {
 1895 pr_err("ch [%s] previous buffer not consumed %zd bytes\n",
 1896 ch->name, ch->actual_rx_size);
1897 kfree(ch->rpmsg_rx_buf);
1898 ch->rpmsg_rx_buf = NULL;
1899 ch->actual_rx_size = 0;
1900 }
1901 if (!ch->is_server && (hdr->txn_id != ch->txn_id)) {
1902 pr_err("ch [%s] rx dropped txn_id %d, ch->txn_id %d\n",
1903 ch->name, hdr->txn_id, ch->txn_id);
1904 goto rx_aborted;
1905 }
1906 ch->rpmsg_rx_buf = rx_item->rpmsg_rx_buf;
1907 ch->actual_rx_size = rx_item->rx_buf_size;
1908 complete_all(&ch->rx_done);
1909 mutex_unlock(&ch->lock);
1910
1911 kfree(rx_item);
1912
1913 /* lock for the next list entry */
1914 spin_lock_irqsave(&spcom_dev->rx_lock, flags);
1915 }
1916 spin_unlock_irqrestore(&spcom_dev->rx_lock, flags);
1917 return;
1918rx_aborted:
1919 mutex_unlock(&ch->lock);
1920 kfree(rx_item->rpmsg_rx_buf);
1921 kfree(rx_item);
1922}
1923
1924static int spcom_rpdev_cb(struct rpmsg_device *rpdev,
1925 void *data, int len, void *priv, u32 src)
1926{
1927 struct spcom_channel *ch;
1928 static DECLARE_WORK(rpmsg_rx_consumer, spcom_signal_rx_done);
1929 struct rx_buff_list *rx_item;
1930 unsigned long flags;
1931
1932 if (!rpdev || !data) {
1933 pr_err("rpdev or data is NULL\n");
1934 return -EINVAL;
1935 }
1936 pr_debug("incoming msg from %s\n", rpdev->id.name);
1937 ch = dev_get_drvdata(&rpdev->dev);
1938 if (!ch) {
1939 pr_err("%s: invalid ch\n", __func__);
1940 return -EINVAL;
1941 }
1942 if (len > SPCOM_RX_BUF_SIZE || len <= 0) {
1943 pr_err("got msg size %d, max allowed %d\n",
1944 len, SPCOM_RX_BUF_SIZE);
1945 return -EINVAL;
1946 }
1947
1948 rx_item = kzalloc(sizeof(*rx_item), GFP_ATOMIC);
1949 if (!rx_item)
1950 return -ENOMEM;
1951
1952 rx_item->rpmsg_rx_buf = kmemdup(data, len, GFP_ATOMIC);
1953 if (!rx_item->rpmsg_rx_buf)
1954 return -ENOMEM;
1955
1956 rx_item->rx_buf_size = len;
1957 rx_item->ch = ch;
1958
1959 spin_lock_irqsave(&spcom_dev->rx_lock, flags);
1960 list_add(&rx_item->list, &spcom_dev->rx_list_head);
1961 spin_unlock_irqrestore(&spcom_dev->rx_lock, flags);
1962 pr_debug("signaling rx item for %s, received %d bytes\n",
1963 rpdev->id.name, len);
1964
1965 schedule_work(&rpmsg_rx_consumer);
1966 return 0;
1967}
1968
1969static int spcom_rpdev_probe(struct rpmsg_device *rpdev)
1970{
1971 const char *name;
1972 struct spcom_channel *ch;
1973
1974 if (!rpdev) {
1975 pr_err("rpdev is NULL\n");
1976 return -EINVAL;
1977 }
1978 name = rpdev->id.name;
1979 pr_debug("new channel %s rpmsg_device arrived\n", name);
1980 ch = spcom_find_channel_by_name(name);
1981 if (!ch) {
1982 pr_err("channel %s not found\n", name);
1983 return -ENODEV;
1984 }
1985 mutex_lock(&ch->lock);
1986 ch->rpdev = rpdev;
1987 ch->rpmsg_abort = false;
1988 ch->txn_id = INITIAL_TXN_ID;
1989 complete_all(&ch->connect);
1990 mutex_unlock(&ch->lock);
1991
1992 dev_set_drvdata(&rpdev->dev, ch);
1993
1994 /* used to evaluate underlying transport link up/down */
1995 atomic_inc(&spcom_dev->rpmsg_dev_count);
1996 if (atomic_read(&spcom_dev->rpmsg_dev_count) == 1)
1997 complete_all(&spcom_dev->rpmsg_state_change);
1998
1999 return 0;
2000}
2001
2002static void spcom_rpdev_remove(struct rpmsg_device *rpdev)
2003{
2004 struct spcom_channel *ch;
2005 int i;
2006
2007 if (!rpdev) {
2008 pr_err("rpdev is NULL\n");
2009 return;
2010 }
2011
2012 dev_info(&rpdev->dev, "rpmsg device %s removed\n", rpdev->id.name);
2013 ch = dev_get_drvdata(&rpdev->dev);
2014 if (!ch) {
2015 pr_err("channel %s not found\n", rpdev->id.name);
2016 return;
2017 }
2018
2019 mutex_lock(&ch->lock);
2020 // unlock all ion buffers of sp_kernel channel
2021 if (strcmp(ch->name, "sp_kernel") == 0) {
2022 for (i = 0; i < ARRAY_SIZE(ch->dmabuf_handle_table); i++) {
2023 if (ch->dmabuf_handle_table[i] != NULL) {
2024 pr_debug("unlocked ion buf #%d fd [%d]\n",
2025 i, ch->dmabuf_fd_table[i]);
2026 dma_buf_put(ch->dmabuf_handle_table[i]);
2027 ch->dmabuf_handle_table[i] = NULL;
2028 ch->dmabuf_fd_table[i] = -1;
2029 }
2030 }
2031 }
2032
2033 ch->rpdev = NULL;
2034 ch->rpmsg_abort = true;
2035 ch->txn_id = 0;
2036 complete_all(&ch->rx_done);
2037 mutex_unlock(&ch->lock);
2038
2039 /* used to evaluate underlying transport link up/down */
2040 if (atomic_dec_and_test(&spcom_dev->rpmsg_dev_count))
2041 complete_all(&spcom_dev->rpmsg_state_change);
2042
2043}
2044
2045/* register rpmsg driver to match with channel ch_name */
2046static int spcom_register_rpmsg_drv(struct spcom_channel *ch)
2047{
2048 struct rpmsg_driver *rpdrv;
2049 struct rpmsg_device_id *match;
2050 char *drv_name;
2051 int ret;
2052
2053 if (ch->rpdrv) {
2054 pr_err("ch:%s, rpmsg driver %s already registered\n", ch->name,
2055 ch->rpdrv->id_table->name);
2056 return -ENODEV;
2057 }
2058
2059 rpdrv = kzalloc(sizeof(*rpdrv), GFP_KERNEL);
2060 if (!rpdrv)
2061 return -ENOMEM;
2062
2063 /* zalloc array of two to NULL terminate the match list */
2064 match = kzalloc(2 * sizeof(*match), GFP_KERNEL);
2065 if (!match) {
2066 kfree(rpdrv);
2067 return -ENOMEM;
2068 }
2069 snprintf(match->name, RPMSG_NAME_SIZE, "%s", ch->name);
2070
2071 drv_name = kasprintf(GFP_KERNEL, "%s_%s", "spcom_rpmsg_drv", ch->name);
2072 if (!drv_name) {
2073 pr_err("can't allocate drv_name for %s\n", ch->name);
2074 kfree(rpdrv);
2075 kfree(match);
2076 return -ENOMEM;
2077 }
2078
2079 rpdrv->probe = spcom_rpdev_probe;
2080 rpdrv->remove = spcom_rpdev_remove;
2081 rpdrv->callback = spcom_rpdev_cb;
2082 rpdrv->id_table = match;
2083 rpdrv->drv.name = drv_name;
2084 ret = register_rpmsg_driver(rpdrv);
2085 if (ret) {
2086 pr_err("can't register rpmsg_driver for %s\n", ch->name);
2087 kfree(rpdrv);
2088 kfree(match);
2089 kfree(drv_name);
2090 return ret;
2091 }
2092 mutex_lock(&ch->lock);
2093 ch->rpdrv = rpdrv;
2094 ch->rpmsg_abort = false;
2095 mutex_unlock(&ch->lock);
2096
2097 return 0;
2098}
2099
2100static int spcom_unregister_rpmsg_drv(struct spcom_channel *ch)
2101{
2102 if (!ch->rpdrv)
2103 return -ENODEV;
2104 unregister_rpmsg_driver(ch->rpdrv);
2105
2106 mutex_lock(&ch->lock);
2107 kfree(ch->rpdrv->drv.name);
2108 kfree((void *)ch->rpdrv->id_table);
2109 kfree(ch->rpdrv);
2110 ch->rpdrv = NULL;
2111 ch->rpmsg_abort = true; /* will unblock spcom_rx() */
2112 mutex_unlock(&ch->lock);
2113 return 0;
2114}
2115
2116static int spcom_probe(struct platform_device *pdev)
2117{
2118 int ret;
2119 struct spcom_device *dev = NULL;
2120 struct device_node *np;
2121
2122 if (!pdev) {
2123 pr_err("invalid pdev\n");
2124 return -ENODEV;
2125 }
2126
2127 np = pdev->dev.of_node;
2128 if (!np) {
2129 pr_err("invalid DT node\n");
2130 return -EINVAL;
2131 }
2132
2133 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
2134 if (dev == NULL)
2135 return -ENOMEM;
2136
2137 spcom_dev = dev;
2138 spcom_dev->pdev = pdev;
2139 /* start counting exposed channel char devices from 1 */
2140 atomic_set(&spcom_dev->chdev_count, 1);
2141 init_completion(&spcom_dev->rpmsg_state_change);
2142 atomic_set(&spcom_dev->rpmsg_dev_count, 0);
2143
2144 INIT_LIST_HEAD(&spcom_dev->rx_list_head);
2145 spin_lock_init(&spcom_dev->rx_lock);
2146
2147 ret = spcom_register_chardev();
2148 if (ret) {
2149 pr_err("create character device failed\n");
2150 goto fail_while_chardev_reg;
2151 }
2152
2153 ret = spcom_parse_dt(np);
2154 if (ret < 0)
2155 goto fail_reg_chardev;
2156
2157 ret = spcom_create_predefined_channels_chardev();
2158 if (ret < 0) {
2159 pr_err("create character device failed\n");
2160 goto fail_reg_chardev;
2161 }
2162 pr_debug("Driver Initialization ok\n");
2163 return 0;
2164
2165fail_reg_chardev:
2166 pr_err("failed to init driver\n");
2167 spcom_unregister_chrdev();
2168fail_while_chardev_reg:
2169 kfree(dev);
2170 spcom_dev = NULL;
2171
2172 return -ENODEV;
2173}
2174
2175static const struct of_device_id spcom_match_table[] = {
2176 { .compatible = "qcom,spcom", },
2177 { },
2178};
2179
2180static struct platform_driver spcom_driver = {
2181 .probe = spcom_probe,
2182 .driver = {
2183 .name = DEVICE_NAME,
2184 .of_match_table = of_match_ptr(spcom_match_table),
2185 },
2186};
2187
2188static int __init spcom_init(void)
2189{
2190 int ret;
2191
2192 ret = platform_driver_register(&spcom_driver);
2193 if (ret)
2194 pr_err("spcom_driver register failed %d\n", ret);
2195
2196 return ret;
2197}
2198module_init(spcom_init);
2199
2200MODULE_LICENSE("GPL v2");
2201MODULE_DESCRIPTION("Secure Processor Communication");