/*
 * Copyright (c) 2015, Sony Mobile Communications AB.
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/soc/qcom/smd.h>
#include <linux/soc/qcom/smem.h>
#include <linux/wait.h>

/*
 * The Qualcomm Shared Memory communication solution provides point-to-point
 * channels for clients to send and receive streaming or packet based data.
 *
 * Each channel consists of a control item (channel info) and a ring buffer
 * pair. The channel info carries information related to channel state, flow
 * control and the offsets within the ring buffer.
 *
 * All allocated channels are listed in an allocation table, identifying the
 * pair of items by name, type and remote processor.
 *
 * Upon creating a new channel the remote processor allocates channel info and
 * ring buffer items from the smem heap and populates the allocation table. An
 * interrupt is sent to the other end of the channel, which should then scan
 * for new channels. A channel never goes away; it only changes state.
 *
 * The remote processor signals its intent to bring up the communication
 * channel by setting the state of its end of the channel to "opening" and
 * sends out an interrupt. We detect this change and register a smd device to
 * consume the channel. Upon finding a consumer we finish the handshake and the
 * channel is up.
 *
 * Upon closing a channel, the remote processor updates the state of its end
 * of the channel and signals us; we then unregister any attached device and
 * close our end of the channel.
 *
 * Devices attached to a channel can use the qcom_smd_send function to push
 * data to the channel; this is done by copying the data into the tx ring
 * buffer, updating the pointers in the channel info and signaling the remote
 * processor.
 *
 * The remote processor does the equivalent when it transfers data and upon
 * receiving the interrupt we check the channel info for new data and deliver
 * it to the attached device. If the device is not ready to receive the data
 * we leave it in the ring buffer for now.
 */
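
/*
 * Illustrative sketch (not compiled here) of a minimal smd client, assuming
 * the qcom_smd_driver/qcom_smd_device API declared in <linux/soc/qcom/smd.h>
 * and matching the fields used by this driver. All "example_*" names, the
 * compatible string and the payload are hypothetical.
 *
 *	static int example_callback(struct qcom_smd_device *qsdev,
 *				    const void *data, size_t count)
 *	{
 *		// Returning a negative errno leaves the packet in the fifo.
 *		dev_info(&qsdev->dev, "received %zu bytes\n", count);
 *		return 0;
 *	}
 *
 *	static int example_probe(struct qcom_smd_device *qsdev)
 *	{
 *		static const u32 ping = 0xdeadbeef;
 *
 *		// The channel is open by the time probe runs; word aligned
 *		// channels only accept multiples of four bytes.
 *		return qcom_smd_send(qsdev->channel, &ping, sizeof(ping));
 *	}
 *
 *	static const struct of_device_id example_of_match[] = {
 *		{ .compatible = "example,smd-client" },
 *		{}
 *	};
 *
 *	static struct qcom_smd_driver example_driver = {
 *		.probe = example_probe,
 *		.callback = example_callback,
 *		.driver = {
 *			.name = "example_smd_client",
 *			.of_match_table = example_of_match,
 *		},
 *	};
 *	module_driver(example_driver, qcom_smd_driver_register,
 *		      qcom_smd_driver_unregister);
 */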

struct smd_channel_info;
struct smd_channel_info_word;

#define SMD_ALLOC_TBL_COUNT	2
#define SMD_ALLOC_TBL_SIZE	64

/*
 * This lists the various smem heap items relevant for the allocation table and
 * smd channel entries.
 */
static const struct {
	unsigned alloc_tbl_id;
	unsigned info_base_id;
	unsigned fifo_base_id;
} smem_items[SMD_ALLOC_TBL_COUNT] = {
	{
		.alloc_tbl_id = 13,
		.info_base_id = 14,
		.fifo_base_id = 338
	},
	{
		.alloc_tbl_id = 14,
		.info_base_id = 266,
		.fifo_base_id = 202,
	},
};

/**
 * struct qcom_smd_edge - representing a remote processor
 * @smd: handle to qcom_smd
 * @of_node: of_node handle for information related to this edge
 * @edge_id: identifier of this edge
 * @irq: interrupt for signals on this edge
 * @ipc_regmap: regmap handle holding the outgoing ipc register
 * @ipc_offset: offset within @ipc_regmap of the register for ipc
 * @ipc_bit: bit in the register at @ipc_offset of @ipc_regmap
 * @channels: list of all channels detected on this edge
 * @channels_lock: guard for modifications of @channels
 * @allocated: array of bitmaps representing already allocated channels
 * @need_rescan: flag that the @work needs to scan smem for new channels
 * @smem_available: last available amount of smem triggering a channel scan
 * @work: work item for edge housekeeping
 */
struct qcom_smd_edge {
	struct qcom_smd *smd;
	struct device_node *of_node;
	unsigned edge_id;

	int irq;

	struct regmap *ipc_regmap;
	int ipc_offset;
	int ipc_bit;

	struct list_head channels;
	spinlock_t channels_lock;

	DECLARE_BITMAP(allocated[SMD_ALLOC_TBL_COUNT], SMD_ALLOC_TBL_SIZE);

	bool need_rescan;
	unsigned smem_available;

	struct work_struct work;
};

/*
 * SMD channel states.
 */
enum smd_channel_state {
	SMD_CHANNEL_CLOSED,
	SMD_CHANNEL_OPENING,
	SMD_CHANNEL_OPENED,
	SMD_CHANNEL_FLUSHING,
	SMD_CHANNEL_CLOSING,
	SMD_CHANNEL_RESET,
	SMD_CHANNEL_RESET_OPENING
};

/**
 * struct qcom_smd_channel - smd channel struct
 * @edge: qcom_smd_edge this channel is living on
 * @qsdev: reference to an associated smd client device
 * @name: name of the channel
 * @state: local state of the channel
 * @remote_state: remote state of the channel
 * @tx_info: byte aligned outgoing channel info
 * @rx_info: byte aligned incoming channel info
 * @tx_info_word: word aligned outgoing channel info
 * @rx_info_word: word aligned incoming channel info
 * @tx_lock: lock to make writes to the channel mutually exclusive
 * @fblockread_event: wakeup event tied to tx fBLOCKREADINTR
 * @tx_fifo: pointer to the outgoing ring buffer
 * @rx_fifo: pointer to the incoming ring buffer
 * @fifo_size: size of each ring buffer
 * @bounce_buffer: bounce buffer for reading wrapped packets
 * @cb: callback function registered for this channel
 * @recv_lock: guard for rx info modifications and cb pointer
 * @pkt_size: size of the currently handled packet
 * @list: list entry for @channels in qcom_smd_edge
 */
struct qcom_smd_channel {
	struct qcom_smd_edge *edge;

	struct qcom_smd_device *qsdev;

	char *name;
	enum smd_channel_state state;
	enum smd_channel_state remote_state;

	struct smd_channel_info *tx_info;
	struct smd_channel_info *rx_info;

	struct smd_channel_info_word *tx_info_word;
	struct smd_channel_info_word *rx_info_word;

	struct mutex tx_lock;
	wait_queue_head_t fblockread_event;

	void *tx_fifo;
	void *rx_fifo;
	int fifo_size;

	void *bounce_buffer;
	int (*cb)(struct qcom_smd_device *, const void *, size_t);

	spinlock_t recv_lock;

	int pkt_size;

	struct list_head list;
};

/**
 * struct qcom_smd - smd struct
 * @dev: device struct
 * @num_edges: number of entries in @edges
 * @edges: array of edges to be handled
 */
struct qcom_smd {
	struct device *dev;

	unsigned num_edges;
	struct qcom_smd_edge edges[0];
};

/*
 * Format of the smd_info smem items, for byte aligned channels.
 */
struct smd_channel_info {
	u32 state;
	u8 fDSR;
	u8 fCTS;
	u8 fCD;
	u8 fRI;
	u8 fHEAD;
	u8 fTAIL;
	u8 fSTATE;
	u8 fBLOCKREADINTR;
	u32 tail;
	u32 head;
};

/*
 * Format of the smd_info smem items, for word aligned channels.
 */
struct smd_channel_info_word {
	u32 state;
	u32 fDSR;
	u32 fCTS;
	u32 fCD;
	u32 fRI;
	u32 fHEAD;
	u32 fTAIL;
	u32 fSTATE;
	u32 fBLOCKREADINTR;
	u32 tail;
	u32 head;
};

#define GET_RX_CHANNEL_INFO(channel, param) \
	(channel->rx_info_word ? \
		channel->rx_info_word->param : \
		channel->rx_info->param)

#define SET_RX_CHANNEL_INFO(channel, param, value) \
	(channel->rx_info_word ? \
		(channel->rx_info_word->param = value) : \
		(channel->rx_info->param = value))

#define GET_TX_CHANNEL_INFO(channel, param) \
	(channel->tx_info_word ? \
		channel->tx_info_word->param : \
		channel->tx_info->param)

#define SET_TX_CHANNEL_INFO(channel, param, value) \
	(channel->tx_info_word ? \
		(channel->tx_info_word->param = value) : \
		(channel->tx_info->param = value))

/**
 * struct qcom_smd_alloc_entry - channel allocation entry
 * @name: channel name
 * @cid: channel index
 * @flags: channel flags and edge id
 * @ref_count: reference count of the channel
 */
struct qcom_smd_alloc_entry {
	u8 name[20];
	u32 cid;
	u32 flags;
	u32 ref_count;
} __packed;

#define SMD_CHANNEL_FLAGS_EDGE_MASK	0xff
#define SMD_CHANNEL_FLAGS_STREAM	BIT(8)
#define SMD_CHANNEL_FLAGS_PACKET	BIT(9)

/*
 * Each smd packet contains a 20 byte header, with the first 4 bytes being the
 * length of the payload.
 */
#define SMD_PACKET_HEADER_LEN	20
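
/*
 * On-wire layout sketch, derived from the header handling below (the 12 byte
 * payload length is an arbitrary example): qcom_smd_send() writes five u32
 * header words followed by the payload, and the receive path reads the length
 * from the first word before advancing past the header.
 *
 *	offset 0:  0x0000000c   <- payload length (12)
 *	offset 4:  0x00000000   \
 *	offset 8:  0x00000000    | remaining header words, left as zero
 *	offset 12: 0x00000000    | by qcom_smd_send()
 *	offset 16: 0x00000000   /
 *	offset 20: payload[0..11]
 */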

/*
 * Signal the remote processor associated with 'channel'.
 */
static void qcom_smd_signal_channel(struct qcom_smd_channel *channel)
{
	struct qcom_smd_edge *edge = channel->edge;

	regmap_write(edge->ipc_regmap, edge->ipc_offset, BIT(edge->ipc_bit));
}

/*
 * Initialize the tx channel info
 */
static void qcom_smd_channel_reset(struct qcom_smd_channel *channel)
{
	SET_TX_CHANNEL_INFO(channel, state, SMD_CHANNEL_CLOSED);
	SET_TX_CHANNEL_INFO(channel, fDSR, 0);
	SET_TX_CHANNEL_INFO(channel, fCTS, 0);
	SET_TX_CHANNEL_INFO(channel, fCD, 0);
	SET_TX_CHANNEL_INFO(channel, fRI, 0);
	SET_TX_CHANNEL_INFO(channel, fHEAD, 0);
	SET_TX_CHANNEL_INFO(channel, fTAIL, 0);
	SET_TX_CHANNEL_INFO(channel, fSTATE, 1);
	SET_TX_CHANNEL_INFO(channel, fBLOCKREADINTR, 0);
	SET_TX_CHANNEL_INFO(channel, head, 0);
	SET_TX_CHANNEL_INFO(channel, tail, 0);

	qcom_smd_signal_channel(channel);

	channel->state = SMD_CHANNEL_CLOSED;
	channel->pkt_size = 0;
}

/*
 * Calculate the amount of data available in the rx fifo
 */
static size_t qcom_smd_channel_get_rx_avail(struct qcom_smd_channel *channel)
{
	unsigned head;
	unsigned tail;

	head = GET_RX_CHANNEL_INFO(channel, head);
	tail = GET_RX_CHANNEL_INFO(channel, tail);

	return (head - tail) & (channel->fifo_size - 1);
}
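
/*
 * Worked example of the index arithmetic above (the numbers are illustrative
 * and assume a power-of-two fifo): with fifo_size = 1024, head = 10 and
 * tail = 1000, the unsigned subtraction wraps and (10 - 1000) & 1023 = 34,
 * i.e. 24 bytes at the end of the ring plus 10 bytes at the start.
 */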

/*
 * Set tx channel state and inform the remote processor
 */
static void qcom_smd_channel_set_state(struct qcom_smd_channel *channel,
				       int state)
{
	struct qcom_smd_edge *edge = channel->edge;
	bool is_open = state == SMD_CHANNEL_OPENED;

	if (channel->state == state)
		return;

	dev_dbg(edge->smd->dev, "set_state(%s, %d)\n", channel->name, state);

	SET_TX_CHANNEL_INFO(channel, fDSR, is_open);
	SET_TX_CHANNEL_INFO(channel, fCTS, is_open);
	SET_TX_CHANNEL_INFO(channel, fCD, is_open);

	SET_TX_CHANNEL_INFO(channel, state, state);
	SET_TX_CHANNEL_INFO(channel, fSTATE, 1);

	channel->state = state;
	qcom_smd_signal_channel(channel);
}

/*
 * Copy count bytes of data using 32bit accesses, if that's required.
 */
static void smd_copy_to_fifo(void __iomem *_dst,
			     const void *_src,
			     size_t count,
			     bool word_aligned)
{
	u32 *dst = (u32 *)_dst;
	u32 *src = (u32 *)_src;

	if (word_aligned) {
		count /= sizeof(u32);
		while (count--)
			writel_relaxed(*src++, dst++);
	} else {
		memcpy_toio(_dst, _src, count);
	}
}

/*
 * Copy count bytes of data using 32bit accesses, if that is required.
 */
static void smd_copy_from_fifo(void *_dst,
			       const void __iomem *_src,
			       size_t count,
			       bool word_aligned)
{
	u32 *dst = (u32 *)_dst;
	u32 *src = (u32 *)_src;

	if (word_aligned) {
		count /= sizeof(u32);
		while (count--)
			*dst++ = readl_relaxed(src++);
	} else {
		memcpy_fromio(_dst, _src, count);
	}
}

/*
 * Read count bytes of data from the rx fifo into buf, but don't advance the
 * tail.
 */
static size_t qcom_smd_channel_peek(struct qcom_smd_channel *channel,
				    void *buf, size_t count)
{
	bool word_aligned;
	unsigned tail;
	size_t len;

	word_aligned = channel->rx_info_word != NULL;
	tail = GET_RX_CHANNEL_INFO(channel, tail);

	len = min_t(size_t, count, channel->fifo_size - tail);
	if (len) {
		smd_copy_from_fifo(buf,
				   channel->rx_fifo + tail,
				   len,
				   word_aligned);
	}

	if (len != count) {
		smd_copy_from_fifo(buf + len,
				   channel->rx_fifo,
				   count - len,
				   word_aligned);
	}

	return count;
}

/*
 * Advance the rx tail by count bytes.
 */
static void qcom_smd_channel_advance(struct qcom_smd_channel *channel,
				     size_t count)
{
	unsigned tail;

	tail = GET_RX_CHANNEL_INFO(channel, tail);
	tail += count;
	tail &= (channel->fifo_size - 1);
	SET_RX_CHANNEL_INFO(channel, tail, tail);
}

/*
 * Read out a single packet from the rx fifo and deliver it to the device
 */
static int qcom_smd_channel_recv_single(struct qcom_smd_channel *channel)
{
	struct qcom_smd_device *qsdev = channel->qsdev;
	unsigned tail;
	size_t len;
	void *ptr;
	int ret;

	if (!channel->cb)
		return 0;

	tail = GET_RX_CHANNEL_INFO(channel, tail);

	/* Use bounce buffer if the data wraps */
	if (tail + channel->pkt_size >= channel->fifo_size) {
		ptr = channel->bounce_buffer;
		len = qcom_smd_channel_peek(channel, ptr, channel->pkt_size);
	} else {
		ptr = channel->rx_fifo + tail;
		len = channel->pkt_size;
	}

	ret = channel->cb(qsdev, ptr, len);
	if (ret < 0)
		return ret;

	/* Only forward the tail if the client consumed the data */
	qcom_smd_channel_advance(channel, len);

	channel->pkt_size = 0;

	return 0;
}

/*
 * Per channel interrupt handling
 */
static bool qcom_smd_channel_intr(struct qcom_smd_channel *channel)
{
	bool need_state_scan = false;
	int remote_state;
	u32 pktlen;
	int avail;
	int ret;

	/* Handle state changes */
	remote_state = GET_RX_CHANNEL_INFO(channel, state);
	if (remote_state != channel->remote_state) {
		channel->remote_state = remote_state;
		need_state_scan = true;
	}
	/* Indicate that we have seen any state change */
	SET_RX_CHANNEL_INFO(channel, fSTATE, 0);

	/* Signal waiting qcom_smd_send() about the interrupt */
	if (!GET_TX_CHANNEL_INFO(channel, fBLOCKREADINTR))
		wake_up_interruptible(&channel->fblockread_event);

	/* Don't consume any data until we've opened the channel */
	if (channel->state != SMD_CHANNEL_OPENED)
		goto out;

	/* Indicate that we've seen the new data */
	SET_RX_CHANNEL_INFO(channel, fHEAD, 0);

	/* Consume data */
	for (;;) {
		avail = qcom_smd_channel_get_rx_avail(channel);

		if (!channel->pkt_size && avail >= SMD_PACKET_HEADER_LEN) {
			qcom_smd_channel_peek(channel, &pktlen, sizeof(pktlen));
			qcom_smd_channel_advance(channel, SMD_PACKET_HEADER_LEN);
			channel->pkt_size = pktlen;
		} else if (channel->pkt_size && avail >= channel->pkt_size) {
			ret = qcom_smd_channel_recv_single(channel);
			if (ret)
				break;
		} else {
			break;
		}
	}

	/* Indicate that we have seen and updated tail */
	SET_RX_CHANNEL_INFO(channel, fTAIL, 1);

	/* Signal the remote that we've consumed the data (if requested) */
	if (!GET_RX_CHANNEL_INFO(channel, fBLOCKREADINTR)) {
		/* Ensure ordering of channel info updates */
		wmb();

		qcom_smd_signal_channel(channel);
	}

out:
	return need_state_scan;
}

/*
 * The edge interrupts are triggered by the remote processor on state changes,
 * channel info updates or when new channels are created.
 */
static irqreturn_t qcom_smd_edge_intr(int irq, void *data)
{
	struct qcom_smd_edge *edge = data;
	struct qcom_smd_channel *channel;
	unsigned available;
	bool kick_worker = false;

	/*
	 * Handle state changes or data on each of the channels on this edge
	 */
	spin_lock(&edge->channels_lock);
	list_for_each_entry(channel, &edge->channels, list) {
		spin_lock(&channel->recv_lock);
		kick_worker |= qcom_smd_channel_intr(channel);
		spin_unlock(&channel->recv_lock);
	}
	spin_unlock(&edge->channels_lock);

	/*
	 * Creating a new channel requires allocating an smem entry, so we only
	 * have to scan if the amount of available space in smem has changed
	 * since the last scan.
	 */
	available = qcom_smem_get_free_space(edge->edge_id);
	if (available != edge->smem_available) {
		edge->smem_available = available;
		edge->need_rescan = true;
		kick_worker = true;
	}

	if (kick_worker)
		schedule_work(&edge->work);

	return IRQ_HANDLED;
}

/*
 * Delivers any outstanding packets in the rx fifo; can be used after probe of
 * the clients to deliver any packets that weren't delivered before the client
 * was set up.
 */
static void qcom_smd_channel_resume(struct qcom_smd_channel *channel)
{
	unsigned long flags;

	spin_lock_irqsave(&channel->recv_lock, flags);
	qcom_smd_channel_intr(channel);
	spin_unlock_irqrestore(&channel->recv_lock, flags);
}

/*
 * Calculate how much space is available in the tx fifo.
 */
static size_t qcom_smd_get_tx_avail(struct qcom_smd_channel *channel)
{
	unsigned head;
	unsigned tail;
	unsigned mask = channel->fifo_size - 1;

	head = GET_TX_CHANNEL_INFO(channel, head);
	tail = GET_TX_CHANNEL_INFO(channel, tail);

	return mask - ((head - tail) & mask);
}

/*
 * Write count bytes of data into channel, possibly wrapping in the ring buffer
 */
static int qcom_smd_write_fifo(struct qcom_smd_channel *channel,
			       const void *data,
			       size_t count)
{
	bool word_aligned;
	unsigned head;
	size_t len;

	word_aligned = channel->tx_info_word != NULL;
	head = GET_TX_CHANNEL_INFO(channel, head);

	len = min_t(size_t, count, channel->fifo_size - head);
	if (len) {
		smd_copy_to_fifo(channel->tx_fifo + head,
				 data,
				 len,
				 word_aligned);
	}

	if (len != count) {
		smd_copy_to_fifo(channel->tx_fifo,
				 data + len,
				 count - len,
				 word_aligned);
	}

	head += count;
	head &= (channel->fifo_size - 1);
	SET_TX_CHANNEL_INFO(channel, head, head);

	return count;
}

/**
 * qcom_smd_send - write data to smd channel
 * @channel: channel handle
 * @data: buffer of data to write
 * @len: number of bytes to write
 *
 * This is a blocking write of len bytes into the channel's tx ring buffer and
 * signals the remote end. It will sleep until there is enough space available
 * in the tx buffer, utilizing the fBLOCKREADINTR signaling mechanism to avoid
 * polling.
 */
int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len)
{
	u32 hdr[5] = {len,};
	int tlen = sizeof(hdr) + len;
	int ret;

	/* Word aligned channels only accept word size aligned data */
	if (channel->rx_info_word != NULL && len % 4)
		return -EINVAL;

	ret = mutex_lock_interruptible(&channel->tx_lock);
	if (ret)
		return ret;

	while (qcom_smd_get_tx_avail(channel) < tlen) {
		if (channel->state != SMD_CHANNEL_OPENED) {
			ret = -EPIPE;
			goto out;
		}

		SET_TX_CHANNEL_INFO(channel, fBLOCKREADINTR, 1);

		ret = wait_event_interruptible(channel->fblockread_event,
				qcom_smd_get_tx_avail(channel) >= tlen ||
				channel->state != SMD_CHANNEL_OPENED);
		if (ret)
			goto out;

		SET_TX_CHANNEL_INFO(channel, fBLOCKREADINTR, 0);
	}

	SET_TX_CHANNEL_INFO(channel, fTAIL, 0);

	qcom_smd_write_fifo(channel, hdr, sizeof(hdr));
	qcom_smd_write_fifo(channel, data, len);

	SET_TX_CHANNEL_INFO(channel, fHEAD, 1);

	/* Ensure ordering of channel info updates */
	wmb();

	qcom_smd_signal_channel(channel);

out:
	mutex_unlock(&channel->tx_lock);

	return ret;
}
EXPORT_SYMBOL(qcom_smd_send);

static struct qcom_smd_device *to_smd_device(struct device *dev)
{
	return container_of(dev, struct qcom_smd_device, dev);
}

static struct qcom_smd_driver *to_smd_driver(struct device *dev)
{
	struct qcom_smd_device *qsdev = to_smd_device(dev);

	return container_of(qsdev->dev.driver, struct qcom_smd_driver, driver);
}

static int qcom_smd_dev_match(struct device *dev, struct device_driver *drv)
{
	return of_driver_match_device(dev, drv);
}

/*
 * Probe the smd client.
 *
 * The remote side has indicated that it wants the channel to be opened, so
 * complete the state handshake and probe our client driver.
 */
static int qcom_smd_dev_probe(struct device *dev)
{
	struct qcom_smd_device *qsdev = to_smd_device(dev);
	struct qcom_smd_driver *qsdrv = to_smd_driver(dev);
	struct qcom_smd_channel *channel = qsdev->channel;
	size_t bb_size;
	int ret;

	/*
	 * Packets are at most 4k, but use a smaller bounce buffer if the fifo
	 * is smaller than that.
	 */
	bb_size = min(channel->fifo_size, SZ_4K);
	channel->bounce_buffer = kmalloc(bb_size, GFP_KERNEL);
	if (!channel->bounce_buffer)
		return -ENOMEM;

	channel->cb = qsdrv->callback;

	qcom_smd_channel_set_state(channel, SMD_CHANNEL_OPENING);

	qcom_smd_channel_set_state(channel, SMD_CHANNEL_OPENED);

	ret = qsdrv->probe(qsdev);
	if (ret)
		goto err;

	qcom_smd_channel_resume(channel);

	return 0;

err:
	dev_err(&qsdev->dev, "probe failed\n");

	channel->cb = NULL;
	kfree(channel->bounce_buffer);
	channel->bounce_buffer = NULL;

	qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSED);
	return ret;
}

/*
 * Remove the smd client.
 *
 * The channel is going away, for some reason, so remove the smd client and
 * reset the channel state.
 */
static int qcom_smd_dev_remove(struct device *dev)
{
	struct qcom_smd_device *qsdev = to_smd_device(dev);
	struct qcom_smd_driver *qsdrv = to_smd_driver(dev);
	struct qcom_smd_channel *channel = qsdev->channel;
	unsigned long flags;

	qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSING);

	/*
	 * Make sure we don't race with the code receiving data.
	 */
	spin_lock_irqsave(&channel->recv_lock, flags);
	channel->cb = NULL;
	spin_unlock_irqrestore(&channel->recv_lock, flags);

	/* Wake up any sleepers in qcom_smd_send() */
	wake_up_interruptible(&channel->fblockread_event);

	/*
	 * We expect that the client might block in remove() waiting for any
	 * outstanding calls to qcom_smd_send() to wake up and finish.
	 */
	if (qsdrv->remove)
		qsdrv->remove(qsdev);

	/*
	 * The client is now gone, cleanup and reset the channel state.
	 */
	channel->qsdev = NULL;
	kfree(channel->bounce_buffer);
	channel->bounce_buffer = NULL;

	qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSED);

	qcom_smd_channel_reset(channel);

	return 0;
}

static struct bus_type qcom_smd_bus = {
	.name = "qcom_smd",
	.match = qcom_smd_dev_match,
	.probe = qcom_smd_dev_probe,
	.remove = qcom_smd_dev_remove,
};

/*
 * Release function for the qcom_smd_device object.
 */
static void qcom_smd_release_device(struct device *dev)
{
	struct qcom_smd_device *qsdev = to_smd_device(dev);

	kfree(qsdev);
}

/*
 * Finds the device_node for the smd child interested in this channel.
 */
static struct device_node *qcom_smd_match_channel(struct device_node *edge_node,
						  const char *channel)
{
	struct device_node *child;
	const char *name;
	const char *key;
	int ret;

	for_each_available_child_of_node(edge_node, child) {
		key = "qcom,smd-channels";
		ret = of_property_read_string(child, key, &name);
		if (ret)
			continue;

		if (strcmp(name, channel) == 0)
			return child;
	}

	return NULL;
}

/*
 * Create a smd client device for a channel that is being opened.
 */
static int qcom_smd_create_device(struct qcom_smd_channel *channel)
{
	struct qcom_smd_device *qsdev;
	struct qcom_smd_edge *edge = channel->edge;
	struct device_node *node;
	struct qcom_smd *smd = edge->smd;
	int ret;

	if (channel->qsdev)
		return -EEXIST;

	node = qcom_smd_match_channel(edge->of_node, channel->name);
	if (!node) {
		dev_dbg(smd->dev, "no match for '%s'\n", channel->name);
		return -ENXIO;
	}

	dev_dbg(smd->dev, "registering '%s'\n", channel->name);

	qsdev = kzalloc(sizeof(*qsdev), GFP_KERNEL);
	if (!qsdev)
		return -ENOMEM;

	dev_set_name(&qsdev->dev, "%s.%s", edge->of_node->name, node->name);
	qsdev->dev.parent = smd->dev;
	qsdev->dev.bus = &qcom_smd_bus;
	qsdev->dev.release = qcom_smd_release_device;
	qsdev->dev.of_node = node;

	qsdev->channel = channel;

	channel->qsdev = qsdev;

	ret = device_register(&qsdev->dev);
	if (ret) {
		dev_err(smd->dev, "device_register failed: %d\n", ret);
		put_device(&qsdev->dev);
	}

	return ret;
}

/*
 * Destroy a smd client device for a channel that's going away.
 */
static void qcom_smd_destroy_device(struct qcom_smd_channel *channel)
{
	struct device *dev;

	BUG_ON(!channel->qsdev);

	dev = &channel->qsdev->dev;

	device_unregister(dev);
	of_node_put(dev->of_node);
	put_device(dev);
}

/**
 * qcom_smd_driver_register - register a smd driver
 * @qsdrv: qcom_smd_driver struct
 */
int qcom_smd_driver_register(struct qcom_smd_driver *qsdrv)
{
	qsdrv->driver.bus = &qcom_smd_bus;
	return driver_register(&qsdrv->driver);
}
EXPORT_SYMBOL(qcom_smd_driver_register);

/**
 * qcom_smd_driver_unregister - unregister a smd driver
 * @qsdrv: qcom_smd_driver struct
 */
void qcom_smd_driver_unregister(struct qcom_smd_driver *qsdrv)
{
	driver_unregister(&qsdrv->driver);
}
EXPORT_SYMBOL(qcom_smd_driver_unregister);

/*
 * Allocate the qcom_smd_channel object for a newly found smd channel,
 * retrieving and validating the smem items involved.
 */
static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *edge,
							 unsigned smem_info_item,
							 unsigned smem_fifo_item,
							 char *name)
{
	struct qcom_smd_channel *channel;
	struct qcom_smd *smd = edge->smd;
	size_t fifo_size;
	size_t info_size;
	void *fifo_base;
	void *info;
	int ret;

	channel = devm_kzalloc(smd->dev, sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return ERR_PTR(-ENOMEM);

	channel->edge = edge;
	channel->name = devm_kstrdup(smd->dev, name, GFP_KERNEL);
	if (!channel->name)
		return ERR_PTR(-ENOMEM);

	mutex_init(&channel->tx_lock);
	spin_lock_init(&channel->recv_lock);
	init_waitqueue_head(&channel->fblockread_event);

	ret = qcom_smem_get(edge->edge_id, smem_info_item, (void **)&info, &info_size);
	if (ret)
		goto free_name_and_channel;

	/*
	 * Use the size of the item to figure out which channel info struct to
	 * use.
	 */
	if (info_size == 2 * sizeof(struct smd_channel_info_word)) {
		channel->tx_info_word = info;
		channel->rx_info_word = info + sizeof(struct smd_channel_info_word);
	} else if (info_size == 2 * sizeof(struct smd_channel_info)) {
		channel->tx_info = info;
		channel->rx_info = info + sizeof(struct smd_channel_info);
	} else {
		dev_err(smd->dev,
			"channel info of size %zu not supported\n", info_size);
		ret = -EINVAL;
		goto free_name_and_channel;
	}

	ret = qcom_smem_get(edge->edge_id, smem_fifo_item, &fifo_base, &fifo_size);
	if (ret)
		goto free_name_and_channel;

	/* The channel consists of a rx and tx fifo of equal size */
	fifo_size /= 2;

	dev_dbg(smd->dev, "new channel '%s' info-size: %zu fifo-size: %zu\n",
		name, info_size, fifo_size);

	channel->tx_fifo = fifo_base;
	channel->rx_fifo = fifo_base + fifo_size;
	channel->fifo_size = fifo_size;

	qcom_smd_channel_reset(channel);

	return channel;

free_name_and_channel:
	devm_kfree(smd->dev, channel->name);
	devm_kfree(smd->dev, channel);

	return ERR_PTR(ret);
}

/*
 * Scans the allocation table for any newly allocated channels, calls
 * qcom_smd_create_channel() to create representations of these and adds
 * them to the edge's list of channels.
 */
static void qcom_discover_channels(struct qcom_smd_edge *edge)
{
	struct qcom_smd_alloc_entry *alloc_tbl;
	struct qcom_smd_alloc_entry *entry;
	struct qcom_smd_channel *channel;
	struct qcom_smd *smd = edge->smd;
	unsigned long flags;
	unsigned fifo_id;
	unsigned info_id;
	int ret;
	int tbl;
	int i;

	for (tbl = 0; tbl < SMD_ALLOC_TBL_COUNT; tbl++) {
		ret = qcom_smem_get(edge->edge_id,
				    smem_items[tbl].alloc_tbl_id,
				    (void **)&alloc_tbl,
				    NULL);
		if (ret < 0)
			continue;

		for (i = 0; i < SMD_ALLOC_TBL_SIZE; i++) {
			entry = &alloc_tbl[i];
			if (test_bit(i, edge->allocated[tbl]))
				continue;

			if (entry->ref_count == 0)
				continue;

			if (!entry->name[0])
				continue;

			if (!(entry->flags & SMD_CHANNEL_FLAGS_PACKET))
				continue;

			if ((entry->flags & SMD_CHANNEL_FLAGS_EDGE_MASK) != edge->edge_id)
				continue;

			info_id = smem_items[tbl].info_base_id + entry->cid;
			fifo_id = smem_items[tbl].fifo_base_id + entry->cid;

			channel = qcom_smd_create_channel(edge, info_id, fifo_id, entry->name);
			if (IS_ERR(channel))
				continue;

			spin_lock_irqsave(&edge->channels_lock, flags);
			list_add(&channel->list, &edge->channels);
			spin_unlock_irqrestore(&edge->channels_lock, flags);

			dev_dbg(smd->dev, "new channel found: '%s'\n", channel->name);
			set_bit(i, edge->allocated[tbl]);
		}
	}

	schedule_work(&edge->work);
}

/*
 * This per-edge worker scans smem for any new channels and registers these. It
 * then scans all registered channels for state changes that should be handled
 * by creating or destroying smd client devices for the registered channels.
 *
 * LOCKING: edge->channels_lock does not need to be held during the traversal
 * of the channels list as it's done synchronously with the only writer.
 */
static void qcom_channel_state_worker(struct work_struct *work)
{
	struct qcom_smd_channel *channel;
	struct qcom_smd_edge *edge = container_of(work,
						  struct qcom_smd_edge,
						  work);
	unsigned remote_state;

	/*
	 * Rescan smem if we have reason to believe that there are new
	 * channels.
	 */
	if (edge->need_rescan) {
		edge->need_rescan = false;
		qcom_discover_channels(edge);
	}

	/*
	 * Register a device for any closed channel where the remote processor
	 * is showing interest in opening the channel.
	 */
	list_for_each_entry(channel, &edge->channels, list) {
		if (channel->state != SMD_CHANNEL_CLOSED)
			continue;

		remote_state = GET_RX_CHANNEL_INFO(channel, state);
		if (remote_state != SMD_CHANNEL_OPENING &&
		    remote_state != SMD_CHANNEL_OPENED)
			continue;

		qcom_smd_create_device(channel);
	}

	/*
	 * Unregister the device for any channel that is opened where the
	 * remote processor is closing the channel.
	 */
	list_for_each_entry(channel, &edge->channels, list) {
		if (channel->state != SMD_CHANNEL_OPENING &&
		    channel->state != SMD_CHANNEL_OPENED)
			continue;

		remote_state = GET_RX_CHANNEL_INFO(channel, state);
		if (remote_state == SMD_CHANNEL_OPENING ||
		    remote_state == SMD_CHANNEL_OPENED)
			continue;

		qcom_smd_destroy_device(channel);
	}
}

/*
 * Parses an of_node describing an edge.
 */
static int qcom_smd_parse_edge(struct device *dev,
			       struct device_node *node,
			       struct qcom_smd_edge *edge)
{
	struct device_node *syscon_np;
	const char *key;
	int irq;
	int ret;

	INIT_LIST_HEAD(&edge->channels);
	spin_lock_init(&edge->channels_lock);

	INIT_WORK(&edge->work, qcom_channel_state_worker);

	edge->of_node = of_node_get(node);

	irq = irq_of_parse_and_map(node, 0);
	if (!irq) {
		dev_err(dev, "required smd interrupt missing\n");
		return -EINVAL;
	}

	ret = devm_request_irq(dev, irq,
			       qcom_smd_edge_intr, IRQF_TRIGGER_RISING,
			       node->name, edge);
	if (ret) {
		dev_err(dev, "failed to request smd irq\n");
		return ret;
	}

	edge->irq = irq;

	key = "qcom,smd-edge";
	ret = of_property_read_u32(node, key, &edge->edge_id);
	if (ret) {
		dev_err(dev, "edge missing %s property\n", key);
		return -EINVAL;
	}

	syscon_np = of_parse_phandle(node, "qcom,ipc", 0);
	if (!syscon_np) {
		dev_err(dev, "no qcom,ipc node\n");
		return -ENODEV;
	}

	edge->ipc_regmap = syscon_node_to_regmap(syscon_np);
	if (IS_ERR(edge->ipc_regmap))
		return PTR_ERR(edge->ipc_regmap);

	key = "qcom,ipc";
	ret = of_property_read_u32_index(node, key, 1, &edge->ipc_offset);
	if (ret < 0) {
		dev_err(dev, "no offset in %s\n", key);
		return -EINVAL;
	}

	ret = of_property_read_u32_index(node, key, 2, &edge->ipc_bit);
	if (ret < 0) {
		dev_err(dev, "no bit in %s\n", key);
		return -EINVAL;
	}

	return 0;
}
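
/*
 * Sketch of the devicetree layout consumed by qcom_smd_parse_edge() and
 * qcom_smd_match_channel() above; the node names, the child compatible and
 * all numeric values are made-up examples:
 *
 *	smd {
 *		compatible = "qcom,smd";
 *
 *		modem-edge {
 *			interrupts = <0 25 1>;
 *			qcom,ipc = <&apcs 8 12>;    <- regmap, offset, bit
 *			qcom,smd-edge = <0>;
 *
 *			example-client {
 *				compatible = "example,smd-client";
 *				qcom,smd-channels = "EXAMPLE_CTRL";
 *			};
 *		};
 *	};
 */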

static int qcom_smd_probe(struct platform_device *pdev)
{
	struct qcom_smd_edge *edge;
	struct device_node *node;
	struct qcom_smd *smd;
	size_t array_size;
	int num_edges;
	int ret;
	int i = 0;

	/* Wait for smem */
	ret = qcom_smem_get(QCOM_SMEM_HOST_ANY, smem_items[0].alloc_tbl_id, NULL, NULL);
	if (ret == -EPROBE_DEFER)
		return ret;

	num_edges = of_get_available_child_count(pdev->dev.of_node);
	array_size = sizeof(*smd) + num_edges * sizeof(struct qcom_smd_edge);
	smd = devm_kzalloc(&pdev->dev, array_size, GFP_KERNEL);
	if (!smd)
		return -ENOMEM;
	smd->dev = &pdev->dev;

	smd->num_edges = num_edges;
	for_each_available_child_of_node(pdev->dev.of_node, node) {
		edge = &smd->edges[i++];
		edge->smd = smd;

		ret = qcom_smd_parse_edge(&pdev->dev, node, edge);
		if (ret)
			continue;

		edge->need_rescan = true;
		schedule_work(&edge->work);
	}

	platform_set_drvdata(pdev, smd);

	return 0;
}

/*
 * Shut down all smd clients by making sure that each edge stops processing
 * events and scanning for new channels, then call destroy on the devices.
 */
static int qcom_smd_remove(struct platform_device *pdev)
{
	struct qcom_smd_channel *channel;
	struct qcom_smd_edge *edge;
	struct qcom_smd *smd = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < smd->num_edges; i++) {
		edge = &smd->edges[i];

		disable_irq(edge->irq);
		cancel_work_sync(&edge->work);

		list_for_each_entry(channel, &edge->channels, list) {
			if (!channel->qsdev)
				continue;

			qcom_smd_destroy_device(channel);
		}
	}

	return 0;
}

static const struct of_device_id qcom_smd_of_match[] = {
	{ .compatible = "qcom,smd" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_smd_of_match);

static struct platform_driver qcom_smd_driver = {
	.probe = qcom_smd_probe,
	.remove = qcom_smd_remove,
	.driver = {
		.name = "qcom-smd",
		.of_match_table = qcom_smd_of_match,
	},
};

static int __init qcom_smd_init(void)
{
	int ret;

	ret = bus_register(&qcom_smd_bus);
	if (ret) {
		pr_err("failed to register smd bus: %d\n", ret);
		return ret;
	}

	return platform_driver_register(&qcom_smd_driver);
}
postcore_initcall(qcom_smd_init);

static void __exit qcom_smd_exit(void)
{
	platform_driver_unregister(&qcom_smd_driver);
	bus_unregister(&qcom_smd_bus);
}
module_exit(qcom_smd_exit);

MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
MODULE_DESCRIPTION("Qualcomm Shared Memory Driver");
MODULE_LICENSE("GPL v2");