/*
 * Copyright (c) 2015, Sony Mobile Communications AB.
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/soc/qcom/smd.h>
#include <linux/soc/qcom/smem.h>
#include <linux/wait.h>

/*
 * The Qualcomm Shared Memory communication solution provides point-to-point
 * channels for clients to send and receive streaming or packet-based data.
 *
 * Each channel consists of a control item (channel info) and a ring buffer
 * pair. The channel info carries information related to channel state, flow
 * control and the offsets within the ring buffer.
 *
 * All allocated channels are listed in an allocation table, identifying the
 * pair of items by name, type and remote processor.
 *
 * Upon creating a new channel the remote processor allocates channel info and
 * ring buffer items from the smem heap and populates the allocation table. An
 * interrupt is sent to the other end of the channel and a scan for new
 * channels should be done. A channel never goes away; it will only change
 * state.
 *
 * The remote processor signals its intent to bring up the communication
 * channel by setting the state of its end of the channel to "opening" and
 * sends out an interrupt. We detect this change and register an smd device to
 * consume the channel. Upon finding a consumer we finish the handshake and the
 * channel is up.
 *
 * Upon closing a channel, the remote processor will update the state of its
 * end of the channel and signal us; we will then unregister any attached
 * device and close our end of the channel.
 *
 * Devices attached to a channel can use the qcom_smd_send function to push
 * data to the channel; this is done by copying the data into the tx ring
 * buffer, updating the pointers in the channel info and signaling the remote
 * processor.
 *
 * The remote processor does the equivalent when it transfers data, and upon
 * receiving the interrupt we check the channel info for new data and deliver
 * it to the attached device. If the device is not ready to receive the data
 * we leave it in the ring buffer for now.
 */

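/*
 * Illustrative sketch of a client of this bus (not part of this driver): the
 * driver name, channel payload and helper names below are made up for the
 * example.
 *
 *	static int example_smd_callback(struct qcom_smd_device *qsdev,
 *					const void *data, size_t count)
 *	{
 *		// Consume the packet; returning 0 lets the rx tail advance.
 *		return 0;
 *	}
 *
 *	static int example_smd_probe(struct qcom_smd_device *qsdev)
 *	{
 *		static const char greeting[8] = "hello";
 *
 *		return qcom_smd_send(qsdev->channel, greeting, sizeof(greeting));
 *	}
 *
 *	static struct qcom_smd_driver example_smd_driver = {
 *		.probe = example_smd_probe,
 *		.callback = example_smd_callback,
 *		.driver = {
 *			.name = "example_smd",
 *		},
 *	};
 *
 * The client registers this from its module_init() with
 * qcom_smd_driver_register(&example_smd_driver).
 */
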
struct smd_channel_info;
struct smd_channel_info_pair;
struct smd_channel_info_word;
struct smd_channel_info_word_pair;

#define SMD_ALLOC_TBL_COUNT	2
#define SMD_ALLOC_TBL_SIZE	64

/*
 * This lists the various smem heap items relevant for the allocation table and
 * smd channel entries.
 */
static const struct {
	unsigned alloc_tbl_id;
	unsigned info_base_id;
	unsigned fifo_base_id;
} smem_items[SMD_ALLOC_TBL_COUNT] = {
	{
		.alloc_tbl_id = 13,
		.info_base_id = 14,
		.fifo_base_id = 338
	},
	{
		.alloc_tbl_id = 14,
		.info_base_id = 266,
		.fifo_base_id = 202,
	},
};

/**
 * struct qcom_smd_edge - representing a remote processor
 * @smd: handle to qcom_smd
 * @of_node: of_node handle for information related to this edge
 * @edge_id: identifier of this edge
 * @remote_pid: identifier of remote processor
 * @irq: interrupt for signals on this edge
 * @ipc_regmap: regmap handle holding the outgoing ipc register
 * @ipc_offset: offset within @ipc_regmap of the register for ipc
 * @ipc_bit: bit in the register at @ipc_offset of @ipc_regmap
 * @channels: list of all channels detected on this edge
 * @channels_lock: guard for modifications of @channels
 * @allocated: array of bitmaps representing already allocated channels
 * @need_rescan: flag that the @work needs to scan smem for new channels
 * @smem_available: last available amount of smem triggering a channel scan
 * @work: work item for edge housekeeping
 */
struct qcom_smd_edge {
	struct qcom_smd *smd;
	struct device_node *of_node;
	unsigned edge_id;
	unsigned remote_pid;

	int irq;

	struct regmap *ipc_regmap;
	int ipc_offset;
	int ipc_bit;

	struct list_head channels;
	spinlock_t channels_lock;

	DECLARE_BITMAP(allocated[SMD_ALLOC_TBL_COUNT], SMD_ALLOC_TBL_SIZE);

	bool need_rescan;
	unsigned smem_available;

	struct work_struct work;
};

/*
 * SMD channel states.
 */
enum smd_channel_state {
	SMD_CHANNEL_CLOSED,
	SMD_CHANNEL_OPENING,
	SMD_CHANNEL_OPENED,
	SMD_CHANNEL_FLUSHING,
	SMD_CHANNEL_CLOSING,
	SMD_CHANNEL_RESET,
	SMD_CHANNEL_RESET_OPENING
};

/**
 * struct qcom_smd_channel - smd channel struct
 * @edge: qcom_smd_edge this channel is living on
 * @qsdev: reference to an associated smd client device
 * @name: name of the channel
 * @state: local state of the channel
 * @remote_state: remote state of the channel
 * @info: byte aligned outgoing/incoming channel info
 * @info_word: word aligned outgoing/incoming channel info
 * @tx_lock: lock to make writes to the channel mutually exclusive
 * @fblockread_event: wakeup event tied to tx fBLOCKREADINTR
 * @tx_fifo: pointer to the outgoing ring buffer
 * @rx_fifo: pointer to the incoming ring buffer
 * @fifo_size: size of each ring buffer
 * @bounce_buffer: bounce buffer for reading wrapped packets
 * @cb: callback function registered for this channel
 * @recv_lock: guard for rx info modifications and cb pointer
 * @pkt_size: size of the currently handled packet
 * @list: list entry for @channels in qcom_smd_edge
 */
struct qcom_smd_channel {
	struct qcom_smd_edge *edge;

	struct qcom_smd_device *qsdev;

	char *name;
	enum smd_channel_state state;
	enum smd_channel_state remote_state;

	struct smd_channel_info_pair *info;
	struct smd_channel_info_word_pair *info_word;

	struct mutex tx_lock;
	wait_queue_head_t fblockread_event;

	void *tx_fifo;
	void *rx_fifo;
	int fifo_size;

	void *bounce_buffer;
	int (*cb)(struct qcom_smd_device *, const void *, size_t);

	spinlock_t recv_lock;

	int pkt_size;

	struct list_head list;
};

/**
 * struct qcom_smd - smd struct
 * @dev: device struct
 * @num_edges: number of entries in @edges
 * @edges: array of edges to be handled
 */
struct qcom_smd {
	struct device *dev;

	unsigned num_edges;
	struct qcom_smd_edge edges[0];
};

/*
 * Format of the smd_info smem items, for byte aligned channels.
 */
struct smd_channel_info {
	u32 state;
	u8 fDSR;
	u8 fCTS;
	u8 fCD;
	u8 fRI;
	u8 fHEAD;
	u8 fTAIL;
	u8 fSTATE;
	u8 fBLOCKREADINTR;
	u32 tail;
	u32 head;
};

struct smd_channel_info_pair {
	struct smd_channel_info tx;
	struct smd_channel_info rx;
};

/*
 * Format of the smd_info smem items, for word aligned channels.
 */
struct smd_channel_info_word {
	u32 state;
	u32 fDSR;
	u32 fCTS;
	u32 fCD;
	u32 fRI;
	u32 fHEAD;
	u32 fTAIL;
	u32 fSTATE;
	u32 fBLOCKREADINTR;
	u32 tail;
	u32 head;
};

struct smd_channel_info_word_pair {
	struct smd_channel_info_word tx;
	struct smd_channel_info_word rx;
};

#define GET_RX_CHANNEL_INFO(channel, param) \
	(channel->info_word ? \
		channel->info_word->rx.param : \
		channel->info->rx.param)

#define SET_RX_CHANNEL_INFO(channel, param, value) \
	(channel->info_word ? \
		(channel->info_word->rx.param = value) : \
		(channel->info->rx.param = value))

#define GET_TX_CHANNEL_INFO(channel, param) \
	(channel->info_word ? \
		channel->info_word->tx.param : \
		channel->info->tx.param)

#define SET_TX_CHANNEL_INFO(channel, param, value) \
	(channel->info_word ? \
		(channel->info_word->tx.param = value) : \
		(channel->info->tx.param = value))

/**
 * struct qcom_smd_alloc_entry - channel allocation entry
 * @name: channel name
 * @cid: channel index
 * @flags: channel flags and edge id
 * @ref_count: reference count of the channel
 */
struct qcom_smd_alloc_entry {
	u8 name[20];
	u32 cid;
	u32 flags;
	u32 ref_count;
} __packed;

#define SMD_CHANNEL_FLAGS_EDGE_MASK	0xff
#define SMD_CHANNEL_FLAGS_STREAM	BIT(8)
#define SMD_CHANNEL_FLAGS_PACKET	BIT(9)

/*
 * Each smd packet contains a 20 byte header, with the first 4 bytes being the
 * length of the packet.
 */
#define SMD_PACKET_HEADER_LEN	20

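/*
 * For illustration (values made up): a 32 byte payload pushed through
 * qcom_smd_send() occupies 20 + 32 = 52 bytes of the tx fifo and is framed as
 *
 *	u32 hdr[5] = { 32, 0, 0, 0, 0 };	followed by the 32 byte payload
 *
 * where only the first header word (the length) is used by this driver. The
 * receive path reads that word to learn the packet size, skips the remaining
 * header bytes and then delivers the payload to the client.
 */
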
/*
 * Signal the remote processor associated with 'channel'.
 */
static void qcom_smd_signal_channel(struct qcom_smd_channel *channel)
{
	struct qcom_smd_edge *edge = channel->edge;

	regmap_write(edge->ipc_regmap, edge->ipc_offset, BIT(edge->ipc_bit));
}

/*
 * Initialize the tx channel info
 */
static void qcom_smd_channel_reset(struct qcom_smd_channel *channel)
{
	SET_TX_CHANNEL_INFO(channel, state, SMD_CHANNEL_CLOSED);
	SET_TX_CHANNEL_INFO(channel, fDSR, 0);
	SET_TX_CHANNEL_INFO(channel, fCTS, 0);
	SET_TX_CHANNEL_INFO(channel, fCD, 0);
	SET_TX_CHANNEL_INFO(channel, fRI, 0);
	SET_TX_CHANNEL_INFO(channel, fHEAD, 0);
	SET_TX_CHANNEL_INFO(channel, fTAIL, 0);
	SET_TX_CHANNEL_INFO(channel, fSTATE, 1);
	SET_TX_CHANNEL_INFO(channel, fBLOCKREADINTR, 1);
	SET_TX_CHANNEL_INFO(channel, head, 0);
	SET_TX_CHANNEL_INFO(channel, tail, 0);

	qcom_smd_signal_channel(channel);

	channel->state = SMD_CHANNEL_CLOSED;
	channel->pkt_size = 0;
}

/*
 * Calculate the amount of data available in the rx fifo
 */
static size_t qcom_smd_channel_get_rx_avail(struct qcom_smd_channel *channel)
{
	unsigned head;
	unsigned tail;

	head = GET_RX_CHANNEL_INFO(channel, head);
	tail = GET_RX_CHANNEL_INFO(channel, tail);

	return (head - tail) & (channel->fifo_size - 1);
}

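/*
 * Worked example of the index arithmetic above, assuming a 1024 byte fifo:
 * with head = 16 and tail = 1008 the data has wrapped around the end of the
 * buffer, and (16 - 1008) & 1023 = 32 bytes are available to read. The same
 * power-of-two masking is used on the tx side in qcom_smd_get_tx_avail(),
 * which reports at most fifo_size - 1 bytes free so that head == tail always
 * means "empty" rather than "full".
 */
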
/*
 * Set tx channel state and inform the remote processor
 */
static void qcom_smd_channel_set_state(struct qcom_smd_channel *channel,
				       int state)
{
	struct qcom_smd_edge *edge = channel->edge;
	bool is_open = state == SMD_CHANNEL_OPENED;

	if (channel->state == state)
		return;

	dev_dbg(edge->smd->dev, "set_state(%s, %d)\n", channel->name, state);

	SET_TX_CHANNEL_INFO(channel, fDSR, is_open);
	SET_TX_CHANNEL_INFO(channel, fCTS, is_open);
	SET_TX_CHANNEL_INFO(channel, fCD, is_open);

	SET_TX_CHANNEL_INFO(channel, state, state);
	SET_TX_CHANNEL_INFO(channel, fSTATE, 1);

	channel->state = state;
	qcom_smd_signal_channel(channel);
}

/*
 * Copy count bytes of data using 32bit accesses, if that's required.
 */
static void smd_copy_to_fifo(void __iomem *_dst,
			     const void *_src,
			     size_t count,
			     bool word_aligned)
{
	u32 *dst = (u32 *)_dst;
	u32 *src = (u32 *)_src;

	if (word_aligned) {
		count /= sizeof(u32);
		while (count--)
			writel_relaxed(*src++, dst++);
	} else {
		memcpy_toio(_dst, _src, count);
	}
}

/*
 * Copy count bytes of data using 32bit accesses, if that is required.
 */
static void smd_copy_from_fifo(void *_dst,
			       const void __iomem *_src,
			       size_t count,
			       bool word_aligned)
{
	u32 *dst = (u32 *)_dst;
	u32 *src = (u32 *)_src;

	if (word_aligned) {
		count /= sizeof(u32);
		while (count--)
			*dst++ = readl_relaxed(src++);
	} else {
		memcpy_fromio(_dst, _src, count);
	}
}

/*
 * Read count bytes of data from the rx fifo into buf, but don't advance the
 * tail.
 */
static size_t qcom_smd_channel_peek(struct qcom_smd_channel *channel,
				    void *buf, size_t count)
{
	bool word_aligned;
	unsigned tail;
	size_t len;

	word_aligned = channel->info_word;
	tail = GET_RX_CHANNEL_INFO(channel, tail);

	len = min_t(size_t, count, channel->fifo_size - tail);
	if (len) {
		smd_copy_from_fifo(buf,
				   channel->rx_fifo + tail,
				   len,
				   word_aligned);
	}

	if (len != count) {
		smd_copy_from_fifo(buf + len,
				   channel->rx_fifo,
				   count - len,
				   word_aligned);
	}

	return count;
}

/*
 * Advance the rx tail by count bytes.
 */
static void qcom_smd_channel_advance(struct qcom_smd_channel *channel,
				     size_t count)
{
	unsigned tail;

	tail = GET_RX_CHANNEL_INFO(channel, tail);
	tail += count;
	tail &= (channel->fifo_size - 1);
	SET_RX_CHANNEL_INFO(channel, tail, tail);
}

/*
 * Read out a single packet from the rx fifo and deliver it to the device
 */
static int qcom_smd_channel_recv_single(struct qcom_smd_channel *channel)
{
	struct qcom_smd_device *qsdev = channel->qsdev;
	unsigned tail;
	size_t len;
	void *ptr;
	int ret;

	if (!channel->cb)
		return 0;

	tail = GET_RX_CHANNEL_INFO(channel, tail);

	/* Use bounce buffer if the data wraps */
	if (tail + channel->pkt_size >= channel->fifo_size) {
		ptr = channel->bounce_buffer;
		len = qcom_smd_channel_peek(channel, ptr, channel->pkt_size);
	} else {
		ptr = channel->rx_fifo + tail;
		len = channel->pkt_size;
	}

	ret = channel->cb(qsdev, ptr, len);
	if (ret < 0)
		return ret;

	/* Only forward the tail if the client consumed the data */
	qcom_smd_channel_advance(channel, len);

	channel->pkt_size = 0;

	return 0;
}

/*
 * Per channel interrupt handling
 */
static bool qcom_smd_channel_intr(struct qcom_smd_channel *channel)
{
	bool need_state_scan = false;
	int remote_state;
	u32 pktlen;
	int avail;
	int ret;

	/* Handle state changes */
	remote_state = GET_RX_CHANNEL_INFO(channel, state);
	if (remote_state != channel->remote_state) {
		channel->remote_state = remote_state;
		need_state_scan = true;
	}
	/* Indicate that we have seen any state change */
	SET_RX_CHANNEL_INFO(channel, fSTATE, 0);

	/* Signal waiting qcom_smd_send() about the interrupt */
	if (!GET_TX_CHANNEL_INFO(channel, fBLOCKREADINTR))
		wake_up_interruptible(&channel->fblockread_event);

	/* Don't consume any data until we've opened the channel */
	if (channel->state != SMD_CHANNEL_OPENED)
		goto out;

	/* Indicate that we've seen the new data */
	SET_RX_CHANNEL_INFO(channel, fHEAD, 0);

	/* Consume data */
	for (;;) {
		avail = qcom_smd_channel_get_rx_avail(channel);

		if (!channel->pkt_size && avail >= SMD_PACKET_HEADER_LEN) {
			qcom_smd_channel_peek(channel, &pktlen, sizeof(pktlen));
			qcom_smd_channel_advance(channel, SMD_PACKET_HEADER_LEN);
			channel->pkt_size = pktlen;
		} else if (channel->pkt_size && avail >= channel->pkt_size) {
			ret = qcom_smd_channel_recv_single(channel);
			if (ret)
				break;
		} else {
			break;
		}
	}

	/* Indicate that we have seen and updated tail */
	SET_RX_CHANNEL_INFO(channel, fTAIL, 1);

	/* Signal the remote that we've consumed the data (if requested) */
	if (!GET_RX_CHANNEL_INFO(channel, fBLOCKREADINTR)) {
		/* Ensure ordering of channel info updates */
		wmb();

		qcom_smd_signal_channel(channel);
	}

out:
	return need_state_scan;
}

/*
 * The edge interrupts are triggered by the remote processor on state changes,
 * channel info updates or when new channels are created.
 */
static irqreturn_t qcom_smd_edge_intr(int irq, void *data)
{
	struct qcom_smd_edge *edge = data;
	struct qcom_smd_channel *channel;
	unsigned available;
	bool kick_worker = false;

	/*
	 * Handle state changes or data on each of the channels on this edge
	 */
	spin_lock(&edge->channels_lock);
	list_for_each_entry(channel, &edge->channels, list) {
		spin_lock(&channel->recv_lock);
		kick_worker |= qcom_smd_channel_intr(channel);
		spin_unlock(&channel->recv_lock);
	}
	spin_unlock(&edge->channels_lock);

	/*
	 * Creating a new channel requires allocating an smem entry, so we only
	 * have to scan if the amount of available space in smem has changed
	 * since the last scan.
	 */
	available = qcom_smem_get_free_space(edge->remote_pid);
	if (available != edge->smem_available) {
		edge->smem_available = available;
		edge->need_rescan = true;
		kick_worker = true;
	}

	if (kick_worker)
		schedule_work(&edge->work);

	return IRQ_HANDLED;
}

/*
 * Delivers any outstanding packets in the rx fifo; can be used after probe of
 * the clients to deliver any packets that weren't delivered before the client
 * was set up.
 */
static void qcom_smd_channel_resume(struct qcom_smd_channel *channel)
{
	unsigned long flags;

	spin_lock_irqsave(&channel->recv_lock, flags);
	qcom_smd_channel_intr(channel);
	spin_unlock_irqrestore(&channel->recv_lock, flags);
}

/*
 * Calculate how much space is available in the tx fifo.
 */
static size_t qcom_smd_get_tx_avail(struct qcom_smd_channel *channel)
{
	unsigned head;
	unsigned tail;
	unsigned mask = channel->fifo_size - 1;

	head = GET_TX_CHANNEL_INFO(channel, head);
	tail = GET_TX_CHANNEL_INFO(channel, tail);

	return mask - ((head - tail) & mask);
}

/*
 * Write count bytes of data into channel, possibly wrapping in the ring buffer
 */
static int qcom_smd_write_fifo(struct qcom_smd_channel *channel,
			       const void *data,
			       size_t count)
{
	bool word_aligned;
	unsigned head;
	size_t len;

	word_aligned = channel->info_word;
	head = GET_TX_CHANNEL_INFO(channel, head);

	len = min_t(size_t, count, channel->fifo_size - head);
	if (len) {
		smd_copy_to_fifo(channel->tx_fifo + head,
				 data,
				 len,
				 word_aligned);
	}

	if (len != count) {
		smd_copy_to_fifo(channel->tx_fifo,
				 data + len,
				 count - len,
				 word_aligned);
	}

	head += count;
	head &= (channel->fifo_size - 1);
	SET_TX_CHANNEL_INFO(channel, head, head);

	return count;
}

/**
 * qcom_smd_send - write data to smd channel
 * @channel: channel handle
 * @data: buffer of data to write
 * @len: number of bytes to write
 *
 * This is a blocking write of len bytes into the channel's tx ring buffer,
 * after which the remote end is signaled. It will sleep until there is enough
 * space available in the tx buffer, utilizing the fBLOCKREADINTR signaling
 * mechanism to avoid polling.
 */
int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len)
{
	u32 hdr[5] = {len,};
	int tlen = sizeof(hdr) + len;
	int ret;

	/* Word aligned channels only accept word size aligned data */
	if (channel->info_word && len % 4)
		return -EINVAL;

	ret = mutex_lock_interruptible(&channel->tx_lock);
	if (ret)
		return ret;

	while (qcom_smd_get_tx_avail(channel) < tlen) {
		if (channel->state != SMD_CHANNEL_OPENED) {
			ret = -EPIPE;
			goto out;
		}

		SET_TX_CHANNEL_INFO(channel, fBLOCKREADINTR, 0);

		ret = wait_event_interruptible(channel->fblockread_event,
				qcom_smd_get_tx_avail(channel) >= tlen ||
				channel->state != SMD_CHANNEL_OPENED);
		if (ret)
			goto out;

		SET_TX_CHANNEL_INFO(channel, fBLOCKREADINTR, 1);
	}

	SET_TX_CHANNEL_INFO(channel, fTAIL, 0);

	qcom_smd_write_fifo(channel, hdr, sizeof(hdr));
	qcom_smd_write_fifo(channel, data, len);

	SET_TX_CHANNEL_INFO(channel, fHEAD, 1);

	/* Ensure ordering of channel info updates */
	wmb();

	qcom_smd_signal_channel(channel);

out:
	mutex_unlock(&channel->tx_lock);

	return ret;
}
EXPORT_SYMBOL(qcom_smd_send);

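/*
 * Example use of qcom_smd_send() from a client (the request structure below
 * is made up for the sketch): the call blocks until the packet fits in the tx
 * fifo, returns -EPIPE if the channel is no longer open, and on word aligned
 * channels rejects payloads whose size is not a multiple of four.
 *
 *	struct example_req {
 *		u32 cmd;
 *		u32 arg;
 *	} req = { 1, 0 };
 *	int ret;
 *
 *	ret = qcom_smd_send(qsdev->channel, &req, sizeof(req));
 *	if (ret < 0)
 *		dev_err(&qsdev->dev, "failed to send request: %d\n", ret);
 */
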
static struct qcom_smd_device *to_smd_device(struct device *dev)
{
	return container_of(dev, struct qcom_smd_device, dev);
}

static struct qcom_smd_driver *to_smd_driver(struct device *dev)
{
	struct qcom_smd_device *qsdev = to_smd_device(dev);

	return container_of(qsdev->dev.driver, struct qcom_smd_driver, driver);
}

static int qcom_smd_dev_match(struct device *dev, struct device_driver *drv)
{
	struct qcom_smd_device *qsdev = to_smd_device(dev);
	struct qcom_smd_driver *qsdrv = container_of(drv, struct qcom_smd_driver, driver);
	const struct qcom_smd_id *match = qsdrv->smd_match_table;
	const char *name = qsdev->channel->name;

	if (match) {
		while (match->name[0]) {
			if (!strcmp(match->name, name))
				return 1;
			match++;
		}
	}

	return of_driver_match_device(dev, drv);
}

/*
 * Probe the smd client.
 *
 * The remote side has indicated that it wants the channel to be opened, so
 * complete the state handshake and probe our client driver.
 */
static int qcom_smd_dev_probe(struct device *dev)
{
	struct qcom_smd_device *qsdev = to_smd_device(dev);
	struct qcom_smd_driver *qsdrv = to_smd_driver(dev);
	struct qcom_smd_channel *channel = qsdev->channel;
	size_t bb_size;
	int ret;

	/*
	 * Packets are at most 4k, but reduce the buffer if the fifo is smaller
	 */
	bb_size = min(channel->fifo_size, SZ_4K);
	channel->bounce_buffer = kmalloc(bb_size, GFP_KERNEL);
	if (!channel->bounce_buffer)
		return -ENOMEM;

	channel->cb = qsdrv->callback;

	qcom_smd_channel_set_state(channel, SMD_CHANNEL_OPENING);

	qcom_smd_channel_set_state(channel, SMD_CHANNEL_OPENED);

	ret = qsdrv->probe(qsdev);
	if (ret)
		goto err;

	qcom_smd_channel_resume(channel);

	return 0;

err:
	dev_err(&qsdev->dev, "probe failed\n");

	channel->cb = NULL;
	kfree(channel->bounce_buffer);
	channel->bounce_buffer = NULL;

	qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSED);
	return ret;
}

/*
 * Remove the smd client.
 *
 * The channel is going away, for some reason, so remove the smd client and
 * reset the channel state.
 */
static int qcom_smd_dev_remove(struct device *dev)
{
	struct qcom_smd_device *qsdev = to_smd_device(dev);
	struct qcom_smd_driver *qsdrv = to_smd_driver(dev);
	struct qcom_smd_channel *channel = qsdev->channel;
	unsigned long flags;

	qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSING);

	/*
	 * Make sure we don't race with the code receiving data.
	 */
	spin_lock_irqsave(&channel->recv_lock, flags);
	channel->cb = NULL;
	spin_unlock_irqrestore(&channel->recv_lock, flags);

	/* Wake up any sleepers in qcom_smd_send() */
	wake_up_interruptible(&channel->fblockread_event);

	/*
	 * We expect that the client might block in remove() waiting for any
	 * outstanding calls to qcom_smd_send() to wake up and finish.
	 */
	if (qsdrv->remove)
		qsdrv->remove(qsdev);

	/*
	 * The client is now gone; clean up and reset the channel state.
	 */
	channel->qsdev = NULL;
	kfree(channel->bounce_buffer);
	channel->bounce_buffer = NULL;

	qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSED);

	qcom_smd_channel_reset(channel);

	return 0;
}

static struct bus_type qcom_smd_bus = {
	.name = "qcom_smd",
	.match = qcom_smd_dev_match,
	.probe = qcom_smd_dev_probe,
	.remove = qcom_smd_dev_remove,
};

/*
 * Release function for the qcom_smd_device object.
 */
static void qcom_smd_release_device(struct device *dev)
{
	struct qcom_smd_device *qsdev = to_smd_device(dev);

	kfree(qsdev);
}

/*
 * Finds the device_node for the smd child interested in this channel.
 */
static struct device_node *qcom_smd_match_channel(struct device_node *edge_node,
						  const char *channel)
{
	struct device_node *child;
	const char *name;
	const char *key;
	int ret;

	for_each_available_child_of_node(edge_node, child) {
		key = "qcom,smd-channels";
		ret = of_property_read_string(child, key, &name);
		if (ret) {
			of_node_put(child);
			continue;
		}

		if (strcmp(name, channel) == 0)
			return child;
	}

	return NULL;
}

/*
 * Create an smd client device for a channel that is being opened.
 */
static int qcom_smd_create_device(struct qcom_smd_channel *channel)
{
	struct qcom_smd_device *qsdev;
	struct qcom_smd_edge *edge = channel->edge;
	struct device_node *node;
	struct qcom_smd *smd = edge->smd;
	int ret;

	if (channel->qsdev)
		return -EEXIST;

	dev_dbg(smd->dev, "registering '%s'\n", channel->name);

	qsdev = kzalloc(sizeof(*qsdev), GFP_KERNEL);
	if (!qsdev)
		return -ENOMEM;

	node = qcom_smd_match_channel(edge->of_node, channel->name);
	dev_set_name(&qsdev->dev, "%s.%s",
		     edge->of_node->name,
		     node ? node->name : channel->name);

	qsdev->dev.parent = smd->dev;
	qsdev->dev.bus = &qcom_smd_bus;
	qsdev->dev.release = qcom_smd_release_device;
	qsdev->dev.of_node = node;

	qsdev->channel = channel;

	channel->qsdev = qsdev;

	ret = device_register(&qsdev->dev);
	if (ret) {
		dev_err(smd->dev, "device_register failed: %d\n", ret);
		put_device(&qsdev->dev);
	}

	return ret;
}

/*
 * Destroy an smd client device for a channel that's going away.
 */
static void qcom_smd_destroy_device(struct qcom_smd_channel *channel)
{
	struct device *dev;

	BUG_ON(!channel->qsdev);

	dev = &channel->qsdev->dev;

	device_unregister(dev);
	of_node_put(dev->of_node);
	put_device(dev);
}

/**
 * qcom_smd_driver_register - register an smd driver
 * @qsdrv: qcom_smd_driver struct
 */
int qcom_smd_driver_register(struct qcom_smd_driver *qsdrv)
{
	qsdrv->driver.bus = &qcom_smd_bus;
	return driver_register(&qsdrv->driver);
}
EXPORT_SYMBOL(qcom_smd_driver_register);

/**
 * qcom_smd_driver_unregister - unregister an smd driver
 * @qsdrv: qcom_smd_driver struct
 */
void qcom_smd_driver_unregister(struct qcom_smd_driver *qsdrv)
{
	driver_unregister(&qsdrv->driver);
}
EXPORT_SYMBOL(qcom_smd_driver_unregister);

/*
 * Allocate the qcom_smd_channel object for a newly found smd channel,
 * retrieving and validating the smem items involved.
 */
static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *edge,
							 unsigned smem_info_item,
							 unsigned smem_fifo_item,
							 char *name)
{
	struct qcom_smd_channel *channel;
	struct qcom_smd *smd = edge->smd;
	size_t fifo_size;
	size_t info_size;
	void *fifo_base;
	void *info;
	int ret;

	channel = devm_kzalloc(smd->dev, sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return ERR_PTR(-ENOMEM);

	channel->edge = edge;
	channel->name = devm_kstrdup(smd->dev, name, GFP_KERNEL);
	if (!channel->name)
		return ERR_PTR(-ENOMEM);

	mutex_init(&channel->tx_lock);
	spin_lock_init(&channel->recv_lock);
	init_waitqueue_head(&channel->fblockread_event);

	info = qcom_smem_get(edge->remote_pid, smem_info_item, &info_size);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto free_name_and_channel;
	}

	/*
	 * Use the size of the item to figure out which channel info struct to
	 * use.
	 */
	if (info_size == 2 * sizeof(struct smd_channel_info_word)) {
		channel->info_word = info;
	} else if (info_size == 2 * sizeof(struct smd_channel_info)) {
		channel->info = info;
	} else {
		dev_err(smd->dev,
			"channel info of size %zu not supported\n", info_size);
		ret = -EINVAL;
		goto free_name_and_channel;
	}

	fifo_base = qcom_smem_get(edge->remote_pid, smem_fifo_item, &fifo_size);
	if (IS_ERR(fifo_base)) {
		ret = PTR_ERR(fifo_base);
		goto free_name_and_channel;
	}

	/* The channel consists of an rx and tx fifo of equal size */
	fifo_size /= 2;

	dev_dbg(smd->dev, "new channel '%s' info-size: %zu fifo-size: %zu\n",
		name, info_size, fifo_size);

	channel->tx_fifo = fifo_base;
	channel->rx_fifo = fifo_base + fifo_size;
	channel->fifo_size = fifo_size;

	qcom_smd_channel_reset(channel);

	return channel;

free_name_and_channel:
	devm_kfree(smd->dev, channel->name);
	devm_kfree(smd->dev, channel);

	return ERR_PTR(ret);
}

/*
 * Scans the allocation table for any newly allocated channels, calls
 * qcom_smd_create_channel() to create representations of these and adds
 * them to the edge's list of channels.
 */
static void qcom_discover_channels(struct qcom_smd_edge *edge)
{
	struct qcom_smd_alloc_entry *alloc_tbl;
	struct qcom_smd_alloc_entry *entry;
	struct qcom_smd_channel *channel;
	struct qcom_smd *smd = edge->smd;
	unsigned long flags;
	unsigned fifo_id;
	unsigned info_id;
	int tbl;
	int i;

	for (tbl = 0; tbl < SMD_ALLOC_TBL_COUNT; tbl++) {
		alloc_tbl = qcom_smem_get(edge->remote_pid,
					  smem_items[tbl].alloc_tbl_id, NULL);
		if (IS_ERR(alloc_tbl))
			continue;

		for (i = 0; i < SMD_ALLOC_TBL_SIZE; i++) {
			entry = &alloc_tbl[i];
			if (test_bit(i, edge->allocated[tbl]))
				continue;

			if (entry->ref_count == 0)
				continue;

			if (!entry->name[0])
				continue;

			if (!(entry->flags & SMD_CHANNEL_FLAGS_PACKET))
				continue;

			if ((entry->flags & SMD_CHANNEL_FLAGS_EDGE_MASK) != edge->edge_id)
				continue;

			info_id = smem_items[tbl].info_base_id + entry->cid;
			fifo_id = smem_items[tbl].fifo_base_id + entry->cid;

			channel = qcom_smd_create_channel(edge, info_id, fifo_id, entry->name);
			if (IS_ERR(channel))
				continue;

			spin_lock_irqsave(&edge->channels_lock, flags);
			list_add(&channel->list, &edge->channels);
			spin_unlock_irqrestore(&edge->channels_lock, flags);

			dev_dbg(smd->dev, "new channel found: '%s'\n", channel->name);
			set_bit(i, edge->allocated[tbl]);
		}
	}

	schedule_work(&edge->work);
}

/*
 * This per edge worker scans smem for any new channels and registers these. It
 * then scans all registered channels for state changes that should be handled
 * by creating or destroying smd client devices for the registered channels.
 *
 * LOCKING: edge->channels_lock does not need to be held during the traversal
 * of the channels list, as it's done synchronously with the only writer.
 */
static void qcom_channel_state_worker(struct work_struct *work)
{
	struct qcom_smd_channel *channel;
	struct qcom_smd_edge *edge = container_of(work,
						  struct qcom_smd_edge,
						  work);
	unsigned remote_state;

	/*
	 * Rescan smem if we have reason to believe that there are new channels.
	 */
	if (edge->need_rescan) {
		edge->need_rescan = false;
		qcom_discover_channels(edge);
	}

	/*
	 * Register a device for any closed channel where the remote processor
	 * is showing interest in opening the channel.
	 */
	list_for_each_entry(channel, &edge->channels, list) {
		if (channel->state != SMD_CHANNEL_CLOSED)
			continue;

		remote_state = GET_RX_CHANNEL_INFO(channel, state);
		if (remote_state != SMD_CHANNEL_OPENING &&
		    remote_state != SMD_CHANNEL_OPENED)
			continue;

		qcom_smd_create_device(channel);
	}

	/*
	 * Unregister the device for any channel that is opened where the
	 * remote processor is closing the channel.
	 */
	list_for_each_entry(channel, &edge->channels, list) {
		if (channel->state != SMD_CHANNEL_OPENING &&
		    channel->state != SMD_CHANNEL_OPENED)
			continue;

		remote_state = GET_RX_CHANNEL_INFO(channel, state);
		if (remote_state == SMD_CHANNEL_OPENING ||
		    remote_state == SMD_CHANNEL_OPENED)
			continue;

		qcom_smd_destroy_device(channel);
	}
}

/*
 * Parses an of_node describing an edge.
 */
static int qcom_smd_parse_edge(struct device *dev,
			       struct device_node *node,
			       struct qcom_smd_edge *edge)
{
	struct device_node *syscon_np;
	const char *key;
	int irq;
	int ret;

	INIT_LIST_HEAD(&edge->channels);
	spin_lock_init(&edge->channels_lock);

	INIT_WORK(&edge->work, qcom_channel_state_worker);

	edge->of_node = of_node_get(node);

	irq = irq_of_parse_and_map(node, 0);
	if (irq < 0) {
		dev_err(dev, "required smd interrupt missing\n");
		return -EINVAL;
	}

	ret = devm_request_irq(dev, irq,
			       qcom_smd_edge_intr, IRQF_TRIGGER_RISING,
			       node->name, edge);
	if (ret) {
		dev_err(dev, "failed to request smd irq\n");
		return ret;
	}

	edge->irq = irq;

	key = "qcom,smd-edge";
	ret = of_property_read_u32(node, key, &edge->edge_id);
	if (ret) {
		dev_err(dev, "edge missing %s property\n", key);
		return -EINVAL;
	}

	edge->remote_pid = QCOM_SMEM_HOST_ANY;
	key = "qcom,remote-pid";
	of_property_read_u32(node, key, &edge->remote_pid);

	syscon_np = of_parse_phandle(node, "qcom,ipc", 0);
	if (!syscon_np) {
		dev_err(dev, "no qcom,ipc node\n");
		return -ENODEV;
	}

	edge->ipc_regmap = syscon_node_to_regmap(syscon_np);
	if (IS_ERR(edge->ipc_regmap))
		return PTR_ERR(edge->ipc_regmap);

	key = "qcom,ipc";
	ret = of_property_read_u32_index(node, key, 1, &edge->ipc_offset);
	if (ret < 0) {
		dev_err(dev, "no offset in %s\n", key);
		return -EINVAL;
	}

	ret = of_property_read_u32_index(node, key, 2, &edge->ipc_bit);
	if (ret < 0) {
		dev_err(dev, "no bit in %s\n", key);
		return -EINVAL;
	}

	return 0;
}

static int qcom_smd_probe(struct platform_device *pdev)
{
	struct qcom_smd_edge *edge;
	struct device_node *node;
	struct qcom_smd *smd;
	size_t array_size;
	int num_edges;
	int ret;
	int i = 0;
	void *p;

	/* Wait for smem */
	p = qcom_smem_get(QCOM_SMEM_HOST_ANY, smem_items[0].alloc_tbl_id, NULL);
	if (PTR_ERR(p) == -EPROBE_DEFER)
		return PTR_ERR(p);

	num_edges = of_get_available_child_count(pdev->dev.of_node);
	array_size = sizeof(*smd) + num_edges * sizeof(struct qcom_smd_edge);
	smd = devm_kzalloc(&pdev->dev, array_size, GFP_KERNEL);
	if (!smd)
		return -ENOMEM;
	smd->dev = &pdev->dev;

	smd->num_edges = num_edges;
	for_each_available_child_of_node(pdev->dev.of_node, node) {
		edge = &smd->edges[i++];
		edge->smd = smd;

		ret = qcom_smd_parse_edge(&pdev->dev, node, edge);
		if (ret)
			continue;

		edge->need_rescan = true;
		schedule_work(&edge->work);
	}

	platform_set_drvdata(pdev, smd);

	return 0;
}

/*
 * Shut down all smd clients by making sure that each edge stops processing
 * events and scanning for new channels, then call destroy on the devices.
 */
static int qcom_smd_remove(struct platform_device *pdev)
{
	struct qcom_smd_channel *channel;
	struct qcom_smd_edge *edge;
	struct qcom_smd *smd = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < smd->num_edges; i++) {
		edge = &smd->edges[i];

		disable_irq(edge->irq);
		cancel_work_sync(&edge->work);

		list_for_each_entry(channel, &edge->channels, list) {
			if (!channel->qsdev)
				continue;

			qcom_smd_destroy_device(channel);
		}
	}

	return 0;
}

static const struct of_device_id qcom_smd_of_match[] = {
	{ .compatible = "qcom,smd" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_smd_of_match);

static struct platform_driver qcom_smd_driver = {
	.probe = qcom_smd_probe,
	.remove = qcom_smd_remove,
	.driver = {
		.name = "qcom-smd",
		.of_match_table = qcom_smd_of_match,
	},
};

static int __init qcom_smd_init(void)
{
	int ret;

	ret = bus_register(&qcom_smd_bus);
	if (ret) {
		pr_err("failed to register smd bus: %d\n", ret);
		return ret;
	}

	return platform_driver_register(&qcom_smd_driver);
}
postcore_initcall(qcom_smd_init);

static void __exit qcom_smd_exit(void)
{
	platform_driver_unregister(&qcom_smd_driver);
	bus_unregister(&qcom_smd_bus);
}
module_exit(qcom_smd_exit);

MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
MODULE_DESCRIPTION("Qualcomm Shared Memory Driver");
MODULE_LICENSE("GPL v2");