/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ipc_logging.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/srcu.h>
#include <linux/wait.h>
#include <soc/qcom/smem.h>
#include <soc/qcom/tracer_pkt.h>
#include "glink_core_if.h"
#include "glink_private.h"
#include "glink_xprt_if.h"

#define XPRT_NAME "smem"
#define FIFO_FULL_RESERVE 8
#define FIFO_ALIGNMENT 8
#define TX_BLOCKED_CMD_RESERVE 8 /* size of struct read_notif_request */
#define SMEM_CH_DESC_SIZE 32
#define RPM_TOC_ID 0x67727430
#define RPM_TX_FIFO_ID 0x61703272
#define RPM_RX_FIFO_ID 0x72326170
#define RPM_TOC_SIZE 256
#define RPM_MAX_TOC_ENTRIES 20
#define RPM_FIFO_ADDR_ALIGN_BYTES 3
#define TRACER_PKT_FEATURE BIT(2)
#define DEFERRED_CMDS_THRESHOLD 25
/**
 * enum command_types - definition of the types of commands sent/received
 * @VERSION_CMD: Version and feature set supported
 * @VERSION_ACK_CMD: Response for @VERSION_CMD
 * @OPEN_CMD: Open a channel
 * @CLOSE_CMD: Close a channel
 * @OPEN_ACK_CMD: Response to @OPEN_CMD
 * @RX_INTENT_CMD: RX intent for a channel was queued
 * @RX_DONE_CMD: Use of RX intent for a channel is complete
 * @RX_INTENT_REQ_CMD: Request to have RX intent queued
 * @RX_INTENT_REQ_ACK_CMD: Response for @RX_INTENT_REQ_CMD
 * @TX_DATA_CMD: Start of a data transfer
 * @ZERO_COPY_TX_DATA_CMD: Start of a data transfer with zero copy
 * @CLOSE_ACK_CMD: Response for @CLOSE_CMD
 * @TX_DATA_CONT_CMD: Continuation or end of a data transfer
 * @READ_NOTIF_CMD: Request for a notification when this cmd is read
 * @RX_DONE_W_REUSE_CMD: Same as @RX_DONE_CMD but also reuse the used intent
 * @SIGNALS_CMD: Sideband signals
 * @TRACER_PKT_CMD: Start of a Tracer Packet Command
 * @TRACER_PKT_CONT_CMD: Continuation or end of a Tracer Packet Command
 */
enum command_types {
	VERSION_CMD,
	VERSION_ACK_CMD,
	OPEN_CMD,
	CLOSE_CMD,
	OPEN_ACK_CMD,
	RX_INTENT_CMD,
	RX_DONE_CMD,
	RX_INTENT_REQ_CMD,
	RX_INTENT_REQ_ACK_CMD,
	TX_DATA_CMD,
	ZERO_COPY_TX_DATA_CMD,
	CLOSE_ACK_CMD,
	TX_DATA_CONT_CMD,
	READ_NOTIF_CMD,
	RX_DONE_W_REUSE_CMD,
	SIGNALS_CMD,
	TRACER_PKT_CMD,
	TRACER_PKT_CONT_CMD,
};
96
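/*
 * Note: on the wire, every command listed above begins with the same
 * 8-byte header that __rx_worker() and queue_cmd() below decode. A minimal
 * sketch of that layout (illustrative; field meanings vary per command):
 *
 *	struct command {
 *		uint16_t id;		-- one of enum command_types
 *		uint16_t param1;	-- e.g. lcid/rcid for channel cmds
 *		uint32_t param2;	-- e.g. name length for OPEN_CMD
 *	};
 *
 * For example (hypothetical values), an OPEN_CMD for lcid 5 with a
 * 12-byte name would be encoded as { OPEN_CMD, 5, 12 } followed by the
 * name padded to FIFO_ALIGNMENT; see tx_cmd_ch_open() below.
 */
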
/**
 * struct channel_desc - description of a channel fifo with a remote entity
 * @read_index: The read index for the fifo where data should be
 * consumed from.
 * @write_index: The write index for the fifo where data should be produced
 * to.
 *
 * This structure resides in SMEM and contains the control information for the
 * fifo data pipes of the channel. There is one physical channel between us
 * and a remote entity.
 */
struct channel_desc {
	uint32_t read_index;
	uint32_t write_index;
};

/**
 * struct mailbox_config_info - description of a mailbox transport channel
 * @tx_read_index: Offset into the tx fifo where data should be read from.
 * @tx_write_index: Offset into the tx fifo where new data will be placed.
 * @tx_size: Size of the transmit fifo in bytes.
 * @rx_read_index: Offset into the rx fifo where data should be read from.
 * @rx_write_index: Offset into the rx fifo where new data will be placed.
 * @rx_size: Size of the receive fifo in bytes.
 * @fifo: The fifos for the channel.
 */
struct mailbox_config_info {
	uint32_t tx_read_index;
	uint32_t tx_write_index;
	uint32_t tx_size;
	uint32_t rx_read_index;
	uint32_t rx_write_index;
	uint32_t rx_size;
	char fifo[]; /* tx fifo, then rx fifo */
};

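/*
 * The mailbox fifos live back to back in @fifo: the tx fifo occupies the
 * first @tx_size bytes and the rx fifo follows it, which is why
 * get_rx_fifo() below resolves the rx fifo as
 * &mailbox->fifo[mailbox->tx_size]. A worked example with hypothetical
 * sizes: tx_size = 1024 and rx_size = 1024 give a 2048-byte @fifo where
 * fifo[0..1023] is tx and fifo[1024..2047] is rx.
 */
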
/**
 * struct edge_info - local information for managing a single complete edge
 * @xprt_if: The transport interface registered with the
 * glink core associated with this edge.
 * @xprt_cfg: The transport configuration for the glink core
 * associated with this edge.
 * @intentless: True if this edge runs in intentless mode.
 * @irq_disabled: Flag indicating whether the interrupt is enabled
 * or disabled.
 * @remote_proc_id: The SMEM processor id for the remote side.
 * @rx_reset_reg: Reference to the register to reset the rx irq
 * line, if applicable.
 * @out_irq_reg: Reference to the register to send an irq to the
 * remote side.
 * @out_irq_mask: Mask written to @out_irq_reg to trigger the
 * correct irq.
 * @irq_line: The incoming interrupt line.
 * @tx_irq_count: Number of interrupts triggered.
 * @rx_irq_count: Number of interrupts received.
 * @tx_ch_desc: Reference to the channel description structure
 * for tx in SMEM for this edge.
 * @rx_ch_desc: Reference to the channel description structure
 * for rx in SMEM for this edge.
 * @tx_fifo: Reference to the transmit fifo in SMEM.
 * @rx_fifo: Reference to the receive fifo in SMEM.
 * @tx_fifo_size: Total size of @tx_fifo.
 * @rx_fifo_size: Total size of @rx_fifo.
 * @read_from_fifo: Memcpy helper used to read from this edge's fifo.
 * @write_to_fifo: Memcpy helper used to write to this edge's fifo.
 * @write_lock: Lock to serialize access to @tx_fifo.
 * @tx_blocked_queue: Queue of entities waiting for the remote side to
 * signal @tx_fifo has flushed and is now empty.
 * @tx_resume_needed: A tx resume signal needs to be sent to the glink
 * core once the remote side indicates @tx_fifo has
 * flushed.
 * @tx_blocked_signal_sent: Flag to indicate the flush signal has already
 * been sent, and a response is pending from the
 * remote side. Protected by @write_lock.
 * @kwork: Work to be executed when an irq is received.
 * @kworker: Handle to the entity processing deferred
 * commands.
 * @wakeup_work: Work to wake up tx-blocked writers and resume tx
 * in process context.
 * @tasklet: Handle to the tasklet that processes incoming data
 * packets in an atomic manner.
 * @task: Handle to the task context used to run @kworker.
 * @use_ref: Active uses of this transport use this to grab
 * a reference. Used for ssr synchronization.
 * @in_ssr: Signals if this transport is in ssr.
 * @rx_lock: Used to serialize concurrent instances of rx
 * processing.
 * @deferred_cmds: List of deferred commands that need to be
 * processed in process context.
 * @deferred_cmds_cnt: Number of deferred commands in queue.
 * @rt_vote_lock: Serialize access to RT rx votes.
 * @rt_votes: Vote count for RT rx thread priority.
 * @num_pw_states: Size of @ramp_time_us.
 * @ramp_time_us: Array of ramp times in microseconds where array
 * index position represents a power state.
 * @mailbox: Mailbox transport channel description reference.
 */
struct edge_info {
	struct glink_transport_if xprt_if;
	struct glink_core_transport_cfg xprt_cfg;
	bool intentless;
	bool irq_disabled;
	uint32_t remote_proc_id;
	void __iomem *rx_reset_reg;
	void __iomem *out_irq_reg;
	uint32_t out_irq_mask;
	uint32_t irq_line;
	uint32_t tx_irq_count;
	uint32_t rx_irq_count;
	struct channel_desc *tx_ch_desc;
	struct channel_desc *rx_ch_desc;
	void __iomem *tx_fifo;
	void __iomem *rx_fifo;
	uint32_t tx_fifo_size;
	uint32_t rx_fifo_size;
	void * (*read_from_fifo)(void *dest, const void *src, size_t num_bytes);
	void * (*write_to_fifo)(void *dest, const void *src, size_t num_bytes);
	spinlock_t write_lock;
	wait_queue_head_t tx_blocked_queue;
	bool tx_resume_needed;
	bool tx_blocked_signal_sent;
	struct kthread_work kwork;
	struct kthread_worker kworker;
	struct work_struct wakeup_work;
	struct task_struct *task;
	struct tasklet_struct tasklet;
	struct srcu_struct use_ref;
	bool in_ssr;
	spinlock_t rx_lock;
	struct list_head deferred_cmds;
	uint32_t deferred_cmds_cnt;
	spinlock_t rt_vote_lock;
	uint32_t rt_votes;
	uint32_t num_pw_states;
	unsigned long *ramp_time_us;
	struct mailbox_config_info *mailbox;
};

/**
 * struct deferred_cmd - description of a command to be processed later
 * @list_node: Used to put this command on a list in the edge.
 * @id: ID of the command.
 * @param1: Parameter one of the command.
 * @param2: Parameter two of the command.
 * @data: Extra data associated with the command, if applicable.
 *
 * This structure stores the relevant information of a command that was removed
 * from the fifo but needs to be processed at a later time.
 */
struct deferred_cmd {
	struct list_head list_node;
	uint16_t id;
	uint16_t param1;
	uint32_t param2;
	void *data;
};

static uint32_t negotiate_features_v1(struct glink_transport_if *if_ptr,
				      const struct glink_core_version *version,
				      uint32_t features);
static void register_debugfs_info(struct edge_info *einfo);

static struct edge_info *edge_infos[NUM_SMEM_SUBSYSTEMS];
static DEFINE_MUTEX(probe_lock);
static struct glink_core_version versions[] = {
	{1, TRACER_PKT_FEATURE, negotiate_features_v1},
};

/**
 * send_irq() - send an irq to a remote entity as an event signal
 * @einfo: The remote entity that should receive the irq.
 */
static void send_irq(struct edge_info *einfo)
{
	/*
	 * Any data associated with this event must be visible to the remote
	 * before the interrupt is triggered.
	 */
	wmb();
	writel_relaxed(einfo->out_irq_mask, einfo->out_irq_reg);
	if (einfo->remote_proc_id != SMEM_SPSS)
		writel_relaxed(0, einfo->out_irq_reg);
	einfo->tx_irq_count++;
}

/**
 * read_from_fifo() - memcpy from fifo memory
 * @dest: Destination address.
 * @src: Source address.
 * @num_bytes: Number of bytes to copy.
 *
 * Return: Destination address.
 */
static void *read_from_fifo(void *dest, const void *src, size_t num_bytes)
{
	memcpy_fromio(dest, src, num_bytes);
	return dest;
}

/**
 * write_to_fifo() - memcpy to fifo memory
 * @dest: Destination address.
 * @src: Source address.
 * @num_bytes: Number of bytes to copy.
 *
 * Return: Destination address.
 */
static void *write_to_fifo(void *dest, const void *src, size_t num_bytes)
{
	memcpy_toio(dest, src, num_bytes);
	return dest;
}

/**
 * memcpy32_toio() - memcpy to word access only memory
 * @dest: Destination address.
 * @src: Source address.
 * @num_bytes: Number of bytes to copy.
 *
 * Return: Destination address.
 */
static void *memcpy32_toio(void *dest, const void *src, size_t num_bytes)
{
	uint32_t *dest_local = (uint32_t *)dest;
	uint32_t *src_local = (uint32_t *)src;

	if (WARN_ON(num_bytes & RPM_FIFO_ADDR_ALIGN_BYTES))
		return ERR_PTR(-EINVAL);
	if (WARN_ON(!dest_local ||
		    ((uintptr_t)dest_local & RPM_FIFO_ADDR_ALIGN_BYTES)))
		return ERR_PTR(-EINVAL);
	if (WARN_ON(!src_local ||
		    ((uintptr_t)src_local & RPM_FIFO_ADDR_ALIGN_BYTES)))
		return ERR_PTR(-EINVAL);
	num_bytes /= sizeof(uint32_t);

	while (num_bytes--)
		__raw_writel_no_log(*src_local++, dest_local++);

	return dest;
}

/**
 * memcpy32_fromio() - memcpy from word access only memory
 * @dest: Destination address.
 * @src: Source address.
 * @num_bytes: Number of bytes to copy.
 *
 * Return: Destination address.
 */
static void *memcpy32_fromio(void *dest, const void *src, size_t num_bytes)
{
	uint32_t *dest_local = (uint32_t *)dest;
	uint32_t *src_local = (uint32_t *)src;

	if (WARN_ON(num_bytes & RPM_FIFO_ADDR_ALIGN_BYTES))
		return ERR_PTR(-EINVAL);
	if (WARN_ON(!dest_local ||
		    ((uintptr_t)dest_local & RPM_FIFO_ADDR_ALIGN_BYTES)))
		return ERR_PTR(-EINVAL);
	if (WARN_ON(!src_local ||
		    ((uintptr_t)src_local & RPM_FIFO_ADDR_ALIGN_BYTES)))
		return ERR_PTR(-EINVAL);
	num_bytes /= sizeof(uint32_t);

	while (num_bytes--)
		*dest_local++ = __raw_readl_no_log(src_local++);

	return dest;
}

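/*
 * Illustrative note (hypothetical values): the RPM fifos only tolerate
 * aligned 32-bit accesses, which is what the memcpy32_*io() helpers above
 * enforce. RPM_FIFO_ADDR_ALIGN_BYTES is 3 (0b11), so
 * "num_bytes & RPM_FIFO_ADDR_ALIGN_BYTES" is non-zero for any length that
 * is not a multiple of 4; e.g. copying 8 bytes passes the checks and
 * issues exactly two word accesses, while 6 bytes trips the WARN_ON()
 * and returns ERR_PTR(-EINVAL).
 */
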
/**
 * fifo_read_avail() - how many bytes are available to be read from an edge
 * @einfo: The concerned edge to query.
 *
 * Return: The number of bytes available to be read from edge.
 */
static uint32_t fifo_read_avail(struct edge_info *einfo)
{
	uint32_t read_index = einfo->rx_ch_desc->read_index;
	uint32_t write_index = einfo->rx_ch_desc->write_index;
	uint32_t fifo_size = einfo->rx_fifo_size;
	uint32_t bytes_avail;

	bytes_avail = write_index - read_index;
	if (write_index < read_index)
		/*
		 * Case: W < R - Write has wrapped
		 * --------------------------------
		 * In this case, the write operation has wrapped past the end
		 * of the FIFO which means that now calculating the amount of
		 * data in the FIFO results in a negative number. This can be
		 * easily fixed by adding the fifo_size to the value. Even
		 * though the values are unsigned, subtraction is always done
		 * using 2's complement which means that the result will still
		 * be correct once the FIFO size has been added to the negative
		 * result.
		 *
		 * Example:
		 * '-' = data in fifo
		 * '.' = empty
		 *
		 * 0         1
		 * 0123456789012345
		 * |-----w.....r----|
		 * 0                N
		 *
		 * write = 5 = 101b
		 * read = 11 = 1011b
		 * Data in FIFO
		 *   (write - read) + fifo_size = (101b - 1011b) + 10000b
		 *  = 11111010b + 10000b = 1010b = 10
		 */
		bytes_avail += fifo_size;

	return bytes_avail;
}

/**
 * fifo_write_avail() - how many bytes can be written to the edge
 * @einfo: The concerned edge to query.
 *
 * Calculates the number of bytes that can be transmitted at this time.
 * Automatically reserves some space to maintain alignment when the fifo is
 * completely full, and reserves space so that the flush command can always be
 * transmitted when needed.
 *
 * Return: The number of bytes that can be written to the edge.
 */
static uint32_t fifo_write_avail(struct edge_info *einfo)
{
	uint32_t read_index = einfo->tx_ch_desc->read_index;
	uint32_t write_index = einfo->tx_ch_desc->write_index;
	uint32_t fifo_size = einfo->tx_fifo_size;
	uint32_t bytes_avail = read_index - write_index;

	if (read_index <= write_index)
		bytes_avail += fifo_size;
	if (bytes_avail < FIFO_FULL_RESERVE + TX_BLOCKED_CMD_RESERVE)
		bytes_avail = 0;
	else
		bytes_avail -= FIFO_FULL_RESERVE + TX_BLOCKED_CMD_RESERVE;

	return bytes_avail;
}

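/*
 * Worked example for fifo_write_avail() (hypothetical sizes): with a
 * 1024-byte tx fifo and read_index == write_index (empty fifo),
 * bytes_avail starts at 0, the "read_index <= write_index" branch adds
 * 1024, and the reserves subtract FIFO_FULL_RESERVE (8) plus
 * TX_BLOCKED_CMD_RESERVE (8), leaving 1008 writable bytes. The 16
 * reserved bytes keep the fifo from filling completely and guarantee
 * room for the READ_NOTIF_CMD sent by send_tx_blocked_signal().
 */
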
/**
 * fifo_read() - read data from an edge
 * @einfo: The concerned edge to read from.
 * @_data: Buffer to copy the read data into.
 * @len: The amount of data to read in bytes.
 *
 * Return: The number of bytes read.
 */
static int fifo_read(struct edge_info *einfo, void *_data, int len)
{
	void *ptr;
	void *ret;
	void *data = _data;
	int orig_len = len;
	uint32_t read_index = einfo->rx_ch_desc->read_index;
	uint32_t write_index = einfo->rx_ch_desc->write_index;
	uint32_t fifo_size = einfo->rx_fifo_size;
	uint32_t n;

	while (len) {
		ptr = einfo->rx_fifo + read_index;
		if (read_index <= write_index)
			n = write_index - read_index;
		else
			n = fifo_size - read_index;

		if (n == 0)
			break;
		if (n > len)
			n = len;

		ret = einfo->read_from_fifo(data, ptr, n);
		if (IS_ERR(ret))
			return PTR_ERR(ret);

		data += n;
		len -= n;
		read_index += n;
		if (read_index >= fifo_size)
			read_index -= fifo_size;
	}
	einfo->rx_ch_desc->read_index = read_index;

	return orig_len - len;
}

/**
 * fifo_write_body() - Copy transmit data into an edge
 * @einfo: The concerned edge to copy into.
 * @_data: Buffer of data to copy from.
 * @len: Size of data to copy in bytes.
 * @write_index: Index into the channel where the data should be copied.
 *
 * Return: Number of bytes remaining to be copied into the edge.
 */
static int fifo_write_body(struct edge_info *einfo, const void *_data,
			   int len, uint32_t *write_index)
{
	void *ptr;
	void *ret;
	const void *data = _data;
	uint32_t read_index = einfo->tx_ch_desc->read_index;
	uint32_t fifo_size = einfo->tx_fifo_size;
	uint32_t n;

	while (len) {
		ptr = einfo->tx_fifo + *write_index;
		if (*write_index < read_index) {
			n = read_index - *write_index - FIFO_FULL_RESERVE;
		} else {
			if (read_index < FIFO_FULL_RESERVE)
				n = fifo_size + read_index - *write_index -
							FIFO_FULL_RESERVE;
			else
				n = fifo_size - *write_index;
		}

		if (n == 0)
			break;
		if (n > len)
			n = len;

		ret = einfo->write_to_fifo(ptr, data, n);
		if (IS_ERR(ret))
			return PTR_ERR(ret);

		data += n;
		len -= n;
		*write_index += n;
		if (*write_index >= fifo_size)
			*write_index -= fifo_size;
	}
	return len;
}

/**
 * fifo_write() - Write data into an edge
 * @einfo: The concerned edge to write to.
 * @data: Buffer of data to write.
 * @len: Length of data to write, in bytes.
 *
 * Wrapper around fifo_write_body() to manage additional details that are
 * necessary for a complete write event. Does not manage concurrency. Clients
 * should use fifo_write_avail() to check if there is sufficient space before
 * calling fifo_write().
 *
 * Return: Number of bytes written to the edge.
 */
static int fifo_write(struct edge_info *einfo, const void *data, int len)
{
	int orig_len = len;
	uint32_t write_index = einfo->tx_ch_desc->write_index;

	len = fifo_write_body(einfo, data, len, &write_index);
	if (unlikely(len < 0))
		return len;
	einfo->tx_ch_desc->write_index = write_index;
	send_irq(einfo);

	return orig_len - len;
}

/**
 * fifo_write_complex() - writes a transaction of multiple buffers to an edge
 * @einfo: The concerned edge to write to.
 * @data1: The first buffer of data to write.
 * @len1: The length of the first buffer in bytes.
 * @data2: The second buffer of data to write.
 * @len2: The length of the second buffer in bytes.
 * @data3: The third buffer of data to write.
 * @len3: The length of the third buffer in bytes.
 *
 * A variant of fifo_write() which optimizes the usecase found in tx(). The
 * remote side expects all or none of the transmitted data to be available.
 * This prevents the tx() usecase from calling fifo_write() multiple times. The
 * alternative would be an allocation and additional memcpy to create a buffer
 * to copy all the data segments into one location before calling fifo_write().
 *
 * Return: Number of bytes written to the edge.
 */
static int fifo_write_complex(struct edge_info *einfo,
			      const void *data1, int len1,
			      const void *data2, int len2,
			      const void *data3, int len3)
{
	int orig_len = len1 + len2 + len3;
	uint32_t write_index = einfo->tx_ch_desc->write_index;

	len1 = fifo_write_body(einfo, data1, len1, &write_index);
	if (unlikely(len1 < 0))
		return len1;
	len2 = fifo_write_body(einfo, data2, len2, &write_index);
	if (unlikely(len2 < 0))
		return len2;
	len3 = fifo_write_body(einfo, data3, len3, &write_index);
	if (unlikely(len3 < 0))
		return len3;

	einfo->tx_ch_desc->write_index = write_index;
	send_irq(einfo);

	return orig_len - len1 - len2 - len3;
}

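/*
 * Illustrative usage (a sketch; see tx_data() later in this file for the
 * real caller): a data transfer consists of the command header, the
 * payload fragment, and the padding needed to keep the fifo
 * FIFO_ALIGNMENT-aligned, committed as one all-or-nothing transaction:
 *
 *	char zeros[FIFO_ALIGNMENT] = { 0 };
 *
 *	fifo_write_complex(einfo, &cmd, sizeof(cmd),
 *			   data_start, size,
 *			   zeros, zeros_size);
 */
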
/**
 * send_tx_blocked_signal() - send the flush command as we are blocked from tx
 * @einfo: The concerned edge which is blocked.
 *
 * Used to send a signal to the remote side that we have no more space to
 * transmit data and therefore need the remote side to signal us when they have
 * cleared some space by reading some data. This function relies upon the
 * assumption that fifo_write_avail() will reserve some space so that the flush
 * signal command can always be put into the transmit fifo, even when "everyone"
 * else thinks that the transmit fifo is truly full. This function assumes
 * that it is called with the write_lock already locked.
 */
static void send_tx_blocked_signal(struct edge_info *einfo)
{
	struct read_notif_request {
		uint16_t cmd;
		uint16_t reserved;
		uint32_t reserved2;
	};
	struct read_notif_request read_notif_req;

	read_notif_req.cmd = READ_NOTIF_CMD;
	read_notif_req.reserved = 0;
	read_notif_req.reserved2 = 0;

	if (!einfo->tx_blocked_signal_sent) {
		einfo->tx_blocked_signal_sent = true;
		fifo_write(einfo, &read_notif_req, sizeof(read_notif_req));
	}
}

/**
 * fifo_tx() - transmit data on an edge
 * @einfo: The concerned edge to transmit on.
 * @data: Buffer of data to transmit.
 * @len: Length of data to transmit in bytes.
 *
 * This helper function is the preferred interface to fifo_write() and should
 * be used in the normal case for transmitting entities. fifo_tx() will block
 * until there is sufficient room to transmit the requested amount of data.
 * fifo_tx() will manage any concurrency between multiple transmitters on a
 * channel.
 *
 * Return: Number of bytes transmitted.
 */
static int fifo_tx(struct edge_info *einfo, const void *data, int len)
{
	unsigned long flags;
	int ret;

	DEFINE_WAIT(wait);

	spin_lock_irqsave(&einfo->write_lock, flags);
	while (fifo_write_avail(einfo) < len) {
		send_tx_blocked_signal(einfo);
		prepare_to_wait(&einfo->tx_blocked_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		if (fifo_write_avail(einfo) < len && !einfo->in_ssr) {
			spin_unlock_irqrestore(&einfo->write_lock, flags);
			schedule();
			spin_lock_irqsave(&einfo->write_lock, flags);
		}
		finish_wait(&einfo->tx_blocked_queue, &wait);
		if (einfo->in_ssr) {
			spin_unlock_irqrestore(&einfo->write_lock, flags);
			return -EFAULT;
		}
	}
	ret = fifo_write(einfo, data, len);
	spin_unlock_irqrestore(&einfo->write_lock, flags);

	return ret;
}

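/*
 * All of the tx_cmd_*() helpers later in this file follow the same sketch:
 * take an SRCU read-side reference on @use_ref, bail out with -EFAULT if
 * the edge is mid-SSR, build the fixed-size command struct, and hand it to
 * fifo_tx():
 *
 *	rcu_id = srcu_read_lock(&einfo->use_ref);
 *	if (einfo->in_ssr) {
 *		srcu_read_unlock(&einfo->use_ref, rcu_id);
 *		return -EFAULT;
 *	}
 *	fifo_tx(einfo, &cmd, sizeof(cmd));
 *	srcu_read_unlock(&einfo->use_ref, rcu_id);
 */
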
/**
 * process_rx_data() - process received data from an edge
 * @einfo: The edge the data was received on.
 * @cmd_id: ID to specify the type of data.
 * @rcid: The remote channel id associated with the data.
 * @intent_id: The intent the data should be put in.
 */
static void process_rx_data(struct edge_info *einfo, uint16_t cmd_id,
			    uint32_t rcid, uint32_t intent_id)
{
	struct command {
		uint32_t frag_size;
		uint32_t size_remaining;
	};
	struct command cmd;
	struct glink_core_rx_intent *intent;
	char trash[FIFO_ALIGNMENT];
	int alignment;
	bool err = false;

	fifo_read(einfo, &cmd, sizeof(cmd));

	intent = einfo->xprt_if.glink_core_if_ptr->rx_get_pkt_ctx(
					&einfo->xprt_if, rcid, intent_id);
	if (intent == NULL) {
		GLINK_ERR("%s: no intent for ch %d liid %d\n", __func__, rcid,
			  intent_id);
		err = true;
	} else if (intent->data == NULL) {
		if (einfo->intentless) {
			intent->data = kmalloc(cmd.frag_size,
					       __GFP_ATOMIC | __GFP_HIGH);
			if (!intent->data) {
				err = true;
				GLINK_ERR(
				"%s: atomic alloc fail ch %d liid %d size %d\n",
					__func__, rcid, intent_id,
					cmd.frag_size);
			} else {
				intent->intent_size = cmd.frag_size;
			}
		} else {
			GLINK_ERR(
				"%s: intent for ch %d liid %d has no data buff\n",
				__func__, rcid, intent_id);
			err = true;
		}
	}

	if (!err &&
	    (intent->intent_size - intent->write_offset < cmd.frag_size ||
	     intent->write_offset + cmd.size_remaining > intent->intent_size)) {
		GLINK_ERR("%s: rx data size:%d and remaining:%d %s %d %s:%d\n",
			  __func__,
			  cmd.frag_size,
			  cmd.size_remaining,
			  "will overflow ch",
			  rcid,
			  "intent",
			  intent_id);
		err = true;
	}

	if (err) {
		alignment = ALIGN(cmd.frag_size, FIFO_ALIGNMENT);
		alignment -= cmd.frag_size;
		while (cmd.frag_size) {
			if (cmd.frag_size > FIFO_ALIGNMENT) {
				fifo_read(einfo, trash, FIFO_ALIGNMENT);
				cmd.frag_size -= FIFO_ALIGNMENT;
			} else {
				fifo_read(einfo, trash, cmd.frag_size);
				cmd.frag_size = 0;
			}
		}
		if (alignment)
			fifo_read(einfo, trash, alignment);
		return;
	}
	fifo_read(einfo, intent->data + intent->write_offset, cmd.frag_size);
	intent->write_offset += cmd.frag_size;
	intent->pkt_size += cmd.frag_size;

	alignment = ALIGN(cmd.frag_size, FIFO_ALIGNMENT);
	alignment -= cmd.frag_size;
	if (alignment)
		fifo_read(einfo, trash, alignment);

	if (unlikely((cmd_id == TRACER_PKT_CMD ||
		      cmd_id == TRACER_PKT_CONT_CMD) && !cmd.size_remaining)) {
		tracer_pkt_log_event(intent->data, GLINK_XPRT_RX);
		intent->tracer_pkt = true;
	}

	einfo->xprt_if.glink_core_if_ptr->rx_put_pkt_ctx(&einfo->xprt_if,
							 rcid,
							 intent,
							 cmd.size_remaining ?
								false : true);
}

/**
 * queue_cmd() - queue a deferred command for later processing
 * @einfo: Edge to queue commands on.
 * @cmd: Command to queue.
 * @data: Command specific data to queue with the command.
 *
 * Return: True if queuing was successful, false otherwise.
 */
static bool queue_cmd(struct edge_info *einfo, void *cmd, void *data)
{
	struct command {
		uint16_t id;
		uint16_t param1;
		uint32_t param2;
	};
	struct command *_cmd = cmd;
	struct deferred_cmd *d_cmd;

	d_cmd = kmalloc(sizeof(*d_cmd), GFP_ATOMIC);
	if (!d_cmd) {
		GLINK_ERR("%s: Discarding cmd %d\n", __func__, _cmd->id);
		return false;
	}
	d_cmd->id = _cmd->id;
	d_cmd->param1 = _cmd->param1;
	d_cmd->param2 = _cmd->param2;
	d_cmd->data = data;
	list_add_tail(&d_cmd->list_node, &einfo->deferred_cmds);
	einfo->deferred_cmds_cnt++;
	kthread_queue_work(&einfo->kworker, &einfo->kwork);
	return true;
}

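/*
 * Typical deferral pattern from atomic rx context (used throughout
 * __rx_worker() below): commands that need process context are queued
 * instead of being handled inline:
 *
 *	if (atomic_ctx) {
 *		queue_cmd(einfo, &cmd, NULL);
 *		break;
 *	}
 */
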
/**
 * get_rx_fifo() - Find the rx fifo for an edge
 * @einfo: Edge to find the fifo for.
 *
 * Return: True if fifo was found, false otherwise.
 */
static bool get_rx_fifo(struct edge_info *einfo)
{
	if (einfo->mailbox) {
		einfo->rx_fifo = &einfo->mailbox->fifo[einfo->mailbox->tx_size];
		einfo->rx_fifo_size = einfo->mailbox->rx_size;
	} else {
		einfo->rx_fifo = smem_get_entry(SMEM_GLINK_NATIVE_XPRT_FIFO_1,
						&einfo->rx_fifo_size,
						einfo->remote_proc_id,
						SMEM_ITEM_CACHED_FLAG);
		if (!einfo->rx_fifo)
			einfo->rx_fifo = smem_get_entry(
						SMEM_GLINK_NATIVE_XPRT_FIFO_1,
						&einfo->rx_fifo_size,
						einfo->remote_proc_id,
						0);
		if (!einfo->rx_fifo)
			return false;
	}

	return true;
}

/**
 * __rx_worker() - process received commands on a specific edge
 * @einfo: Edge to process commands on.
 * @atomic_ctx: Indicates if the caller is in atomic context and requires any
 *              non-atomic operations to be deferred.
 */
static void __rx_worker(struct edge_info *einfo, bool atomic_ctx)
{
	struct command {
		uint16_t id;
		uint16_t param1;
		uint32_t param2;
	};
	struct intent_desc {
		uint32_t size;
		uint32_t id;
	};
	struct command cmd;
	struct intent_desc intent;
	struct intent_desc *intents;
	int i;
	bool granted;
	unsigned long flags;
	int rcu_id;
	uint16_t rcid;
	uint32_t name_len;
	uint32_t len;
	char *name;
	char trash[FIFO_ALIGNMENT];
	struct deferred_cmd *d_cmd;
	void *cmd_data;

	rcu_id = srcu_read_lock(&einfo->use_ref);

	if (unlikely(!einfo->rx_fifo) && atomic_ctx) {
		if (!get_rx_fifo(einfo)) {
			srcu_read_unlock(&einfo->use_ref, rcu_id);
			return;
		}
		einfo->in_ssr = false;
		einfo->xprt_if.glink_core_if_ptr->link_up(&einfo->xprt_if);
	}

	if (einfo->in_ssr) {
		srcu_read_unlock(&einfo->use_ref, rcu_id);
		return;
	}

	if ((atomic_ctx) && ((einfo->tx_resume_needed) ||
	    (waitqueue_active(&einfo->tx_blocked_queue)))) /* tx waiting ?*/
		schedule_work(&einfo->wakeup_work);

	/*
	 * Access to the fifo needs to be synchronized; however, only the
	 * calls into the core from process_rx_data() are compatible with an
	 * atomic processing context. For everything else, we need to do all
	 * the fifo processing, then unlock the lock for the call into the
	 * core. Data in the fifo is allowed to be processed immediately
	 * instead of being ordered with the commands because the channel open
	 * process prevents intents from being queued (which prevents data
	 * from being sent) until all the channel open commands are processed
	 * by the core, thus eliminating a race.
	 */
	spin_lock_irqsave(&einfo->rx_lock, flags);
	while (fifo_read_avail(einfo) ||
	       (!atomic_ctx && !list_empty(&einfo->deferred_cmds))) {
		if (einfo->in_ssr)
			break;

		if (atomic_ctx && !einfo->intentless &&
		    einfo->deferred_cmds_cnt >= DEFERRED_CMDS_THRESHOLD)
			break;

		if (!atomic_ctx && !list_empty(&einfo->deferred_cmds)) {
			d_cmd = list_first_entry(&einfo->deferred_cmds,
						 struct deferred_cmd,
						 list_node);
			list_del(&d_cmd->list_node);
			einfo->deferred_cmds_cnt--;
			cmd.id = d_cmd->id;
			cmd.param1 = d_cmd->param1;
			cmd.param2 = d_cmd->param2;
			cmd_data = d_cmd->data;
			kfree(d_cmd);
		} else {
			fifo_read(einfo, &cmd, sizeof(cmd));
			cmd_data = NULL;
		}

		switch (cmd.id) {
		case VERSION_CMD:
			if (atomic_ctx) {
				queue_cmd(einfo, &cmd, NULL);
				break;
			}
			spin_unlock_irqrestore(&einfo->rx_lock, flags);
			einfo->xprt_if.glink_core_if_ptr->rx_cmd_version(
								&einfo->xprt_if,
								cmd.param1,
								cmd.param2);
			spin_lock_irqsave(&einfo->rx_lock, flags);
			break;
		case VERSION_ACK_CMD:
			if (atomic_ctx) {
				queue_cmd(einfo, &cmd, NULL);
				break;
			}
			spin_unlock_irqrestore(&einfo->rx_lock, flags);
			einfo->xprt_if.glink_core_if_ptr->rx_cmd_version_ack(
								&einfo->xprt_if,
								cmd.param1,
								cmd.param2);
			spin_lock_irqsave(&einfo->rx_lock, flags);
			break;
		case OPEN_CMD:
			rcid = cmd.param1;
			name_len = cmd.param2;

			if (cmd_data) {
				name = cmd_data;
			} else {
				len = ALIGN(name_len, FIFO_ALIGNMENT);
				name = kmalloc(len, GFP_ATOMIC);
				if (!name) {
					pr_err("No memory available to rx ch open cmd name. Discarding cmd.\n");
					while (len) {
						fifo_read(einfo, trash,
							  FIFO_ALIGNMENT);
						len -= FIFO_ALIGNMENT;
					}
					break;
				}
				fifo_read(einfo, name, len);
			}
			if (atomic_ctx) {
				if (!queue_cmd(einfo, &cmd, name))
					kfree(name);
				break;
			}

			spin_unlock_irqrestore(&einfo->rx_lock, flags);
			einfo->xprt_if.glink_core_if_ptr->rx_cmd_ch_remote_open(
								&einfo->xprt_if,
								rcid,
								name,
								SMEM_XPRT_ID);
			kfree(name);
			spin_lock_irqsave(&einfo->rx_lock, flags);
			break;
		case CLOSE_CMD:
			if (atomic_ctx) {
				queue_cmd(einfo, &cmd, NULL);
				break;
			}
			spin_unlock_irqrestore(&einfo->rx_lock, flags);
			einfo->xprt_if.glink_core_if_ptr->
						rx_cmd_ch_remote_close(
								&einfo->xprt_if,
								cmd.param1);
			spin_lock_irqsave(&einfo->rx_lock, flags);
			break;
		case OPEN_ACK_CMD:
			if (atomic_ctx) {
				queue_cmd(einfo, &cmd, NULL);
				break;
			}
			spin_unlock_irqrestore(&einfo->rx_lock, flags);
			einfo->xprt_if.glink_core_if_ptr->rx_cmd_ch_open_ack(
								&einfo->xprt_if,
								cmd.param1,
								SMEM_XPRT_ID);
			spin_lock_irqsave(&einfo->rx_lock, flags);
			break;
		case RX_INTENT_CMD:
			/*
			 * One intent listed with this command. This is the
			 * expected case and can be optimized over the general
			 * case of an array of intents.
			 */
			if (cmd.param2 == 1) {
				if (cmd_data) {
					intent.id = ((struct intent_desc *)
							cmd_data)->id;
					intent.size = ((struct intent_desc *)
							cmd_data)->size;
					kfree(cmd_data);
				} else {
					fifo_read(einfo, &intent,
						  sizeof(intent));
				}
				if (atomic_ctx) {
					cmd_data = kmalloc(sizeof(intent),
							   GFP_ATOMIC);
					if (!cmd_data) {
						GLINK_ERR(
							"%s: dropping cmd %d\n",
							__func__, cmd.id);
						break;
					}
					((struct intent_desc *)cmd_data)->id =
								intent.id;
					((struct intent_desc *)cmd_data)->size =
								intent.size;
					if (!queue_cmd(einfo, &cmd, cmd_data))
						kfree(cmd_data);
					break;
				}
				spin_unlock_irqrestore(&einfo->rx_lock, flags);
				einfo->xprt_if.glink_core_if_ptr->
						rx_cmd_remote_rx_intent_put(
								&einfo->xprt_if,
								cmd.param1,
								intent.id,
								intent.size);
				spin_lock_irqsave(&einfo->rx_lock, flags);
				break;
			}

			/* Array of intents to process */
			if (cmd_data) {
				intents = cmd_data;
			} else {
				intents = kmalloc_array(cmd.param2,
						sizeof(*intents), GFP_ATOMIC);
				if (!intents) {
					for (i = 0; i < cmd.param2; ++i)
						fifo_read(einfo, &intent,
							  sizeof(intent));
					break;
				}
				fifo_read(einfo, intents,
					  sizeof(*intents) * cmd.param2);
			}
			if (atomic_ctx) {
				if (!queue_cmd(einfo, &cmd, intents))
					kfree(intents);
				break;
			}
			spin_unlock_irqrestore(&einfo->rx_lock, flags);
			for (i = 0; i < cmd.param2; ++i) {
				einfo->xprt_if.glink_core_if_ptr->
						rx_cmd_remote_rx_intent_put(
								&einfo->xprt_if,
								cmd.param1,
								intents[i].id,
								intents[i].size);
			}
			kfree(intents);
			spin_lock_irqsave(&einfo->rx_lock, flags);
			break;
		case RX_DONE_CMD:
			if (atomic_ctx) {
				queue_cmd(einfo, &cmd, NULL);
				break;
			}
			spin_unlock_irqrestore(&einfo->rx_lock, flags);
			einfo->xprt_if.glink_core_if_ptr->rx_cmd_tx_done(
								&einfo->xprt_if,
								cmd.param1,
								cmd.param2,
								false);
			spin_lock_irqsave(&einfo->rx_lock, flags);
			break;
		case RX_INTENT_REQ_CMD:
			if (atomic_ctx) {
				queue_cmd(einfo, &cmd, NULL);
				break;
			}
			spin_unlock_irqrestore(&einfo->rx_lock, flags);
			einfo->xprt_if.glink_core_if_ptr->
						rx_cmd_remote_rx_intent_req(
								&einfo->xprt_if,
								cmd.param1,
								cmd.param2);
			spin_lock_irqsave(&einfo->rx_lock, flags);
			break;
		case RX_INTENT_REQ_ACK_CMD:
			if (atomic_ctx) {
				queue_cmd(einfo, &cmd, NULL);
				break;
			}
			spin_unlock_irqrestore(&einfo->rx_lock, flags);
			granted = false;
			if (cmd.param2 == 1)
				granted = true;
			einfo->xprt_if.glink_core_if_ptr->
						rx_cmd_rx_intent_req_ack(
								&einfo->xprt_if,
								cmd.param1,
								granted);
			spin_lock_irqsave(&einfo->rx_lock, flags);
			break;
		case TX_DATA_CMD:
		case TX_DATA_CONT_CMD:
		case TRACER_PKT_CMD:
		case TRACER_PKT_CONT_CMD:
			process_rx_data(einfo, cmd.id, cmd.param1, cmd.param2);
			break;
		case CLOSE_ACK_CMD:
			if (atomic_ctx) {
				queue_cmd(einfo, &cmd, NULL);
				break;
			}
			spin_unlock_irqrestore(&einfo->rx_lock, flags);
			einfo->xprt_if.glink_core_if_ptr->rx_cmd_ch_close_ack(
								&einfo->xprt_if,
								cmd.param1);
			spin_lock_irqsave(&einfo->rx_lock, flags);
			break;
		case READ_NOTIF_CMD:
			send_irq(einfo);
			break;
		case SIGNALS_CMD:
			if (atomic_ctx) {
				queue_cmd(einfo, &cmd, NULL);
				break;
			}
			spin_unlock_irqrestore(&einfo->rx_lock, flags);
			einfo->xprt_if.glink_core_if_ptr->rx_cmd_remote_sigs(
								&einfo->xprt_if,
								cmd.param1,
								cmd.param2);
			spin_lock_irqsave(&einfo->rx_lock, flags);
			break;
		case RX_DONE_W_REUSE_CMD:
			if (atomic_ctx) {
				queue_cmd(einfo, &cmd, NULL);
				break;
			}
			spin_unlock_irqrestore(&einfo->rx_lock, flags);
			einfo->xprt_if.glink_core_if_ptr->rx_cmd_tx_done(
								&einfo->xprt_if,
								cmd.param1,
								cmd.param2,
								true);
			spin_lock_irqsave(&einfo->rx_lock, flags);
			break;
		default:
			pr_err("Unrecognized command: %d\n", cmd.id);
			break;
		}
	}
	spin_unlock_irqrestore(&einfo->rx_lock, flags);
	srcu_read_unlock(&einfo->use_ref, rcu_id);
}

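/*
 * Note on the deferral threshold: for intent-based edges, the atomic
 * (tasklet) pass in __rx_worker() stops draining the rx fifo once
 * deferred_cmds_cnt reaches DEFERRED_CMDS_THRESHOLD (25), leaving the
 * remainder for the process-context kworker. This appears intended to
 * bound how long the tasklet runs under a flood of commands.
 */
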
/**
 * rx_worker_atomic() - worker function to process received commands in atomic
 *			context.
 * @param: The param parameter passed during initialization of the tasklet.
 */
static void rx_worker_atomic(unsigned long param)
{
	struct edge_info *einfo = (struct edge_info *)param;

	__rx_worker(einfo, true);
}

1199/**
Dhoat Harpale9d73372017-03-10 21:23:03 +05301200 * tx_wakeup_worker() - worker function to wakeup tx blocked thread
1201 * @work: kwork associated with the edge to process commands on.
1202 */
1203static void tx_wakeup_worker(struct work_struct *work)
1204{
1205 struct edge_info *einfo;
1206 bool trigger_wakeup = false;
1207 unsigned long flags;
1208 int rcu_id;
1209
1210 einfo = container_of(work, struct edge_info, wakeup_work);
1211 rcu_id = srcu_read_lock(&einfo->use_ref);
1212 if (einfo->in_ssr) {
1213 srcu_read_unlock(&einfo->use_ref, rcu_id);
1214 return;
1215 }
1216 if (einfo->tx_resume_needed && fifo_write_avail(einfo)) {
1217 einfo->tx_resume_needed = false;
1218 einfo->xprt_if.glink_core_if_ptr->tx_resume(
1219 &einfo->xprt_if);
1220 }
1221 spin_lock_irqsave(&einfo->write_lock, flags);
1222 if (waitqueue_active(&einfo->tx_blocked_queue)) { /* tx waiting ?*/
1223 einfo->tx_blocked_signal_sent = false;
1224 trigger_wakeup = true;
1225 }
1226 spin_unlock_irqrestore(&einfo->write_lock, flags);
1227 if (trigger_wakeup)
1228 wake_up_all(&einfo->tx_blocked_queue);
1229 srcu_read_unlock(&einfo->use_ref, rcu_id);
1230}
1231
/**
 * rx_worker() - worker function to process received commands
 * @work: kwork associated with the edge to process commands on.
 */
static void rx_worker(struct kthread_work *work)
{
	struct edge_info *einfo;

	einfo = container_of(work, struct edge_info, kwork);
	__rx_worker(einfo, false);
}

irqreturn_t irq_handler(int irq, void *priv)
{
	struct edge_info *einfo = (struct edge_info *)priv;

	if (einfo->rx_reset_reg)
		writel_relaxed(einfo->out_irq_mask, einfo->rx_reset_reg);

	tasklet_hi_schedule(&einfo->tasklet);
	einfo->rx_irq_count++;

	return IRQ_HANDLED;
}

/**
 * tx_cmd_version() - convert a version cmd to wire format and transmit
 * @if_ptr: The transport to transmit on.
 * @version: The version number to encode.
 * @features: The features information to encode.
 */
static void tx_cmd_version(struct glink_transport_if *if_ptr, uint32_t version,
			   uint32_t features)
{
	struct command {
		uint16_t id;
		uint16_t version;
		uint32_t features;
	};
	struct command cmd;
	struct edge_info *einfo;
	int rcu_id;

	einfo = container_of(if_ptr, struct edge_info, xprt_if);

	rcu_id = srcu_read_lock(&einfo->use_ref);
	if (einfo->in_ssr) {
		srcu_read_unlock(&einfo->use_ref, rcu_id);
		return;
	}

	cmd.id = VERSION_CMD;
	cmd.version = version;
	cmd.features = features;

	fifo_tx(einfo, &cmd, sizeof(cmd));
	srcu_read_unlock(&einfo->use_ref, rcu_id);
}

/**
 * tx_cmd_version_ack() - convert a version ack cmd to wire format and transmit
 * @if_ptr: The transport to transmit on.
 * @version: The version number to encode.
 * @features: The features information to encode.
 */
static void tx_cmd_version_ack(struct glink_transport_if *if_ptr,
			       uint32_t version,
			       uint32_t features)
{
	struct command {
		uint16_t id;
		uint16_t version;
		uint32_t features;
	};
	struct command cmd;
	struct edge_info *einfo;
	int rcu_id;

	einfo = container_of(if_ptr, struct edge_info, xprt_if);

	rcu_id = srcu_read_lock(&einfo->use_ref);
	if (einfo->in_ssr) {
		srcu_read_unlock(&einfo->use_ref, rcu_id);
		return;
	}

	cmd.id = VERSION_ACK_CMD;
	cmd.version = version;
	cmd.features = features;

	fifo_tx(einfo, &cmd, sizeof(cmd));
	srcu_read_unlock(&einfo->use_ref, rcu_id);
}

/**
 * set_version() - activate a negotiated version and feature set
 * @if_ptr: The transport to configure.
 * @version: The version to use.
 * @features: The features to use.
 *
 * Return: The supported capabilities of the transport.
 */
static uint32_t set_version(struct glink_transport_if *if_ptr, uint32_t version,
			    uint32_t features)
{
	struct edge_info *einfo;
	uint32_t ret;
	int rcu_id;

	einfo = container_of(if_ptr, struct edge_info, xprt_if);

	rcu_id = srcu_read_lock(&einfo->use_ref);
	if (einfo->in_ssr) {
		srcu_read_unlock(&einfo->use_ref, rcu_id);
		return 0;
	}

	ret = einfo->intentless ?
				GCAP_INTENTLESS | GCAP_SIGNALS : GCAP_SIGNALS;

	if (features & TRACER_PKT_FEATURE)
		ret |= GCAP_TRACER_PKT;

	srcu_read_unlock(&einfo->use_ref, rcu_id);
	return ret;
}

/**
 * tx_cmd_ch_open() - convert a channel open cmd to wire format and transmit
 * @if_ptr: The transport to transmit on.
 * @lcid: The local channel id to encode.
 * @name: The channel name to encode.
 * @req_xprt: The transport the core would like to migrate this channel to.
 *
 * Return: 0 on success or standard Linux error code.
 */
static int tx_cmd_ch_open(struct glink_transport_if *if_ptr, uint32_t lcid,
			  const char *name, uint16_t req_xprt)
{
	struct command {
		uint16_t id;
		uint16_t lcid;
		uint32_t length;
	};
	struct command cmd;
	struct edge_info *einfo;
	uint32_t buf_size;
	void *buf;
	int rcu_id;

	einfo = container_of(if_ptr, struct edge_info, xprt_if);

	rcu_id = srcu_read_lock(&einfo->use_ref);
	if (einfo->in_ssr) {
		srcu_read_unlock(&einfo->use_ref, rcu_id);
		return -EFAULT;
	}

	cmd.id = OPEN_CMD;
	cmd.lcid = lcid;
	cmd.length = strlen(name) + 1;

	buf_size = ALIGN(sizeof(cmd) + cmd.length, FIFO_ALIGNMENT);

	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf) {
		GLINK_ERR("%s: malloc fail for %d size buf\n",
			  __func__, buf_size);
		srcu_read_unlock(&einfo->use_ref, rcu_id);
		return -ENOMEM;
	}

	memcpy(buf, &cmd, sizeof(cmd));
	memcpy(buf + sizeof(cmd), name, cmd.length);

	fifo_tx(einfo, buf, buf_size);

	kfree(buf);

	srcu_read_unlock(&einfo->use_ref, rcu_id);
	return 0;
}

/**
 * tx_cmd_ch_close() - convert a channel close cmd to wire format and transmit
 * @if_ptr: The transport to transmit on.
 * @lcid: The local channel id to encode.
 *
 * Return: 0 on success or standard Linux error code.
 */
static int tx_cmd_ch_close(struct glink_transport_if *if_ptr, uint32_t lcid)
{
	struct command {
		uint16_t id;
		uint16_t lcid;
		uint32_t reserved;
	};
	struct command cmd;
	struct edge_info *einfo;
	int rcu_id;

	einfo = container_of(if_ptr, struct edge_info, xprt_if);

	rcu_id = srcu_read_lock(&einfo->use_ref);
	if (einfo->in_ssr) {
		srcu_read_unlock(&einfo->use_ref, rcu_id);
		return -EFAULT;
	}

	cmd.id = CLOSE_CMD;
	cmd.lcid = lcid;
	cmd.reserved = 0;

	fifo_tx(einfo, &cmd, sizeof(cmd));

	srcu_read_unlock(&einfo->use_ref, rcu_id);
	return 0;
}

/**
 * tx_cmd_ch_remote_open_ack() - convert a channel open ack cmd to wire format
 *				 and transmit
 * @if_ptr: The transport to transmit on.
 * @rcid: The remote channel id to encode.
 * @xprt_resp: The response to a transport migration request.
 */
static void tx_cmd_ch_remote_open_ack(struct glink_transport_if *if_ptr,
				      uint32_t rcid, uint16_t xprt_resp)
{
	struct command {
		uint16_t id;
		uint16_t rcid;
		uint32_t reserved;
	};
	struct command cmd;
	struct edge_info *einfo;
	int rcu_id;

	einfo = container_of(if_ptr, struct edge_info, xprt_if);

	rcu_id = srcu_read_lock(&einfo->use_ref);
	if (einfo->in_ssr) {
		srcu_read_unlock(&einfo->use_ref, rcu_id);
		return;
	}

	cmd.id = OPEN_ACK_CMD;
	cmd.rcid = rcid;
	cmd.reserved = 0;

	fifo_tx(einfo, &cmd, sizeof(cmd));
	srcu_read_unlock(&einfo->use_ref, rcu_id);
}

/**
 * tx_cmd_ch_remote_close_ack() - convert a channel close ack cmd to wire
 *				  format and transmit
 * @if_ptr: The transport to transmit on.
 * @rcid: The remote channel id to encode.
 */
static void tx_cmd_ch_remote_close_ack(struct glink_transport_if *if_ptr,
				       uint32_t rcid)
{
	struct command {
		uint16_t id;
		uint16_t rcid;
		uint32_t reserved;
	};
	struct command cmd;
	struct edge_info *einfo;
	int rcu_id;

	einfo = container_of(if_ptr, struct edge_info, xprt_if);

	rcu_id = srcu_read_lock(&einfo->use_ref);
	if (einfo->in_ssr) {
		srcu_read_unlock(&einfo->use_ref, rcu_id);
		return;
	}

	cmd.id = CLOSE_ACK_CMD;
	cmd.rcid = rcid;
	cmd.reserved = 0;

	fifo_tx(einfo, &cmd, sizeof(cmd));
	srcu_read_unlock(&einfo->use_ref, rcu_id);
}

/**
 * ssr() - process a subsystem restart notification of a transport
 * @if_ptr: The transport to restart
 *
 * Return: 0 on success or standard Linux error code.
 */
static int ssr(struct glink_transport_if *if_ptr)
{
	struct edge_info *einfo;
	struct deferred_cmd *cmd;

	einfo = container_of(if_ptr, struct edge_info, xprt_if);

	BUG_ON(einfo->remote_proc_id == SMEM_RPM);

	einfo->in_ssr = true;
	wake_up_all(&einfo->tx_blocked_queue);

	synchronize_srcu(&einfo->use_ref);

	while (!list_empty(&einfo->deferred_cmds)) {
		cmd = list_first_entry(&einfo->deferred_cmds,
				       struct deferred_cmd, list_node);
		list_del(&cmd->list_node);
		kfree(cmd->data);
		kfree(cmd);
	}

	einfo->tx_resume_needed = false;
	einfo->tx_blocked_signal_sent = false;
	einfo->rx_fifo = NULL;
	einfo->rx_fifo_size = 0;
	einfo->tx_ch_desc->write_index = 0;
	einfo->rx_ch_desc->read_index = 0;
	einfo->xprt_if.glink_core_if_ptr->link_down(&einfo->xprt_if);

	return 0;
}

/**
 * wait_link_down() - Check status of read/write indices
 * @if_ptr: The transport to check
 *
 * Return: 1 if indices are all zero, 0 otherwise
 */
int wait_link_down(struct glink_transport_if *if_ptr)
{
	struct edge_info *einfo;

	einfo = container_of(if_ptr, struct edge_info, xprt_if);

	if (einfo->tx_ch_desc->write_index == 0 &&
	    einfo->tx_ch_desc->read_index == 0 &&
	    einfo->rx_ch_desc->write_index == 0 &&
	    einfo->rx_ch_desc->read_index == 0)
		return 1;
	else
		return 0;
}

/**
 * allocate_rx_intent() - allocate/reserve space for RX Intent
 * @if_ptr: The transport the intent is associated with.
 * @size: size of intent.
 * @intent: Pointer to the intent structure.
 *
 * Assign "data" with the buffer created, since the transport creates
 * a linear buffer and "iovec" with the "intent" itself, so that
 * the data can be passed to a client that receives only vector buffer.
 * Note that returning NULL for the pointer is valid (it means that space has
 * been reserved, but the actual pointer will be provided later).
 *
 * Return: 0 on success or standard Linux error code.
 */
static int allocate_rx_intent(struct glink_transport_if *if_ptr, size_t size,
			      struct glink_core_rx_intent *intent)
{
	void *t;

	t = kmalloc(size, GFP_KERNEL);
	if (!t)
		return -ENOMEM;

	intent->data = t;
	intent->iovec = (void *)intent;
	intent->vprovider = rx_linear_vbuf_provider;
	intent->pprovider = NULL;
	return 0;
}

/**
 * deallocate_rx_intent() - Deallocate space created for RX Intent
 * @if_ptr: The transport the intent is associated with.
 * @intent: Pointer to the intent structure.
 *
 * Return: 0 on success or standard Linux error code.
 */
static int deallocate_rx_intent(struct glink_transport_if *if_ptr,
				struct glink_core_rx_intent *intent)
{
	if (!intent || !intent->data)
		return -EINVAL;

	kfree(intent->data);
	intent->data = NULL;
	intent->iovec = NULL;
	intent->vprovider = NULL;
	return 0;
}

/**
 * tx_cmd_local_rx_intent() - convert an rx intent cmd to wire format and
 *			      transmit
 * @if_ptr: The transport to transmit on.
 * @lcid: The local channel id to encode.
 * @size: The intent size to encode.
 * @liid: The local intent id to encode.
 *
 * Return: 0 on success or standard Linux error code.
 */
static int tx_cmd_local_rx_intent(struct glink_transport_if *if_ptr,
				  uint32_t lcid, size_t size, uint32_t liid)
{
	struct command {
		uint16_t id;
		uint16_t lcid;
		uint32_t count;
		uint32_t size;
		uint32_t liid;
	};
	struct command cmd;
	struct edge_info *einfo;
	int rcu_id;

	if (size > UINT_MAX) {
		pr_err("%s: size %zu is too large to encode\n", __func__, size);
		return -EMSGSIZE;
	}

	einfo = container_of(if_ptr, struct edge_info, xprt_if);

	if (einfo->intentless)
		return -EOPNOTSUPP;

	rcu_id = srcu_read_lock(&einfo->use_ref);
	if (einfo->in_ssr) {
		srcu_read_unlock(&einfo->use_ref, rcu_id);
		return -EFAULT;
	}

	cmd.id = RX_INTENT_CMD;
	cmd.lcid = lcid;
	cmd.count = 1;
	cmd.size = size;
	cmd.liid = liid;

	fifo_tx(einfo, &cmd, sizeof(cmd));

	srcu_read_unlock(&einfo->use_ref, rcu_id);
	return 0;
}

/**
 * tx_cmd_local_rx_done() - convert an rx done cmd to wire format and transmit
 * @if_ptr: The transport to transmit on.
 * @lcid: The local channel id to encode.
 * @liid: The local intent id to encode.
 * @reuse: Reuse the consumed intent.
 */
static void tx_cmd_local_rx_done(struct glink_transport_if *if_ptr,
				 uint32_t lcid, uint32_t liid, bool reuse)
{
	struct command {
		uint16_t id;
		uint16_t lcid;
		uint32_t liid;
	};
	struct command cmd;
	struct edge_info *einfo;
	int rcu_id;

	einfo = container_of(if_ptr, struct edge_info, xprt_if);

	if (einfo->intentless)
		return;

	rcu_id = srcu_read_lock(&einfo->use_ref);
	if (einfo->in_ssr) {
		srcu_read_unlock(&einfo->use_ref, rcu_id);
		return;
	}

	cmd.id = reuse ? RX_DONE_W_REUSE_CMD : RX_DONE_CMD;
	cmd.lcid = lcid;
	cmd.liid = liid;

	fifo_tx(einfo, &cmd, sizeof(cmd));
	srcu_read_unlock(&einfo->use_ref, rcu_id);
}

/**
 * tx_cmd_rx_intent_req() - convert an rx intent request cmd to wire format and
 *			    transmit
 * @if_ptr: The transport to transmit on.
 * @lcid: The local channel id to encode.
 * @size: The requested intent size to encode.
 *
 * Return: 0 on success or standard Linux error code.
 */
static int tx_cmd_rx_intent_req(struct glink_transport_if *if_ptr,
				uint32_t lcid, size_t size)
{
	struct command {
		uint16_t id;
		uint16_t lcid;
		uint32_t size;
	};
	struct command cmd;
	struct edge_info *einfo;
	int rcu_id;

	if (size > UINT_MAX) {
		pr_err("%s: size %zu is too large to encode\n", __func__, size);
		return -EMSGSIZE;
	}

	einfo = container_of(if_ptr, struct edge_info, xprt_if);

	if (einfo->intentless)
		return -EOPNOTSUPP;

	rcu_id = srcu_read_lock(&einfo->use_ref);
	if (einfo->in_ssr) {
		srcu_read_unlock(&einfo->use_ref, rcu_id);
		return -EFAULT;
	}

	cmd.id = RX_INTENT_REQ_CMD;
	cmd.lcid = lcid;
	cmd.size = size;

	fifo_tx(einfo, &cmd, sizeof(cmd));

	srcu_read_unlock(&einfo->use_ref, rcu_id);
	return 0;
}

/**
 * tx_cmd_remote_rx_intent_req_ack() - convert an rx intent request ack cmd to
 *				       wire format and transmit
 * @if_ptr: The transport to transmit on.
 * @lcid: The local channel id to encode.
 * @granted: The request response to encode.
 *
 * Return: 0 on success or standard Linux error code.
 */
static int tx_cmd_remote_rx_intent_req_ack(struct glink_transport_if *if_ptr,
					   uint32_t lcid, bool granted)
{
	struct command {
		uint16_t id;
		uint16_t lcid;
		uint32_t response;
	};
	struct command cmd;
	struct edge_info *einfo;
	int rcu_id;

	einfo = container_of(if_ptr, struct edge_info, xprt_if);

	if (einfo->intentless)
		return -EOPNOTSUPP;

	rcu_id = srcu_read_lock(&einfo->use_ref);
	if (einfo->in_ssr) {
		srcu_read_unlock(&einfo->use_ref, rcu_id);
		return -EFAULT;
	}

	cmd.id = RX_INTENT_REQ_ACK_CMD;
	cmd.lcid = lcid;
	if (granted)
		cmd.response = 1;
	else
		cmd.response = 0;

	fifo_tx(einfo, &cmd, sizeof(cmd));

	srcu_read_unlock(&einfo->use_ref, rcu_id);
	return 0;
}

/**
 * tx_cmd_set_sigs() - convert a signals cmd to wire format and transmit
 * @if_ptr: The transport to transmit on.
 * @lcid: The local channel id to encode.
 * @sigs: The signals to encode.
 *
 * Return: 0 on success or standard Linux error code.
 */
static int tx_cmd_set_sigs(struct glink_transport_if *if_ptr, uint32_t lcid,
			   uint32_t sigs)
{
	struct command {
		uint16_t id;
		uint16_t lcid;
		uint32_t sigs;
	};
	struct command cmd;
	struct edge_info *einfo;
	int rcu_id;

	einfo = container_of(if_ptr, struct edge_info, xprt_if);

	rcu_id = srcu_read_lock(&einfo->use_ref);
	if (einfo->in_ssr) {
		srcu_read_unlock(&einfo->use_ref, rcu_id);
		return -EFAULT;
	}

	cmd.id = SIGNALS_CMD;
	cmd.lcid = lcid;
	cmd.sigs = sigs;

	fifo_tx(einfo, &cmd, sizeof(cmd));

	srcu_read_unlock(&einfo->use_ref, rcu_id);
	return 0;
}

1850/**
1851 * poll() - poll for data on a channel
1852 * @if_ptr: The transport the channel exists on.
1853 * @lcid: The local channel id.
1854 *
1855 * Return: 1 if data is available, 0 if not, or standard Linux error code.
1856 */
1857static int poll(struct glink_transport_if *if_ptr, uint32_t lcid)
1858{
1859 struct edge_info *einfo;
1860 int rcu_id;
1861
1862 einfo = container_of(if_ptr, struct edge_info, xprt_if);
1863
1864 rcu_id = srcu_read_lock(&einfo->use_ref);
1865 if (einfo->in_ssr) {
1866 srcu_read_unlock(&einfo->use_ref, rcu_id);
1867 return -EFAULT;
1868 }
1869
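	/* process any pending rx data inline before reporting availability */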
1870 if (fifo_read_avail(einfo)) {
1871 __rx_worker(einfo, true);
1872 srcu_read_unlock(&einfo->use_ref, rcu_id);
1873 return 1;
1874 }
1875
1876 srcu_read_unlock(&einfo->use_ref, rcu_id);
1877 return 0;
1878}
1879
1880/**
1881 * mask_rx_irq() - mask the receive irq for a channel
1882 * @if_ptr: The transport the channel exists on.
1883 * @lcid: The local channel id for the channel.
1884 * @mask: True to mask the irq, false to unmask.
1885 * @pstruct: Platform defined structure for handling the masking.
1886 *
1887 * Return: 0 on success or standard Linux error code.
1888 */
1889static int mask_rx_irq(struct glink_transport_if *if_ptr, uint32_t lcid,
1890 bool mask, void *pstruct)
1891{
1892 struct edge_info *einfo;
1893 struct irq_chip *irq_chip;
1894 struct irq_data *irq_data;
1895 int rcu_id;
1896
1897 einfo = container_of(if_ptr, struct edge_info, xprt_if);
1898
1899 rcu_id = srcu_read_lock(&einfo->use_ref);
1900 if (einfo->in_ssr) {
1901 srcu_read_unlock(&einfo->use_ref, rcu_id);
1902 return -EFAULT;
1903 }
1904
1905 irq_chip = irq_get_chip(einfo->irq_line);
1906 if (!irq_chip) {
1907 srcu_read_unlock(&einfo->use_ref, rcu_id);
1908 return -ENODEV;
1909 }
1910
1911 irq_data = irq_get_irq_data(einfo->irq_line);
1912 if (!irq_data) {
1913 srcu_read_unlock(&einfo->use_ref, rcu_id);
1914 return -ENODEV;
1915 }
1916
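	/* pstruct, when provided, is treated as the cpumask for irq affinity */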
1917 if (mask) {
1918 irq_chip->irq_mask(irq_data);
1919 einfo->irq_disabled = true;
1920 if (pstruct)
1921 irq_set_affinity(einfo->irq_line, pstruct);
1922 } else {
1923 irq_chip->irq_unmask(irq_data);
1924 einfo->irq_disabled = false;
1925 }
1926
1927 srcu_read_unlock(&einfo->use_ref, rcu_id);
1928 return 0;
1929}
1930
1931/**
1932 * tx_data() - convert a data/tracer_pkt to wire format and transmit
1933 * @if_ptr: The transport to transmit on.
1934 * @cmd_id: The command ID to transmit.
1935 * @lcid: The local channel id to encode.
1936 * @pctx: The data to encode.
1937 *
1938 * Return: Number of bytes written or standard Linux error code.
1939 */
1940static int tx_data(struct glink_transport_if *if_ptr, uint16_t cmd_id,
1941 uint32_t lcid, struct glink_core_tx_pkt *pctx)
1942{
1943 struct command {
1944 uint16_t id;
1945 uint16_t lcid;
1946 uint32_t riid;
1947 uint32_t size;
1948 uint32_t size_left;
1949 };
1950 struct command cmd;
1951 struct edge_info *einfo;
1952 uint32_t size;
1953 uint32_t zeros_size;
1954 const void *data_start;
1955 char zeros[FIFO_ALIGNMENT] = { 0 };
1956 unsigned long flags;
1957 size_t tx_size = 0;
1958 int rcu_id;
1959 int ret;
1960
1961 if (pctx->size < pctx->size_remaining) {
1962 GLINK_ERR("%s: size remaining exceeds size. Resetting.\n",
1963 __func__);
1964 pctx->size_remaining = pctx->size;
1965 }
1966 if (!pctx->size_remaining)
1967 return 0;
1968
1969 einfo = container_of(if_ptr, struct edge_info, xprt_if);
1970
1971 rcu_id = srcu_read_lock(&einfo->use_ref);
1972 if (einfo->in_ssr) {
1973 srcu_read_unlock(&einfo->use_ref, rcu_id);
1974 return -EFAULT;
1975 }
1976
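	/* intentless mode cannot fragment packets or carry tracer packets */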
1977 if (einfo->intentless &&
1978 (pctx->size_remaining != pctx->size || cmd_id == TRACER_PKT_CMD)) {
1979 srcu_read_unlock(&einfo->use_ref, rcu_id);
1980 return -EINVAL;
1981 }
1982
1983 if (cmd_id == TX_DATA_CMD) {
1984 if (pctx->size_remaining == pctx->size)
1985 cmd.id = TX_DATA_CMD;
1986 else
1987 cmd.id = TX_DATA_CONT_CMD;
1988 } else {
1989 if (pctx->size_remaining == pctx->size)
1990 cmd.id = TRACER_PKT_CMD;
1991 else
1992 cmd.id = TRACER_PKT_CONT_CMD;
1993 }
1994 cmd.lcid = lcid;
1995 cmd.riid = pctx->riid;
1996 data_start = get_tx_vaddr(pctx, pctx->size - pctx->size_remaining,
1997 &tx_size);
1998 if (!data_start) {
1999 GLINK_ERR("%s: invalid data_start\n", __func__);
2000 srcu_read_unlock(&einfo->use_ref, rcu_id);
2001 return -EINVAL;
2002 }
2003
2004 spin_lock_irqsave(&einfo->write_lock, flags);
2005 size = fifo_write_avail(einfo);
2006
2007 /* Intentless clients expect a complete commit or instant failure */
2008 if (einfo->intentless && size < sizeof(cmd) + pctx->size) {
2009 spin_unlock_irqrestore(&einfo->write_lock, flags);
2010 srcu_read_unlock(&einfo->use_ref, rcu_id);
2011 return -ENOSPC;
2012 }
2013
2014 /* Need enough space to write the command and some data */
2015 if (size <= sizeof(cmd)) {
2016 einfo->tx_resume_needed = true;
2017 spin_unlock_irqrestore(&einfo->write_lock, flags);
2018 srcu_read_unlock(&einfo->use_ref, rcu_id);
2019 return -EAGAIN;
2020 }
2021 size -= sizeof(cmd);
2022 if (size > tx_size)
2023 size = tx_size;
2024
2025 cmd.size = size;
2026 pctx->size_remaining -= size;
2027 cmd.size_left = pctx->size_remaining;
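	/* zero-pad the payload to FIFO_ALIGNMENT; only cmd.size bytes are data */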
2028 zeros_size = ALIGN(size, FIFO_ALIGNMENT) - cmd.size;
2029 if (cmd.id == TRACER_PKT_CMD)
2030 tracer_pkt_log_event((void *)(pctx->data), GLINK_XPRT_TX);
2031
2032 ret = fifo_write_complex(einfo, &cmd, sizeof(cmd), data_start, size,
2033 zeros, zeros_size);
2034 if (ret < 0) {
2035 spin_unlock_irqrestore(&einfo->write_lock, flags);
2036 srcu_read_unlock(&einfo->use_ref, rcu_id);
2037 return ret;
2038 }
2039
2040 GLINK_DBG("%s %s: lcid[%u] riid[%u] cmd[%d], size[%d], size_left[%d]\n",
2041 "<SMEM>", __func__, cmd.lcid, cmd.riid, cmd.id, cmd.size,
2042 cmd.size_left);
2043 spin_unlock_irqrestore(&einfo->write_lock, flags);
2044
2045 /* Fake tx_done for intentless since it's not supported over the wire */
2046 if (einfo->intentless) {
2047 spin_lock_irqsave(&einfo->rx_lock, flags);
2048 cmd.id = RX_DONE_CMD;
2049 cmd.lcid = pctx->rcid;
2050 queue_cmd(einfo, &cmd, NULL);
2051 spin_unlock_irqrestore(&einfo->rx_lock, flags);
2052 }
2053
2054 srcu_read_unlock(&einfo->use_ref, rcu_id);
2055 return cmd.size;
2056}
2057
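/*
 * Illustrative flow (assumed caller behavior): the core retries tx() with
 * the same pctx until pctx->size_remaining reaches 0; the first chunk goes
 * out as TX_DATA_CMD and subsequent chunks as TX_DATA_CONT_CMD.
 */
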
2058/**
2059 * tx() - convert a data transmit cmd to wire format and transmit
2060 * @if_ptr: The transport to transmit on.
2061 * @lcid: The local channel id to encode.
2062 * @pctx: The data to encode.
2063 *
2064 * Return: Number of bytes written or standard Linux error code.
2065 */
2066static int tx(struct glink_transport_if *if_ptr, uint32_t lcid,
2067 struct glink_core_tx_pkt *pctx)
2068{
2069 return tx_data(if_ptr, TX_DATA_CMD, lcid, pctx);
2070}
2071
2072/**
2073 * tx_cmd_tracer_pkt() - convert a tracer packet cmd to wire format and transmit
2074 * @if_ptr: The transport to transmit on.
2075 * @lcid: The local channel id to encode.
2076 * @pctx: The data to encode.
2077 *
2078 * Return: Number of bytes written or standard Linux error code.
2079 */
2080static int tx_cmd_tracer_pkt(struct glink_transport_if *if_ptr, uint32_t lcid,
2081 struct glink_core_tx_pkt *pctx)
2082{
2083 return tx_data(if_ptr, TRACER_PKT_CMD, lcid, pctx);
2084}
2085
2086/**
2087 * get_power_vote_ramp_time() - Get the ramp time required for the power
2088 * votes to be applied
2089 * @if_ptr: The transport interface on which power voting is requested.
2090 * @state: The power state for which ramp time is required.
2091 *
2092 * Return: The ramp time specific to the power state, standard error otherwise.
2093 */
2094static unsigned long get_power_vote_ramp_time(
2095 struct glink_transport_if *if_ptr,
2096 uint32_t state)
2097{
2098 struct edge_info *einfo;
2099
2100 einfo = container_of(if_ptr, struct edge_info, xprt_if);
2101
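	/* errors are folded into the unsigned return, suitable for
	 * IS_ERR_VALUE() checks
	 */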
2102 if (state >= einfo->num_pw_states || !(einfo->ramp_time_us))
2103 return (unsigned long)ERR_PTR(-EINVAL);
2104
2105 return einfo->ramp_time_us[state];
2106}
2107
2108/**
2109 * power_vote() - Update the power votes to meet qos requirement
2110 * @if_ptr: The transport interface on which power voting is requested.
2111 * @state: The power state for which the voting should be done.
2112 *
2113 * Return: 0 on Success, standard error otherwise.
2114 */
2115static int power_vote(struct glink_transport_if *if_ptr, uint32_t state)
2116{
2117 return 0;
2118}
2119
2120/**
2121 * power_unvote() - Remove all the power votes
2122 * @if_ptr: The transport interface on which power voting is requested.
2123 *
2124 * Return: 0 on Success, standard error otherwise.
2125 */
2126static int power_unvote(struct glink_transport_if *if_ptr)
2127{
2128 return 0;
2129}
2130
2131/**
Chris Lewa9a78ae2017-05-11 16:47:37 -07002132 * rx_rt_vote() - Add an RX thread RT vote
2133 * @if_ptr: The transport interface on which the RT vote is requested.
2134 *
2135 * Return: 0 on Success, standard error otherwise.
2136 */
2137static int rx_rt_vote(struct glink_transport_if *if_ptr)
2138{
2139 struct edge_info *einfo;
2140 struct sched_param param = { .sched_priority = 1 };
2141 int ret = 0;
2142 unsigned long flags;
2143
2144 einfo = container_of(if_ptr, struct edge_info, xprt_if);
2145 spin_lock_irqsave(&einfo->rt_vote_lock, flags);
2146 if (!einfo->rt_votes)
2147 ret = sched_setscheduler_nocheck(einfo->task, SCHED_FIFO,
2148 &param);
2149 einfo->rt_votes++;
2150 spin_unlock_irqrestore(&einfo->rt_vote_lock, flags);
2151 return ret;
2152}
2153
2154/**
2155 * rx_rt_unvote() - Remove an RX thread RT vote
2156 * @if_ptr: The transport interface on which the RT vote is removed.
2157 *
2158 * Return: 0 on Success, standard error otherwise.
2159 */
2160static int rx_rt_unvote(struct glink_transport_if *if_ptr)
2161{
2162 struct edge_info *einfo;
2163 struct sched_param param = { .sched_priority = 0 };
2164 int ret = 0;
2165 unsigned long flags;
2166
2167 einfo = container_of(if_ptr, struct edge_info, xprt_if);
2168 spin_lock_irqsave(&einfo->rt_vote_lock, flags);
2169 einfo->rt_votes--;
2170 if (!einfo->rt_votes)
2171 ret = sched_setscheduler_nocheck(einfo->task, SCHED_NORMAL,
2172 &param);
2173 spin_unlock_irqrestore(&einfo->rt_vote_lock, flags);
2174 return ret;
2175}
2176
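/*
 * Note: rx_rt_vote() and rx_rt_unvote() calls must be balanced; the first
 * vote elevates the rx kthread to SCHED_FIFO (priority 1) and the last
 * unvote restores SCHED_NORMAL.
 */
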
2177/**
Chris Lewfa6135e2016-08-01 13:29:46 -07002178 * negotiate_features_v1() - determine what features of a version can be used
2179 * @if_ptr: The transport for which features are negotiated.
2180 * @version: The version negotiated.
2181 * @features: The set of requested features.
2182 *
2183 * Return: What set of the requested features can be supported.
2184 */
2185static uint32_t negotiate_features_v1(struct glink_transport_if *if_ptr,
2186 const struct glink_core_version *version,
2187 uint32_t features)
2188{
2189 return features & version->features;
2190}
2191
2192/**
2193 * init_xprt_if() - initialize the xprt_if for an edge
2194 * @einfo: The edge to initialize.
2195 */
2196static void init_xprt_if(struct edge_info *einfo)
2197{
2198 einfo->xprt_if.tx_cmd_version = tx_cmd_version;
2199 einfo->xprt_if.tx_cmd_version_ack = tx_cmd_version_ack;
2200 einfo->xprt_if.set_version = set_version;
2201 einfo->xprt_if.tx_cmd_ch_open = tx_cmd_ch_open;
2202 einfo->xprt_if.tx_cmd_ch_close = tx_cmd_ch_close;
2203 einfo->xprt_if.tx_cmd_ch_remote_open_ack = tx_cmd_ch_remote_open_ack;
2204 einfo->xprt_if.tx_cmd_ch_remote_close_ack = tx_cmd_ch_remote_close_ack;
2205 einfo->xprt_if.ssr = ssr;
2206 einfo->xprt_if.allocate_rx_intent = allocate_rx_intent;
2207 einfo->xprt_if.deallocate_rx_intent = deallocate_rx_intent;
2208 einfo->xprt_if.tx_cmd_local_rx_intent = tx_cmd_local_rx_intent;
2209 einfo->xprt_if.tx_cmd_local_rx_done = tx_cmd_local_rx_done;
2210 einfo->xprt_if.tx = tx;
2211 einfo->xprt_if.tx_cmd_rx_intent_req = tx_cmd_rx_intent_req;
2212 einfo->xprt_if.tx_cmd_remote_rx_intent_req_ack =
2213 tx_cmd_remote_rx_intent_req_ack;
2214 einfo->xprt_if.tx_cmd_set_sigs = tx_cmd_set_sigs;
2215 einfo->xprt_if.poll = poll;
2216 einfo->xprt_if.mask_rx_irq = mask_rx_irq;
2217 einfo->xprt_if.wait_link_down = wait_link_down;
2218 einfo->xprt_if.tx_cmd_tracer_pkt = tx_cmd_tracer_pkt;
2219 einfo->xprt_if.get_power_vote_ramp_time = get_power_vote_ramp_time;
2220 einfo->xprt_if.power_vote = power_vote;
2221 einfo->xprt_if.power_unvote = power_unvote;
Chris Lewa9a78ae2017-05-11 16:47:37 -07002222 einfo->xprt_if.rx_rt_vote = rx_rt_vote;
2223 einfo->xprt_if.rx_rt_unvote = rx_rt_unvote;
Chris Lewfa6135e2016-08-01 13:29:46 -07002224}
2225
2226/**
2227 * init_xprt_cfg() - initialize the xprt_cfg for an edge
2228 * @einfo: The edge to initialize.
2229 * @name: The name of the remote side this edge communicates to.
2230 */
2231static void init_xprt_cfg(struct edge_info *einfo, const char *name)
2232{
2233 einfo->xprt_cfg.name = XPRT_NAME;
2234 einfo->xprt_cfg.edge = name;
2235 einfo->xprt_cfg.versions = versions;
2236 einfo->xprt_cfg.versions_entries = ARRAY_SIZE(versions);
2237 einfo->xprt_cfg.max_cid = SZ_64K;
2238 einfo->xprt_cfg.max_iid = SZ_2G;
2239}
2240
2241/**
2242 * parse_qos_dt_params() - Parse the power states from DT
2243 * @node: Reference to the device tree node for a specific edge.
2244 * @einfo: Edge information for the edge whose probe function is called.
2245 *
2246 * Return: 0 on success, standard error code otherwise.
2247 */
2248static int parse_qos_dt_params(struct device_node *node,
2249 struct edge_info *einfo)
2250{
2251 int rc;
2252 int i;
2253 char *key;
2254 uint32_t *arr32;
2255 uint32_t num_states;
2256
2257 key = "qcom,ramp-time";
2258 if (!of_find_property(node, key, &num_states))
2259 return -ENODEV;
2260
2261 num_states /= sizeof(uint32_t);
2262
2263 einfo->num_pw_states = num_states;
2264
2265 arr32 = kmalloc_array(num_states, sizeof(uint32_t), GFP_KERNEL);
2266 if (!arr32)
2267 return -ENOMEM;
2268
2269 einfo->ramp_time_us = kmalloc_array(num_states, sizeof(unsigned long),
2270 GFP_KERNEL);
2271 if (!einfo->ramp_time_us) {
2272 rc = -ENOMEM;
2273 goto mem_alloc_fail;
2274 }
2275
2276 rc = of_property_read_u32_array(node, key, arr32, num_states);
2277 if (rc) {
2278 rc = -ENODEV;
2279 goto invalid_key;
2280 }
2281 for (i = 0; i < num_states; i++)
2282 einfo->ramp_time_us[i] = arr32[i];
2283
2284 rc = 0;
2285 return rc;
2286
2287invalid_key:
2288 kfree(einfo->ramp_time_us);
2289mem_alloc_fail:
2290 kfree(arr32);
2291 return rc;
2292}
2293
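/*
 * Illustrative devicetree fragment (placeholder values; property name is
 * the one parsed above):
 *
 *	qcom,ramp-time = <10 50 100>;
 *
 * Each cell is the ramp time, in microseconds, for one power state.
 */
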
2294/**
2295 * subsys_name_to_id() - translate a subsystem name to a processor id
2296 * @name: The subsystem name to look up.
2297 *
2298 * Return: The processor id corresponding to @name or standard Linux error code.
2299 */
2300static int subsys_name_to_id(const char *name)
2301{
2302 if (!name)
2303 return -ENODEV;
2304
2305 if (!strcmp(name, "apss"))
2306 return SMEM_APPS;
2307 if (!strcmp(name, "dsps"))
2308 return SMEM_DSPS;
2309 if (!strcmp(name, "lpass"))
2310 return SMEM_Q6;
2311 if (!strcmp(name, "mpss"))
2312 return SMEM_MODEM;
2313 if (!strcmp(name, "rpm"))
2314 return SMEM_RPM;
2315 if (!strcmp(name, "wcnss"))
2316 return SMEM_WCNSS;
2317 if (!strcmp(name, "spss"))
2318 return SMEM_SPSS;
2319 if (!strcmp(name, "cdsp"))
2320 return SMEM_CDSP;
2321 return -ENODEV;
2322}
2323
2324static int glink_smem_native_probe(struct platform_device *pdev)
2325{
2326 struct device_node *node;
2327 struct device_node *phandle_node;
2328 struct edge_info *einfo;
2329 int rc;
2330 char *key;
2331 const char *subsys_name;
2332 uint32_t irq_line;
2333 uint32_t irq_mask;
2334 struct resource *r;
2335
2336 node = pdev->dev.of_node;
2337
2338 einfo = kzalloc(sizeof(*einfo), GFP_KERNEL);
2339 if (!einfo) {
2340 rc = -ENOMEM;
2341 goto edge_info_alloc_fail;
2342 }
2343
2344 key = "label";
2345 subsys_name = of_get_property(node, key, NULL);
2346 if (!subsys_name) {
2347 pr_err("%s: missing key %s\n", __func__, key);
2348 rc = -ENODEV;
2349 goto missing_key;
2350 }
2351
2352 key = "interrupts";
2353 irq_line = irq_of_parse_and_map(node, 0);
2354 if (!irq_line) {
2355 pr_err("%s: missing key %s\n", __func__, key);
2356 rc = -ENODEV;
2357 goto missing_key;
2358 }
2359
2360 key = "qcom,irq-mask";
2361 rc = of_property_read_u32(node, key, &irq_mask);
2362 if (rc) {
2363 pr_err("%s: missing key %s\n", __func__, key);
2364 rc = -ENODEV;
2365 goto missing_key;
2366 }
2367
2368 key = "irq-reg-base";
2369 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
2370 if (!r) {
2371 pr_err("%s: missing key %s\n", __func__, key);
2372 rc = -ENODEV;
2373 goto missing_key;
2374 }
2375
2376 if (subsys_name_to_id(subsys_name) == -ENODEV) {
2377 pr_err("%s: unknown subsystem: %s\n", __func__, subsys_name);
2378 rc = -ENODEV;
2379 goto invalid_key;
2380 }
2381 einfo->remote_proc_id = subsys_name_to_id(subsys_name);
2382
2383 init_xprt_cfg(einfo, subsys_name);
2384 init_xprt_if(einfo);
2385 spin_lock_init(&einfo->write_lock);
2386 init_waitqueue_head(&einfo->tx_blocked_queue);
Kyle Yan65be4a52016-10-31 15:05:00 -07002387 kthread_init_work(&einfo->kwork, rx_worker);
2388 kthread_init_worker(&einfo->kworker);
Dhoat Harpale9d73372017-03-10 21:23:03 +05302389 INIT_WORK(&einfo->wakeup_work, tx_wakeup_worker);
Chris Lewfa6135e2016-08-01 13:29:46 -07002390 tasklet_init(&einfo->tasklet, rx_worker_atomic, (unsigned long)einfo);
2391 einfo->read_from_fifo = read_from_fifo;
2392 einfo->write_to_fifo = write_to_fifo;
2393 init_srcu_struct(&einfo->use_ref);
2394 spin_lock_init(&einfo->rx_lock);
2395 INIT_LIST_HEAD(&einfo->deferred_cmds);
Chris Lewa9a78ae2017-05-11 16:47:37 -07002396 spin_lock_init(&einfo->rt_vote_lock);
2397 einfo->rt_votes = 0;
Chris Lewfa6135e2016-08-01 13:29:46 -07002398
2399 mutex_lock(&probe_lock);
2400 if (edge_infos[einfo->remote_proc_id]) {
2401 pr_err("%s: duplicate subsys %s is not valid\n", __func__,
2402 subsys_name);
2403 rc = -ENODEV;
2404 mutex_unlock(&probe_lock);
2405 goto invalid_key;
2406 }
2407 edge_infos[einfo->remote_proc_id] = einfo;
2408 mutex_unlock(&probe_lock);
2409
2410 einfo->out_irq_mask = irq_mask;
2411 einfo->out_irq_reg = ioremap_nocache(r->start, resource_size(r));
2412 if (!einfo->out_irq_reg) {
2413 pr_err("%s: unable to map irq reg\n", __func__);
2414 rc = -ENOMEM;
2415 goto ioremap_fail;
2416 }
2417
2418 einfo->task = kthread_run(kthread_worker_fn, &einfo->kworker,
2419 "smem_native_%s", subsys_name);
2420 if (IS_ERR(einfo->task)) {
2421 rc = PTR_ERR(einfo->task);
2422 pr_err("%s: kthread_run failed %d\n", __func__, rc);
2423 goto kthread_fail;
2424 }
2425
2426 einfo->tx_ch_desc = smem_alloc(SMEM_GLINK_NATIVE_XPRT_DESCRIPTOR,
2427 SMEM_CH_DESC_SIZE,
2428 einfo->remote_proc_id,
2429 0);
2430 if (PTR_ERR(einfo->tx_ch_desc) == -EPROBE_DEFER) {
2431 rc = -EPROBE_DEFER;
2432 goto smem_alloc_fail;
2433 }
2434 if (!einfo->tx_ch_desc) {
2435 pr_err("%s: smem alloc of ch descriptor failed\n", __func__);
2436 rc = -ENOMEM;
2437 goto smem_alloc_fail;
2438 }
2439 einfo->rx_ch_desc = einfo->tx_ch_desc + 1;
2440
2441 einfo->tx_fifo_size = SZ_16K;
2442 einfo->tx_fifo = smem_alloc(SMEM_GLINK_NATIVE_XPRT_FIFO_0,
2443 einfo->tx_fifo_size,
2444 einfo->remote_proc_id,
Dhoat Harpal07eb7032017-04-19 11:46:59 +05302445 0);
Chris Lewfa6135e2016-08-01 13:29:46 -07002446 if (!einfo->tx_fifo) {
2447 pr_err("%s: smem alloc of tx fifo failed\n", __func__);
2448 rc = -ENOMEM;
2449 goto smem_alloc_fail;
2450 }
2451
2452 key = "qcom,qos-config";
2453 phandle_node = of_parse_phandle(node, key, 0);
2454 if (phandle_node && !(of_get_glink_core_qos_cfg(phandle_node,
2455 &einfo->xprt_cfg)))
2456 parse_qos_dt_params(node, einfo);
2457
2458 rc = glink_core_register_transport(&einfo->xprt_if, &einfo->xprt_cfg);
2459 if (rc == -EPROBE_DEFER)
2460 goto reg_xprt_fail;
2461 if (rc) {
2462 pr_err("%s: glink core register transport failed: %d\n",
2463 __func__, rc);
2464 goto reg_xprt_fail;
2465 }
2466
2467 einfo->irq_line = irq_line;
2468 rc = request_irq(irq_line, irq_handler,
2469 IRQF_TRIGGER_RISING | IRQF_NO_SUSPEND | IRQF_SHARED,
2470 node->name, einfo);
2471 if (rc < 0) {
2472 pr_err("%s: request_irq on %d failed: %d\n", __func__, irq_line,
2473 rc);
2474 goto request_irq_fail;
2475 }
2476 rc = enable_irq_wake(irq_line);
2477 if (rc < 0)
2478 pr_err("%s: enable_irq_wake() failed on %d\n", __func__,
2479 irq_line);
2480
2481 register_debugfs_info(einfo);
2482 /* fake an interrupt on this edge to see if the remote side is up */
2483 irq_handler(0, einfo);
2484 return 0;
2485
2486request_irq_fail:
2487 glink_core_unregister_transport(&einfo->xprt_if);
2488reg_xprt_fail:
2489smem_alloc_fail:
Kyle Yan65be4a52016-10-31 15:05:00 -07002490 kthread_flush_worker(&einfo->kworker);
Dhoat Harpale9d73372017-03-10 21:23:03 +05302491 flush_work(&einfo->wakeup_work);
Chris Lewfa6135e2016-08-01 13:29:46 -07002492 kthread_stop(einfo->task);
2493 einfo->task = NULL;
2494 tasklet_kill(&einfo->tasklet);
2495kthread_fail:
2496 iounmap(einfo->out_irq_reg);
2497ioremap_fail:
2498 mutex_lock(&probe_lock);
2499 edge_infos[einfo->remote_proc_id] = NULL;
2500 mutex_unlock(&probe_lock);
2501invalid_key:
2502missing_key:
2503 kfree(einfo);
2504edge_info_alloc_fail:
2505 return rc;
2506}
2507
2508static int glink_rpm_native_probe(struct platform_device *pdev)
2509{
2510 struct device_node *node;
2511 struct edge_info *einfo;
2512 int rc;
2513 char *key;
2514 const char *subsys_name;
2515 uint32_t irq_line;
2516 uint32_t irq_mask;
2517 struct resource *irq_r;
2518 struct resource *msgram_r;
2519 void __iomem *msgram;
2520 char toc[RPM_TOC_SIZE];
2521 uint32_t *tocp;
2522 uint32_t num_toc_entries;
2523
2524 node = pdev->dev.of_node;
2525
2526 einfo = kzalloc(sizeof(*einfo), GFP_KERNEL);
2527 if (!einfo) {
2528 rc = -ENOMEM;
2529 goto edge_info_alloc_fail;
2530 }
2531
2532 subsys_name = "rpm";
2533
2534 key = "interrupts";
2535 irq_line = irq_of_parse_and_map(node, 0);
2536 if (!irq_line) {
2537 pr_err("%s: missing key %s\n", __func__, key);
2538 rc = -ENODEV;
2539 goto missing_key;
2540 }
2541
2542 key = "qcom,irq-mask";
2543 rc = of_property_read_u32(node, key, &irq_mask);
2544 if (rc) {
2545 pr_err("%s: missing key %s\n", __func__, key);
2546 rc = -ENODEV;
2547 goto missing_key;
2548 }
2549
2550 key = "irq-reg-base";
2551 irq_r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
2552 if (!irq_r) {
2553 pr_err("%s: missing key %s\n", __func__, key);
2554 rc = -ENODEV;
2555 goto missing_key;
2556 }
2557
2558 key = "msgram";
2559 msgram_r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
2560 if (!msgram_r) {
2561 pr_err("%s: missing key %s\n", __func__, key);
2562 rc = -ENODEV;
2563 goto missing_key;
2564 }
2565
2566 if (subsys_name_to_id(subsys_name) == -ENODEV) {
2567 pr_err("%s: unknown subsystem: %s\n", __func__, subsys_name);
2568 rc = -ENODEV;
2569 goto invalid_key;
2570 }
2571 einfo->remote_proc_id = subsys_name_to_id(subsys_name);
2572
2573 init_xprt_cfg(einfo, subsys_name);
2574 init_xprt_if(einfo);
2575 spin_lock_init(&einfo->write_lock);
2576 init_waitqueue_head(&einfo->tx_blocked_queue);
Kyle Yan65be4a52016-10-31 15:05:00 -07002577 kthread_init_work(&einfo->kwork, rx_worker);
2578 kthread_init_worker(&einfo->kworker);
Dhoat Harpale9d73372017-03-10 21:23:03 +05302579 INIT_WORK(&einfo->wakeup_work, tx_wakeup_worker);
Chris Lewfa6135e2016-08-01 13:29:46 -07002580 tasklet_init(&einfo->tasklet, rx_worker_atomic, (unsigned long)einfo);
2581 einfo->intentless = true;
2582 einfo->read_from_fifo = memcpy32_fromio;
2583 einfo->write_to_fifo = memcpy32_toio;
2584 init_srcu_struct(&einfo->use_ref);
2585 spin_lock_init(&einfo->rx_lock);
2586 INIT_LIST_HEAD(&einfo->deferred_cmds);
2587
2588 mutex_lock(&probe_lock);
2589 if (edge_infos[einfo->remote_proc_id]) {
2590 pr_err("%s: duplicate subsys %s is not valid\n", __func__,
2591 subsys_name);
2592 rc = -ENODEV;
2593 mutex_unlock(&probe_lock);
2594 goto invalid_key;
2595 }
2596 edge_infos[einfo->remote_proc_id] = einfo;
2597 mutex_unlock(&probe_lock);
2598
2599 einfo->out_irq_mask = irq_mask;
2600 einfo->out_irq_reg = ioremap_nocache(irq_r->start,
2601 resource_size(irq_r));
2602 if (!einfo->out_irq_reg) {
2603 pr_err("%s: unable to map irq reg\n", __func__);
2604 rc = -ENOMEM;
2605 goto irq_ioremap_fail;
2606 }
2607
2608 msgram = ioremap_nocache(msgram_r->start, resource_size(msgram_r));
2609 if (!msgram) {
2610 pr_err("%s: unable to map msgram\n", __func__);
2611 rc = -ENOMEM;
2612 goto msgram_ioremap_fail;
2613 }
2614
2615 einfo->task = kthread_run(kthread_worker_fn, &einfo->kworker,
2616 "smem_native_%s", subsys_name);
2617 if (IS_ERR(einfo->task)) {
2618 rc = PTR_ERR(einfo->task);
2619 pr_err("%s: kthread_run failed %d\n", __func__, rc);
2620 goto kthread_fail;
2621 }
2622
2623 memcpy32_fromio(toc, msgram + resource_size(msgram_r) - RPM_TOC_SIZE,
2624 RPM_TOC_SIZE);
2625 tocp = (uint32_t *)toc;
2626 if (*tocp != RPM_TOC_ID) {
2627 rc = -ENODEV;
2628 pr_err("%s: TOC id %d is not valid\n", __func__, *tocp);
2629 goto toc_init_fail;
2630 }
2631 ++tocp;
2632 num_toc_entries = *tocp;
2633 if (num_toc_entries > RPM_MAX_TOC_ENTRIES) {
2634 rc = -ENODEV;
2635 pr_err("%s: %d is too many toc entries\n", __func__,
2636 num_toc_entries);
2637 goto toc_init_fail;
2638 }
2639 ++tocp;
2640
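	/* each TOC entry is three 32-bit words: fifo id, msgram offset, size */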
2641 for (rc = 0; rc < num_toc_entries; ++rc) {
2642 if (*tocp != RPM_TX_FIFO_ID) {
2643 tocp += 3;
2644 continue;
2645 }
2646 ++tocp;
2647 einfo->tx_ch_desc = msgram + *tocp;
2648 einfo->tx_fifo = einfo->tx_ch_desc + 1;
2649 if ((uintptr_t)einfo->tx_fifo >
2650 (uintptr_t)(msgram + resource_size(msgram_r))) {
2651 pr_err("%s: invalid tx fifo address\n", __func__);
2652 einfo->tx_fifo = NULL;
2653 break;
2654 }
2655 ++tocp;
2656 einfo->tx_fifo_size = *tocp;
2657 if (einfo->tx_fifo_size > resource_size(msgram_r) ||
2658 (uintptr_t)(einfo->tx_fifo + einfo->tx_fifo_size) >
2659 (uintptr_t)(msgram + resource_size(msgram_r))) {
2660 pr_err("%s: invalid tx fifo size\n", __func__);
2661 einfo->tx_fifo = NULL;
2662 break;
2663 }
2664 break;
2665 }
2666 if (!einfo->tx_fifo) {
2667 rc = -ENODEV;
2668 pr_err("%s: tx fifo not found\n", __func__);
2669 goto toc_init_fail;
2670 }
2671
2672 tocp = (uint32_t *)toc;
2673 tocp += 2;
2674 for (rc = 0; rc < num_toc_entries; ++rc) {
2675 if (*tocp != RPM_RX_FIFO_ID) {
2676 tocp += 3;
2677 continue;
2678 }
2679 ++tocp;
2680 einfo->rx_ch_desc = msgram + *tocp;
2681 einfo->rx_fifo = einfo->rx_ch_desc + 1;
2682 if ((uintptr_t)einfo->rx_fifo >
2683 (uintptr_t)(msgram + resource_size(msgram_r))) {
2684 pr_err("%s: invalid rx fifo address\n", __func__);
2685 einfo->rx_fifo = NULL;
2686 break;
2687 }
2688 ++tocp;
2689 einfo->rx_fifo_size = *tocp;
2690 if (einfo->rx_fifo_size > resource_size(msgram_r) ||
2691 (uintptr_t)(einfo->rx_fifo + einfo->rx_fifo_size) >
2692 (uintptr_t)(msgram + resource_size(msgram_r))) {
2693 pr_err("%s: invalid rx fifo size\n", __func__);
2694 einfo->rx_fifo = NULL;
2695 break;
2696 }
2697 break;
2698 }
2699 if (!einfo->rx_fifo) {
2700 rc = -ENODEV;
2701 pr_err("%s: rx fifo not found\n", __func__);
2702 goto toc_init_fail;
2703 }
2704
2705 einfo->tx_ch_desc->write_index = 0;
2706 einfo->rx_ch_desc->read_index = 0;
2707
2708 rc = glink_core_register_transport(&einfo->xprt_if, &einfo->xprt_cfg);
2709 if (rc == -EPROBE_DEFER)
2710 goto reg_xprt_fail;
2711 if (rc) {
2712 pr_err("%s: glink core register transport failed: %d\n",
2713 __func__, rc);
2714 goto reg_xprt_fail;
2715 }
2716
2717 einfo->irq_line = irq_line;
2718 rc = request_irq(irq_line, irq_handler,
2719 IRQF_TRIGGER_RISING | IRQF_NO_SUSPEND | IRQF_SHARED,
2720 node->name, einfo);
2721 if (rc < 0) {
2722 pr_err("%s: request_irq on %d failed: %d\n", __func__, irq_line,
2723 rc);
2724 goto request_irq_fail;
2725 }
2726 rc = enable_irq_wake(irq_line);
2727 if (rc < 0)
2728 pr_err("%s: enable_irq_wake() failed on %d\n", __func__,
2729 irq_line);
2730
2731 register_debugfs_info(einfo);
2732 einfo->xprt_if.glink_core_if_ptr->link_up(&einfo->xprt_if);
2733 return 0;
2734
2735request_irq_fail:
2736 glink_core_unregister_transport(&einfo->xprt_if);
2737reg_xprt_fail:
2738toc_init_fail:
Kyle Yan65be4a52016-10-31 15:05:00 -07002739 kthread_flush_worker(&einfo->kworker);
Dhoat Harpale9d73372017-03-10 21:23:03 +05302740 flush_work(&einfo->wakeup_work);
Chris Lewfa6135e2016-08-01 13:29:46 -07002741 kthread_stop(einfo->task);
2742 einfo->task = NULL;
2743 tasklet_kill(&einfo->tasklet);
2744kthread_fail:
2745 iounmap(msgram);
2746msgram_ioremap_fail:
2747 iounmap(einfo->out_irq_reg);
2748irq_ioremap_fail:
2749 mutex_lock(&probe_lock);
2750 edge_infos[einfo->remote_proc_id] = NULL;
2751 mutex_unlock(&probe_lock);
2752invalid_key:
2753missing_key:
2754 kfree(einfo);
2755edge_info_alloc_fail:
2756 return rc;
2757}
2758
2759static int glink_mailbox_probe(struct platform_device *pdev)
2760{
2761 struct device_node *node;
2762 struct edge_info *einfo;
2763 int rc;
2764 char *key;
2765 const char *subsys_name;
2766 uint32_t irq_line;
2767 uint32_t irq_mask;
2768 struct resource *irq_r;
2769 struct resource *mbox_loc_r;
2770 struct resource *mbox_size_r;
2771 struct resource *rx_reset_r;
2772 void *mbox_loc;
2773 void *mbox_size;
2774 struct mailbox_config_info *mbox_cfg;
2775 uint32_t mbox_cfg_size;
2776 phys_addr_t cfg_p_addr;
2777
2778 node = pdev->dev.of_node;
2779
2780 einfo = kzalloc(sizeof(*einfo), GFP_KERNEL);
2781 if (!einfo) {
2782 rc = -ENOMEM;
2783 goto edge_info_alloc_fail;
2784 }
2785
2786 key = "label";
2787 subsys_name = of_get_property(node, key, NULL);
2788 if (!subsys_name) {
2789 pr_err("%s: missing key %s\n", __func__, key);
2790 rc = -ENODEV;
2791 goto missing_key;
2792 }
2793
2794 key = "interrupts";
2795 irq_line = irq_of_parse_and_map(node, 0);
2796 if (!irq_line) {
2797 pr_err("%s: missing key %s\n", __func__, key);
2798 rc = -ENODEV;
2799 goto missing_key;
2800 }
2801
2802 key = "qcom,irq-mask";
2803 rc = of_property_read_u32(node, key, &irq_mask);
2804 if (rc) {
2805 pr_err("%s: missing key %s\n", __func__, key);
2806 rc = -ENODEV;
2807 goto missing_key;
2808 }
2809
2810 key = "irq-reg-base";
2811 irq_r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
2812 if (!irq_r) {
2813 pr_err("%s: missing key %s\n", __func__, key);
2814 rc = -ENODEV;
2815 goto missing_key;
2816 }
2817
2818 key = "mbox-loc-addr";
2819 mbox_loc_r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
2820 if (!mbox_loc_r) {
2821 pr_err("%s: missing key %s\n", __func__, key);
2822 rc = -ENODEV;
2823 goto missing_key;
2824 }
2825
2826 key = "mbox-loc-size";
2827 mbox_size_r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
2828 if (!mbox_size_r) {
2829 pr_err("%s: missing key %s\n", __func__, key);
2830 rc = -ENODEV;
2831 goto missing_key;
2832 }
2833
2834 key = "irq-rx-reset";
2835 rx_reset_r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
2836 if (!rx_reset_r) {
2837 pr_err("%s: missing key %s\n", __func__, key);
2838 rc = -ENODEV;
2839 goto missing_key;
2840 }
2841
2842 key = "qcom,tx-ring-size";
2843 rc = of_property_read_u32(node, key, &einfo->tx_fifo_size);
2844 if (rc) {
2845 pr_err("%s: missing key %s\n", __func__, key);
2846 rc = -ENODEV;
2847 goto missing_key;
2848 }
2849
2850 key = "qcom,rx-ring-size";
2851 rc = of_property_read_u32(node, key, &einfo->rx_fifo_size);
2852 if (rc) {
2853 pr_err("%s: missing key %s\n", __func__, key);
2854 rc = -ENODEV;
2855 goto missing_key;
2856 }
2857
2858 if (subsys_name_to_id(subsys_name) == -ENODEV) {
2859 pr_err("%s: unknown subsystem: %s\n", __func__, subsys_name);
2860 rc = -ENODEV;
2861 goto invalid_key;
2862 }
2863 einfo->remote_proc_id = subsys_name_to_id(subsys_name);
2864
2865 init_xprt_cfg(einfo, subsys_name);
2866 einfo->xprt_cfg.name = "mailbox";
2867 init_xprt_if(einfo);
2868 spin_lock_init(&einfo->write_lock);
2869 init_waitqueue_head(&einfo->tx_blocked_queue);
Kyle Yan65be4a52016-10-31 15:05:00 -07002870 kthread_init_work(&einfo->kwork, rx_worker);
2871 kthread_init_worker(&einfo->kworker);
Dhoat Harpale9d73372017-03-10 21:23:03 +05302872 INIT_WORK(&einfo->wakeup_work, tx_wakeup_worker);
Chris Lewfa6135e2016-08-01 13:29:46 -07002873 tasklet_init(&einfo->tasklet, rx_worker_atomic, (unsigned long)einfo);
2874 einfo->read_from_fifo = read_from_fifo;
2875 einfo->write_to_fifo = write_to_fifo;
2876 init_srcu_struct(&einfo->use_ref);
2877 spin_lock_init(&einfo->rx_lock);
2878 INIT_LIST_HEAD(&einfo->deferred_cmds);
2879
2880 mutex_lock(&probe_lock);
2881 if (edge_infos[einfo->remote_proc_id]) {
2882 pr_err("%s: duplicate subsys %s is not valid\n", __func__,
2883 subsys_name);
2884 rc = -ENODEV;
2885 mutex_unlock(&probe_lock);
2886 goto invalid_key;
2887 }
2888 edge_infos[einfo->remote_proc_id] = einfo;
2889 mutex_unlock(&probe_lock);
2890
2891 einfo->out_irq_mask = irq_mask;
2892 einfo->out_irq_reg = ioremap_nocache(irq_r->start,
2893 resource_size(irq_r));
2894 if (!einfo->out_irq_reg) {
2895 pr_err("%s: unable to map irq reg\n", __func__);
2896 rc = -ENOMEM;
2897 goto irq_ioremap_fail;
2898 }
2899
2900 mbox_loc = ioremap_nocache(mbox_loc_r->start,
2901 resource_size(mbox_loc_r));
2902 if (!mbox_loc) {
2903 pr_err("%s: unable to map mailbox location reg\n", __func__);
2904 rc = -ENOMEM;
2905 goto mbox_loc_ioremap_fail;
2906 }
2907
2908 mbox_size = ioremap_nocache(mbox_size_r->start,
2909 resource_size(mbox_size_r));
2910 if (!mbox_size) {
2911 pr_err("%s: unable to map mailbox size reg\n", __func__);
2912 rc = -ENOMEM;
2913 goto mbox_size_ioremap_fail;
2914 }
2915
2916 einfo->rx_reset_reg = ioremap_nocache(rx_reset_r->start,
2917 resource_size(rx_reset_r));
2918 if (!einfo->rx_reset_reg) {
2919 pr_err("%s: unable to map rx reset reg\n", __func__);
2920 rc = -ENOMEM;
2921 goto rx_reset_ioremap_fail;
2922 }
2923
2924 einfo->task = kthread_run(kthread_worker_fn, &einfo->kworker,
2925 "smem_native_%s", subsys_name);
2926 if (IS_ERR(einfo->task)) {
2927 rc = PTR_ERR(einfo->task);
2928 pr_err("%s: kthread_run failed %d\n", __func__, rc);
2929 goto kthread_fail;
2930 }
2931
2932 mbox_cfg_size = sizeof(*mbox_cfg) + einfo->tx_fifo_size +
2933 einfo->rx_fifo_size;
2934 mbox_cfg = smem_alloc(SMEM_GLINK_NATIVE_XPRT_DESCRIPTOR,
2935 mbox_cfg_size,
2936 einfo->remote_proc_id,
2937 0);
2938 if (PTR_ERR(mbox_cfg) == -EPROBE_DEFER) {
2939 rc = -EPROBE_DEFER;
2940 goto smem_alloc_fail;
2941 }
2942 if (!mbox_cfg) {
2943 pr_err("%s: smem alloc of mailbox struct failed\n", __func__);
2944 rc = -ENOMEM;
2945 goto smem_alloc_fail;
2946 }
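	/* both channel descriptors live inside the config block; the tx and
	 * rx FIFOs follow it back to back
	 */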
2947 einfo->mailbox = mbox_cfg;
2948 einfo->tx_ch_desc = (struct channel_desc *)(&mbox_cfg->tx_read_index);
2949 einfo->rx_ch_desc = (struct channel_desc *)(&mbox_cfg->rx_read_index);
2950 mbox_cfg->tx_size = einfo->tx_fifo_size;
2951 mbox_cfg->rx_size = einfo->rx_fifo_size;
2952 einfo->tx_fifo = &mbox_cfg->fifo[0];
2953
2954 rc = glink_core_register_transport(&einfo->xprt_if, &einfo->xprt_cfg);
2955 if (rc == -EPROBE_DEFER)
2956 goto reg_xprt_fail;
2957 if (rc) {
2958 pr_err("%s: glink core register transport failed: %d\n",
2959 __func__, rc);
2960 goto reg_xprt_fail;
2961 }
2962
2963 einfo->irq_line = irq_line;
2964 rc = request_irq(irq_line, irq_handler,
2965 IRQF_TRIGGER_HIGH | IRQF_NO_SUSPEND | IRQF_SHARED,
2966 node->name, einfo);
2967 if (rc < 0) {
2968 pr_err("%s: request_irq on %d failed: %d\n", __func__, irq_line,
2969 rc);
2970 goto request_irq_fail;
2971 }
2972 rc = enable_irq_wake(irq_line);
2973 if (rc < 0)
2974 pr_err("%s: enable_irq_wake() failed on %d\n", __func__,
2975 irq_line);
2976
2977 register_debugfs_info(einfo);
2978
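	/* publish the config size and physical address, then kick the remote */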
2979 writel_relaxed(mbox_cfg_size, mbox_size);
2980 cfg_p_addr = smem_virt_to_phys(mbox_cfg);
2981 writel_relaxed(lower_32_bits(cfg_p_addr), mbox_loc);
2982 writel_relaxed(upper_32_bits(cfg_p_addr), mbox_loc + 4);
2983 send_irq(einfo);
2984 iounmap(mbox_size);
2985 iounmap(mbox_loc);
2986 return 0;
2987
2988request_irq_fail:
2989 glink_core_unregister_transport(&einfo->xprt_if);
2990reg_xprt_fail:
2991smem_alloc_fail:
Kyle Yan65be4a52016-10-31 15:05:00 -07002992 kthread_flush_worker(&einfo->kworker);
Dhoat Harpale9d73372017-03-10 21:23:03 +05302993 flush_work(&einfo->wakeup_work);
Chris Lewfa6135e2016-08-01 13:29:46 -07002994 kthread_stop(einfo->task);
2995 einfo->task = NULL;
2996 tasklet_kill(&einfo->tasklet);
2997kthread_fail:
2998 iounmap(einfo->rx_reset_reg);
2999rx_reset_ioremap_fail:
3000 iounmap(mbox_size);
3001mbox_size_ioremap_fail:
3002 iounmap(mbox_loc);
3003mbox_loc_ioremap_fail:
3004 iounmap(einfo->out_irq_reg);
3005irq_ioremap_fail:
3006 mutex_lock(&probe_lock);
3007 edge_infos[einfo->remote_proc_id] = NULL;
3008 mutex_unlock(&probe_lock);
3009invalid_key:
3010missing_key:
3011 kfree(einfo);
3012edge_info_alloc_fail:
3013 return rc;
3014}
3015
3016#if defined(CONFIG_DEBUG_FS)
3017/**
3018 * debug_edge() - generates formatted text output displaying current edge state
3019 * @s: File to send the output to.
3020 */
3021static void debug_edge(struct seq_file *s)
3022{
3023 struct edge_info *einfo;
3024 struct glink_dbgfs_data *dfs_d;
3025
3026 dfs_d = s->private;
3027 einfo = dfs_d->priv_data;
3028
3029/*
3030 * formatted, human-readable edge state output, e.g.:
3031 * TX/RX fifo information:
3032ID|EDGE |TX READ |TX WRITE |TX SIZE |RX READ |RX WRITE |RX SIZE
3033-------------------------------------------------------------------------------
303401|mpss |0x00000128|0x00000128|0x00000800|0x00000256|0x00000256|0x00001000
3035 *
3036 * Interrupt information:
3037 * EDGE |TX INT |RX INT
3038 * --------------------------------
3039 * mpss |0x00000006|0x00000008
3040 */
3041 seq_puts(s, "TX/RX fifo information:\n");
3042 seq_printf(s, "%2s|%-10s|%-10s|%-10s|%-10s|%-10s|%-10s|%-10s\n",
3043 "ID",
3044 "EDGE",
3045 "TX READ",
3046 "TX WRITE",
3047 "TX SIZE",
3048 "RX READ",
3049 "RX WRITE",
3050 "RX SIZE");
3051 seq_puts(s,
3052 "-------------------------------------------------------------------------------\n");
3053 if (!einfo)
3054 return;
3055
3056 seq_printf(s, "%02i|%-10s|", einfo->remote_proc_id,
3057 einfo->xprt_cfg.edge);
3058 if (!einfo->rx_fifo)
3059 seq_puts(s, "Link Not Up\n");
3060 else
3061 seq_printf(s, "0x%08X|0x%08X|0x%08X|0x%08X|0x%08X|0x%08X\n",
3062 einfo->tx_ch_desc->read_index,
3063 einfo->tx_ch_desc->write_index,
3064 einfo->tx_fifo_size,
3065 einfo->rx_ch_desc->read_index,
3066 einfo->rx_ch_desc->write_index,
3067 einfo->rx_fifo_size);
3068
3069 seq_puts(s, "\nInterrupt information:\n");
3070 seq_printf(s, "%-10s|%-10s|%-10s\n", "EDGE", "TX INT", "RX INT");
3071 seq_puts(s, "--------------------------------\n");
3072 seq_printf(s, "%-10s|0x%08X|0x%08X\n", einfo->xprt_cfg.edge,
3073 einfo->tx_irq_count,
3074 einfo->rx_irq_count);
3075}
3076
3077/**
3078 * register_debugfs_info() - initialize debugfs device entries
3079 * @einfo: Pointer to specific edge_info for which register is called.
3080 */
3081static void register_debugfs_info(struct edge_info *einfo)
3082{
3083 struct glink_dbgfs dfs;
3084 char *curr_dir_name;
3085 int dir_name_len;
3086
3087 dir_name_len = strlen(einfo->xprt_cfg.edge) +
3088 strlen(einfo->xprt_cfg.name) + 2;
3089 curr_dir_name = kmalloc(dir_name_len, GFP_KERNEL);
3090 if (!curr_dir_name) {
3091 GLINK_ERR("%s: Memory allocation failed\n", __func__);
3092 return;
3093 }
3094
3095 snprintf(curr_dir_name, dir_name_len, "%s_%s",
3096 einfo->xprt_cfg.edge, einfo->xprt_cfg.name);
3097 dfs.curr_name = curr_dir_name;
3098 dfs.par_name = "xprt";
3099 dfs.b_dir_create = false;
3100 glink_debugfs_create("XPRT_INFO", debug_edge,
3101 &dfs, einfo, false);
3102 kfree(curr_dir_name);
3103}
3104
3105#else
3106static void register_debugfs_info(struct edge_info *einfo)
3107{
3108}
3109#endif /* CONFIG_DEBUG_FS */
3110
3111static const struct of_device_id smem_match_table[] = {
3112 { .compatible = "qcom,glink-smem-native-xprt" },
3113 {},
3114};
3115
3116static struct platform_driver glink_smem_native_driver = {
3117 .probe = glink_smem_native_probe,
3118 .driver = {
3119 .name = "msm_glink_smem_native_xprt",
3120 .owner = THIS_MODULE,
3121 .of_match_table = smem_match_table,
3122 },
3123};
3124
3125static const struct of_device_id rpm_match_table[] = {
3126 { .compatible = "qcom,glink-rpm-native-xprt" },
3127 {},
3128};
3129
3130static struct platform_driver glink_rpm_native_driver = {
3131 .probe = glink_rpm_native_probe,
3132 .driver = {
3133 .name = "msm_glink_rpm_native_xprt",
3134 .owner = THIS_MODULE,
3135 .of_match_table = rpm_match_table,
3136 },
3137};
3138
3139static const struct of_device_id mailbox_match_table[] = {
3140 { .compatible = "qcom,glink-mailbox-xprt" },
3141 {},
3142};
3143
3144static struct platform_driver glink_mailbox_driver = {
3145 .probe = glink_mailbox_probe,
3146 .driver = {
3147 .name = "msm_glink_mailbox_xprt",
3148 .owner = THIS_MODULE,
3149 .of_match_table = mailbox_match_table,
3150 },
3151};
3152
3153static int __init glink_smem_native_xprt_init(void)
3154{
3155 int rc;
3156
3157 rc = platform_driver_register(&glink_smem_native_driver);
3158 if (rc) {
3159 pr_err("%s: glink_smem_native_driver register failed %d\n",
3160 __func__, rc);
3161 return rc;
3162 }
3163
3164 rc = platform_driver_register(&glink_rpm_native_driver);
3165 if (rc) {
3166 pr_err("%s: glink_rpm_native_driver register failed %d\n",
3167 __func__, rc);
3168 return rc;
3169 }
3170
3171 rc = platform_driver_register(&glink_mailbox_driver);
3172 if (rc) {
3173 pr_err("%s: glink_mailbox_driver register failed %d\n",
3174 __func__, rc);
3175 return rc;
3176 }
3177
3178 return 0;
3179}
3180arch_initcall(glink_smem_native_xprt_init);
3181
3182MODULE_DESCRIPTION("MSM G-Link SMEM Native Transport");
3183MODULE_LICENSE("GPL v2");