/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ipc_logging.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/srcu.h>
#include <linux/wait.h>
#include <soc/qcom/smem.h>
#include <soc/qcom/tracer_pkt.h>
#include "glink_core_if.h"
#include "glink_private.h"
#include "glink_xprt_if.h"

#define XPRT_NAME "smem"
#define FIFO_FULL_RESERVE 8
#define FIFO_ALIGNMENT 8
#define TX_BLOCKED_CMD_RESERVE 8 /* size of struct read_notif_request */
#define SMEM_CH_DESC_SIZE 32
#define RPM_TOC_ID 0x67727430
#define RPM_TX_FIFO_ID 0x61703272
#define RPM_RX_FIFO_ID 0x72326170
#define RPM_TOC_SIZE 256
#define RPM_MAX_TOC_ENTRIES 20
#define RPM_FIFO_ADDR_ALIGN_BYTES 3
#define TRACER_PKT_FEATURE BIT(2)
#define DEFERRED_CMDS_THRESHOLD 25
/**
 * enum command_types - definition of the types of commands sent/received
 * @VERSION_CMD:		Version and feature set supported
 * @VERSION_ACK_CMD:		Response for @VERSION_CMD
 * @OPEN_CMD:			Open a channel
 * @CLOSE_CMD:			Close a channel
 * @OPEN_ACK_CMD:		Response to @OPEN_CMD
 * @RX_INTENT_CMD:		RX intent for a channel was queued
 * @RX_DONE_CMD:		Use of RX intent for a channel is complete
 * @RX_INTENT_REQ_CMD:		Request to have RX intent queued
 * @RX_INTENT_REQ_ACK_CMD:	Response for @RX_INTENT_REQ_CMD
 * @TX_DATA_CMD:		Start of a data transfer
 * @ZERO_COPY_TX_DATA_CMD:	Start of a data transfer with zero copy
 * @CLOSE_ACK_CMD:		Response for @CLOSE_CMD
 * @READ_NOTIF_CMD:		Request for a notification when this cmd is read
 * @TX_DATA_CONT_CMD:		Continuation or end of a data transfer
 * @RX_DONE_W_REUSE_CMD:	Same as @RX_DONE_CMD but also reuse the used
 *				intent
 * @SIGNALS_CMD:		Sideband signals
 * @TRACER_PKT_CMD:		Start of a Tracer Packet Command
 * @TRACER_PKT_CONT_CMD:	Continuation or end of a Tracer Packet Command
 */
enum command_types {
	VERSION_CMD,
	VERSION_ACK_CMD,
	OPEN_CMD,
	CLOSE_CMD,
	OPEN_ACK_CMD,
	RX_INTENT_CMD,
	RX_DONE_CMD,
	RX_INTENT_REQ_CMD,
	RX_INTENT_REQ_ACK_CMD,
	TX_DATA_CMD,
	ZERO_COPY_TX_DATA_CMD,
	CLOSE_ACK_CMD,
	TX_DATA_CONT_CMD,
	READ_NOTIF_CMD,
	RX_DONE_W_REUSE_CMD,
	SIGNALS_CMD,
	TRACER_PKT_CMD,
	TRACER_PKT_CONT_CMD,
};

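/*
 * Illustrative sketch (not part of the driver): every command on the wire
 * begins with the same 8-byte header, mirrored by the local "struct command"
 * definitions used throughout this file, e.g. for OPEN_CMD:
 *
 *	struct command {
 *		uint16_t id;		// OPEN_CMD
 *		uint16_t param1;	// local channel id
 *		uint32_t param2;	// length of the channel name
 *	};
 *
 * The meaning of param1/param2 depends on the command id; see the individual
 * tx_cmd_*() helpers below.
 */
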
/**
 * struct channel_desc - description of a channel fifo with a remote entity
 * @read_index:		The read index for the fifo where data should be
 *			consumed from.
 * @write_index:	The write index for the fifo where data should be
 *			produced to.
 *
 * This structure resides in SMEM and contains the control information for the
 * fifo data pipes of the channel. There is one physical channel between us
 * and a remote entity.
 */
struct channel_desc {
	uint32_t read_index;
	uint32_t write_index;
};

/**
 * struct mailbox_config_info - description of a mailbox transport channel
 * @tx_read_index:	Offset into the tx fifo where data should be read from.
 * @tx_write_index:	Offset into the tx fifo where new data will be placed.
 * @tx_size:		Size of the transmit fifo in bytes.
 * @rx_read_index:	Offset into the rx fifo where data should be read from.
 * @rx_write_index:	Offset into the rx fifo where new data will be placed.
 * @rx_size:		Size of the receive fifo in bytes.
 * @fifo:		The fifos for the channel.
 */
struct mailbox_config_info {
	uint32_t tx_read_index;
	uint32_t tx_write_index;
	uint32_t tx_size;
	uint32_t rx_read_index;
	uint32_t rx_write_index;
	uint32_t rx_size;
	char fifo[]; /* tx fifo, then rx fifo */
};

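/*
 * Illustrative sketch (not part of the driver): in mailbox mode the two
 * fifos live back to back in @fifo, so the rx fifo is located by offsetting
 * past the tx fifo, exactly as get_rx_fifo() below does:
 *
 *	tx_fifo = &mailbox->fifo[0];
 *	rx_fifo = &mailbox->fifo[mailbox->tx_size];
 *	rx_fifo_size = mailbox->rx_size;
 */
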
/**
 * struct edge_info - local information for managing a single complete edge
 * @xprt_if:			The transport interface registered with the
 *				glink core associated with this edge.
 * @xprt_cfg:			The transport configuration for the glink core
 *				associated with this edge.
 * @intentless:			True if this edge runs in intentless mode.
 * @irq_disabled:		Flag indicating whether the interrupt is
 *				enabled or disabled.
 * @remote_proc_id:		The SMEM processor id for the remote side.
 * @rx_reset_reg:		Reference to the register to reset the rx irq
 *				line, if applicable.
 * @out_irq_reg:		Reference to the register to send an irq to the
 *				remote side.
 * @out_irq_mask:		Mask written to @out_irq_reg to trigger the
 *				correct irq.
 * @irq_line:			The incoming interrupt line.
 * @tx_irq_count:		Number of interrupts triggered.
 * @rx_irq_count:		Number of interrupts received.
 * @tx_ch_desc:			Reference to the channel description structure
 *				for tx in SMEM for this edge.
 * @rx_ch_desc:			Reference to the channel description structure
 *				for rx in SMEM for this edge.
 * @tx_fifo:			Reference to the transmit fifo in SMEM.
 * @rx_fifo:			Reference to the receive fifo in SMEM.
 * @tx_fifo_size:		Total size of @tx_fifo.
 * @rx_fifo_size:		Total size of @rx_fifo.
 * @read_from_fifo:		Memcpy for this edge.
 * @write_to_fifo:		Memcpy for this edge.
 * @write_lock:			Lock to serialize access to @tx_fifo.
 * @tx_blocked_queue:		Queue of entities waiting for the remote side to
 *				signal @tx_fifo has flushed and is now empty.
 * @tx_resume_needed:		A tx resume signal needs to be sent to the glink
 *				core once the remote side indicates @tx_fifo has
 *				flushed.
 * @tx_blocked_signal_sent:	Flag to indicate the flush signal has already
 *				been sent, and a response is pending from the
 *				remote side. Protected by @write_lock.
 * @kwork:			Work to be executed when an irq is received.
 * @kworker:			Handle to the entity processing deferred
 *				commands.
 * @wakeup_work:		Work item used to wake up tx-blocked waiters
 *				and resume tx once fifo space is available.
 * @tasklet:			Handle to the tasklet that processes incoming
 *				data packets in an atomic manner.
 * @task:			Handle to the task context used to run @kworker.
 * @use_ref:			Active uses of this transport use this to grab
 *				a reference. Used for ssr synchronization.
 * @in_ssr:			Signals if this transport is in ssr.
 * @rx_lock:			Used to serialize concurrent instances of rx
 *				processing.
 * @deferred_cmds:		List of deferred commands that need to be
 *				processed in process context.
 * @deferred_cmds_cnt:		Number of deferred commands in queue.
 * @num_pw_states:		Size of @ramp_time_us.
 * @ramp_time_us:		Array of ramp times in microseconds where array
 *				index position represents a power state.
 * @mailbox:			Mailbox transport channel description reference.
 */
struct edge_info {
	struct glink_transport_if xprt_if;
	struct glink_core_transport_cfg xprt_cfg;
	bool intentless;
	bool irq_disabled;
	uint32_t remote_proc_id;
	void __iomem *rx_reset_reg;
	void __iomem *out_irq_reg;
	uint32_t out_irq_mask;
	uint32_t irq_line;
	uint32_t tx_irq_count;
	uint32_t rx_irq_count;
	struct channel_desc *tx_ch_desc;
	struct channel_desc *rx_ch_desc;
	void __iomem *tx_fifo;
	void __iomem *rx_fifo;
	uint32_t tx_fifo_size;
	uint32_t rx_fifo_size;
	void * (*read_from_fifo)(void *dest, const void *src, size_t num_bytes);
	void * (*write_to_fifo)(void *dest, const void *src, size_t num_bytes);
	spinlock_t write_lock;
	wait_queue_head_t tx_blocked_queue;
	bool tx_resume_needed;
	bool tx_blocked_signal_sent;
	struct kthread_work kwork;
	struct kthread_worker kworker;
	struct work_struct wakeup_work;
	struct task_struct *task;
	struct tasklet_struct tasklet;
	struct srcu_struct use_ref;
	bool in_ssr;
	spinlock_t rx_lock;
	struct list_head deferred_cmds;
	uint32_t deferred_cmds_cnt;
	uint32_t num_pw_states;
	unsigned long *ramp_time_us;
	struct mailbox_config_info *mailbox;
};

/**
 * struct deferred_cmd - description of a command to be processed later
 * @list_node:	Used to put this command on a list in the edge.
 * @id:		ID of the command.
 * @param1:	Parameter one of the command.
 * @param2:	Parameter two of the command.
 * @data:	Extra data associated with the command, if applicable.
 *
 * This structure stores the relevant information of a command that was removed
 * from the fifo but needs to be processed at a later time.
 */
struct deferred_cmd {
	struct list_head list_node;
	uint16_t id;
	uint16_t param1;
	uint32_t param2;
	void *data;
};

static uint32_t negotiate_features_v1(struct glink_transport_if *if_ptr,
				      const struct glink_core_version *version,
				      uint32_t features);
static void register_debugfs_info(struct edge_info *einfo);

static struct edge_info *edge_infos[NUM_SMEM_SUBSYSTEMS];
static DEFINE_MUTEX(probe_lock);
static struct glink_core_version versions[] = {
	{1, TRACER_PKT_FEATURE, negotiate_features_v1},
};

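/*
 * negotiate_features_v1() is only forward-declared in this excerpt; a
 * plausible minimal implementation (an assumption for illustration, not the
 * confirmed body) would simply intersect the requested features with what
 * this version entry advertises:
 *
 *	return features & version->features;
 *
 * so that only mutually supported feature bits (e.g. TRACER_PKT_FEATURE)
 * survive negotiation.
 */
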
/**
 * send_irq() - send an irq to a remote entity as an event signal
 * @einfo:	The remote entity that should receive the irq.
 */
static void send_irq(struct edge_info *einfo)
{
	/*
	 * Any data associated with this event must be visible to the remote
	 * before the interrupt is triggered.
	 */
	wmb();
	writel_relaxed(einfo->out_irq_mask, einfo->out_irq_reg);
	if (einfo->remote_proc_id != SMEM_SPSS)
		writel_relaxed(0, einfo->out_irq_reg);
	einfo->tx_irq_count++;
}

/**
 * read_from_fifo() - memcpy from fifo memory
 * @dest:	Destination address.
 * @src:	Source address.
 * @num_bytes:	Number of bytes to copy.
 *
 * Return: Destination address.
 */
static void *read_from_fifo(void *dest, const void *src, size_t num_bytes)
{
	memcpy_fromio(dest, src, num_bytes);
	return dest;
}

/**
 * write_to_fifo() - memcpy to fifo memory
 * @dest:	Destination address.
 * @src:	Source address.
 * @num_bytes:	Number of bytes to copy.
 *
 * Return: Destination address.
 */
static void *write_to_fifo(void *dest, const void *src, size_t num_bytes)
{
	memcpy_toio(dest, src, num_bytes);
	return dest;
}

/**
 * memcpy32_toio() - memcpy to word access only memory
 * @dest:	Destination address.
 * @src:	Source address.
 * @num_bytes:	Number of bytes to copy.
 *
 * Return: Destination address.
 */
static void *memcpy32_toio(void *dest, const void *src, size_t num_bytes)
{
	uint32_t *dest_local = (uint32_t *)dest;
	uint32_t *src_local = (uint32_t *)src;

	if (WARN_ON(num_bytes & RPM_FIFO_ADDR_ALIGN_BYTES))
		return ERR_PTR(-EINVAL);
	if (WARN_ON(!dest_local ||
			((uintptr_t)dest_local & RPM_FIFO_ADDR_ALIGN_BYTES)))
		return ERR_PTR(-EINVAL);
	if (WARN_ON(!src_local ||
			((uintptr_t)src_local & RPM_FIFO_ADDR_ALIGN_BYTES)))
		return ERR_PTR(-EINVAL);
	num_bytes /= sizeof(uint32_t);

	while (num_bytes--)
		__raw_writel_no_log(*src_local++, dest_local++);

	return dest;
}

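/*
 * Illustrative sketch (not part of the driver; fifo_dst is a hypothetical
 * word-aligned destination in the RPM fifo): the RPM fifos only allow word
 * accesses, so both the addresses and the size must be 4-byte aligned or the
 * copy is rejected:
 *
 *	u32 buf[2] = { 0xdeadbeef, 0xcafef00d };
 *
 *	memcpy32_toio(fifo_dst, buf, sizeof(buf));	// ok: 8 bytes, aligned
 *	memcpy32_toio(fifo_dst, buf, 6);	// WARNs, returns ERR_PTR(-EINVAL)
 */
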
/**
 * memcpy32_fromio() - memcpy from word access only memory
 * @dest:	Destination address.
 * @src:	Source address.
 * @num_bytes:	Number of bytes to copy.
 *
 * Return: Destination address.
 */
static void *memcpy32_fromio(void *dest, const void *src, size_t num_bytes)
{
	uint32_t *dest_local = (uint32_t *)dest;
	uint32_t *src_local = (uint32_t *)src;

	if (WARN_ON(num_bytes & RPM_FIFO_ADDR_ALIGN_BYTES))
		return ERR_PTR(-EINVAL);
	if (WARN_ON(!dest_local ||
			((uintptr_t)dest_local & RPM_FIFO_ADDR_ALIGN_BYTES)))
		return ERR_PTR(-EINVAL);
	if (WARN_ON(!src_local ||
			((uintptr_t)src_local & RPM_FIFO_ADDR_ALIGN_BYTES)))
		return ERR_PTR(-EINVAL);
	num_bytes /= sizeof(uint32_t);

	while (num_bytes--)
		*dest_local++ = __raw_readl_no_log(src_local++);

	return dest;
}

/**
 * fifo_read_avail() - how many bytes are available to be read from an edge
 * @einfo:	The concerned edge to query.
 *
 * Return: The number of bytes available to be read from the edge.
 */
static uint32_t fifo_read_avail(struct edge_info *einfo)
{
	uint32_t read_index = einfo->rx_ch_desc->read_index;
	uint32_t write_index = einfo->rx_ch_desc->write_index;
	uint32_t fifo_size = einfo->rx_fifo_size;
	uint32_t bytes_avail;

	bytes_avail = write_index - read_index;
	if (write_index < read_index)
		/*
		 * Case: W < R - Write has wrapped
		 * --------------------------------
		 * In this case, the write operation has wrapped past the end
		 * of the FIFO which means that now calculating the amount of
		 * data in the FIFO results in a negative number. This can be
		 * easily fixed by adding the fifo_size to the value. Even
		 * though the values are unsigned, subtraction is always done
		 * using 2's complement which means that the result will still
		 * be correct once the FIFO size has been added to the negative
		 * result.
		 *
		 * Example:
		 * '-' = data in fifo
		 * '.' = empty
		 *
		 *  0         1
		 *  0123456789012345
		 * |-----w.....r----|
		 *  0              N
		 *
		 * write = 5 = 101b
		 * read = 11 = 1011b
		 * Data in FIFO
		 *   (write - read) + fifo_size = (101b - 1011b) + 10000b
		 *                              = 11111010b + 10000b = 1010b = 10
		 */
		bytes_avail += fifo_size;

	return bytes_avail;
}

/**
 * fifo_write_avail() - how many bytes can be written to the edge
 * @einfo:	The concerned edge to query.
 *
 * Calculates the number of bytes that can be transmitted at this time.
 * Automatically reserves some space to maintain alignment when the fifo is
 * completely full, and reserves space so that the flush command can always be
 * transmitted when needed.
 *
 * Return: The number of bytes that can be written to the edge.
 */
static uint32_t fifo_write_avail(struct edge_info *einfo)
{
	uint32_t read_index = einfo->tx_ch_desc->read_index;
	uint32_t write_index = einfo->tx_ch_desc->write_index;
	uint32_t fifo_size = einfo->tx_fifo_size;
	uint32_t bytes_avail = read_index - write_index;

	if (read_index <= write_index)
		bytes_avail += fifo_size;
	if (bytes_avail < FIFO_FULL_RESERVE + TX_BLOCKED_CMD_RESERVE)
		bytes_avail = 0;
	else
		bytes_avail -= FIFO_FULL_RESERVE + TX_BLOCKED_CMD_RESERVE;

	return bytes_avail;
}

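/*
 * Worked example for fifo_write_avail(): with fifo_size = 1024,
 * read_index = 128 and write_index = 640, the raw free space is
 * (128 - 640) + 1024 = 512 bytes; subtracting the reserves
 * (FIFO_FULL_RESERVE + TX_BLOCKED_CMD_RESERVE = 16) leaves 496 bytes that a
 * caller may actually transmit.
 */
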
/**
 * fifo_read() - read data from an edge
 * @einfo:	The concerned edge to read from.
 * @_data:	Buffer to copy the read data into.
 * @len:	The amount of data to read in bytes.
 *
 * Return: The number of bytes read.
 */
static int fifo_read(struct edge_info *einfo, void *_data, int len)
{
	void *ptr;
	void *ret;
	void *data = _data;
	int orig_len = len;
	uint32_t read_index = einfo->rx_ch_desc->read_index;
	uint32_t write_index = einfo->rx_ch_desc->write_index;
	uint32_t fifo_size = einfo->rx_fifo_size;
	uint32_t n;

	while (len) {
		ptr = einfo->rx_fifo + read_index;
		if (read_index <= write_index)
			n = write_index - read_index;
		else
			n = fifo_size - read_index;

		if (n == 0)
			break;
		if (n > len)
			n = len;

		ret = einfo->read_from_fifo(data, ptr, n);
		if (IS_ERR(ret))
			return PTR_ERR(ret);

		data += n;
		len -= n;
		read_index += n;
		if (read_index >= fifo_size)
			read_index -= fifo_size;
	}
	einfo->rx_ch_desc->read_index = read_index;

	return orig_len - len;
}

/**
 * fifo_write_body() - Copy transmit data into an edge
 * @einfo:		The concerned edge to copy into.
 * @_data:		Buffer of data to copy from.
 * @len:		Size of data to copy in bytes.
 * @write_index:	Index into the channel where the data should be copied.
 *
 * Return: Number of bytes remaining to be copied into the edge.
 */
static int fifo_write_body(struct edge_info *einfo, const void *_data,
			   int len, uint32_t *write_index)
{
	void *ptr;
	void *ret;
	const void *data = _data;
	uint32_t read_index = einfo->tx_ch_desc->read_index;
	uint32_t fifo_size = einfo->tx_fifo_size;
	uint32_t n;

	while (len) {
		ptr = einfo->tx_fifo + *write_index;
		if (*write_index < read_index) {
			n = read_index - *write_index - FIFO_FULL_RESERVE;
		} else {
			if (read_index < FIFO_FULL_RESERVE)
				n = fifo_size + read_index - *write_index -
							FIFO_FULL_RESERVE;
			else
				n = fifo_size - *write_index;
		}

		if (n == 0)
			break;
		if (n > len)
			n = len;

		ret = einfo->write_to_fifo(ptr, data, n);
		if (IS_ERR(ret))
			return PTR_ERR(ret);

		data += n;
		len -= n;
		*write_index += n;
		if (*write_index >= fifo_size)
			*write_index -= fifo_size;
	}
	return len;
}

/**
 * fifo_write() - Write data into an edge
 * @einfo:	The concerned edge to write to.
 * @data:	Buffer of data to write.
 * @len:	Length of data to write, in bytes.
 *
 * Wrapper around fifo_write_body() to manage additional details that are
 * necessary for a complete write event. Does not manage concurrency. Clients
 * should use fifo_write_avail() to check if there is sufficient space before
 * calling fifo_write().
 *
 * Return: Number of bytes written to the edge.
 */
static int fifo_write(struct edge_info *einfo, const void *data, int len)
{
	int orig_len = len;
	uint32_t write_index = einfo->tx_ch_desc->write_index;

	len = fifo_write_body(einfo, data, len, &write_index);
	if (unlikely(len < 0))
		return len;
	einfo->tx_ch_desc->write_index = write_index;
	send_irq(einfo);

	return orig_len - len;
}

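/*
 * Illustrative sketch (not part of the driver): fifo_write() handles neither
 * concurrency nor a full fifo itself, so a caller is expected to follow the
 * pattern used by fifo_tx() below:
 *
 *	spin_lock_irqsave(&einfo->write_lock, flags);
 *	if (fifo_write_avail(einfo) >= len)
 *		fifo_write(einfo, data, len);
 *	spin_unlock_irqrestore(&einfo->write_lock, flags);
 */
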
/**
 * fifo_write_complex() - writes a transaction of multiple buffers to an edge
 * @einfo:	The concerned edge to write to.
 * @data1:	The first buffer of data to write.
 * @len1:	The length of the first buffer in bytes.
 * @data2:	The second buffer of data to write.
 * @len2:	The length of the second buffer in bytes.
 * @data3:	The third buffer of data to write.
 * @len3:	The length of the third buffer in bytes.
 *
 * A variant of fifo_write() which optimizes the usecase found in tx(). The
 * remote side expects all or none of the transmitted data to be available.
 * This prevents the tx() usecase from calling fifo_write() multiple times. The
 * alternative would be an allocation and additional memcpy to create a buffer
 * to copy all the data segments into one location before calling fifo_write().
 *
 * Return: Number of bytes written to the edge.
 */
static int fifo_write_complex(struct edge_info *einfo,
			      const void *data1, int len1,
			      const void *data2, int len2,
			      const void *data3, int len3)
{
	int orig_len = len1 + len2 + len3;
	uint32_t write_index = einfo->tx_ch_desc->write_index;

	len1 = fifo_write_body(einfo, data1, len1, &write_index);
	if (unlikely(len1 < 0))
		return len1;
	len2 = fifo_write_body(einfo, data2, len2, &write_index);
	if (unlikely(len2 < 0))
		return len2;
	len3 = fifo_write_body(einfo, data3, len3, &write_index);
	if (unlikely(len3 < 0))
		return len3;

	einfo->tx_ch_desc->write_index = write_index;
	send_irq(einfo);

	return orig_len - len1 - len2 - len3;
}

/**
 * send_tx_blocked_signal() - send the flush command as we are blocked from tx
 * @einfo:	The concerned edge which is blocked.
 *
 * Used to send a signal to the remote side that we have no more space to
 * transmit data and therefore need the remote side to signal us when they
 * have cleared some space by reading some data. This function relies upon the
 * assumption that fifo_write_avail() will reserve some space so that the
 * flush signal command can always be put into the transmit fifo, even when
 * "everyone" else thinks that the transmit fifo is truly full. This function
 * assumes that it is called with the write_lock already locked.
 */
static void send_tx_blocked_signal(struct edge_info *einfo)
{
	struct read_notif_request {
		uint16_t cmd;
		uint16_t reserved;
		uint32_t reserved2;
	};
	struct read_notif_request read_notif_req;

	read_notif_req.cmd = READ_NOTIF_CMD;
	read_notif_req.reserved = 0;
	read_notif_req.reserved2 = 0;

	if (!einfo->tx_blocked_signal_sent) {
		einfo->tx_blocked_signal_sent = true;
		fifo_write(einfo, &read_notif_req, sizeof(read_notif_req));
	}
}

/**
 * fifo_tx() - transmit data on an edge
 * @einfo:	The concerned edge to transmit on.
 * @data:	Buffer of data to transmit.
 * @len:	Length of data to transmit in bytes.
 *
 * This helper function is the preferred interface to fifo_write() and should
 * be used in the normal case for transmitting entities. fifo_tx() will block
 * until there is sufficient room to transmit the requested amount of data.
 * fifo_tx() will manage any concurrency between multiple transmitters on a
 * channel.
 *
 * Return: Number of bytes transmitted.
 */
static int fifo_tx(struct edge_info *einfo, const void *data, int len)
{
	unsigned long flags;
	int ret;

	DEFINE_WAIT(wait);

	spin_lock_irqsave(&einfo->write_lock, flags);
	while (fifo_write_avail(einfo) < len) {
		send_tx_blocked_signal(einfo);
		prepare_to_wait(&einfo->tx_blocked_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		if (fifo_write_avail(einfo) < len && !einfo->in_ssr) {
			spin_unlock_irqrestore(&einfo->write_lock, flags);
			schedule();
			spin_lock_irqsave(&einfo->write_lock, flags);
		}
		finish_wait(&einfo->tx_blocked_queue, &wait);
		if (einfo->in_ssr) {
			spin_unlock_irqrestore(&einfo->write_lock, flags);
			return -EFAULT;
		}
	}
	ret = fifo_write(einfo, data, len);
	spin_unlock_irqrestore(&einfo->write_lock, flags);

	return ret;
}

/**
 * process_rx_data() - process received data from an edge
 * @einfo:	The edge the data was received on.
 * @cmd_id:	ID to specify the type of data.
 * @rcid:	The remote channel id associated with the data.
 * @intent_id:	The intent the data should be put in.
 */
static void process_rx_data(struct edge_info *einfo, uint16_t cmd_id,
			    uint32_t rcid, uint32_t intent_id)
{
	struct command {
		uint32_t frag_size;
		uint32_t size_remaining;
	};
	struct command cmd;
	struct glink_core_rx_intent *intent;
	char trash[FIFO_ALIGNMENT];
	int alignment;
	bool err = false;

	fifo_read(einfo, &cmd, sizeof(cmd));

	intent = einfo->xprt_if.glink_core_if_ptr->rx_get_pkt_ctx(
				&einfo->xprt_if, rcid, intent_id);
	if (intent == NULL) {
		GLINK_ERR("%s: no intent for ch %d liid %d\n", __func__, rcid,
			  intent_id);
		err = true;
	} else if (intent->data == NULL) {
		if (einfo->intentless) {
			intent->data = kmalloc(cmd.frag_size, GFP_ATOMIC);
			if (!intent->data) {
				err = true;
				GLINK_ERR(
					"%s: atomic alloc fail ch %d liid %d size %d\n",
					__func__, rcid, intent_id,
					cmd.frag_size);
			} else {
				intent->intent_size = cmd.frag_size;
			}
		} else {
			GLINK_ERR(
				"%s: intent for ch %d liid %d has no data buff\n",
				__func__, rcid, intent_id);
			err = true;
		}
	}

	if (!err &&
	    (intent->intent_size - intent->write_offset < cmd.frag_size ||
	     intent->write_offset + cmd.size_remaining > intent->intent_size)) {
		GLINK_ERR("%s: rx data size:%d and remaining:%d %s %d %s:%d\n",
			  __func__,
			  cmd.frag_size,
			  cmd.size_remaining,
			  "will overflow ch",
			  rcid,
			  "intent",
			  intent_id);
		err = true;
	}

	if (err) {
		alignment = ALIGN(cmd.frag_size, FIFO_ALIGNMENT);
		alignment -= cmd.frag_size;
		while (cmd.frag_size) {
			if (cmd.frag_size > FIFO_ALIGNMENT) {
				fifo_read(einfo, trash, FIFO_ALIGNMENT);
				cmd.frag_size -= FIFO_ALIGNMENT;
			} else {
				fifo_read(einfo, trash, cmd.frag_size);
				cmd.frag_size = 0;
			}
		}
		if (alignment)
			fifo_read(einfo, trash, alignment);
		return;
	}
	fifo_read(einfo, intent->data + intent->write_offset, cmd.frag_size);
	intent->write_offset += cmd.frag_size;
	intent->pkt_size += cmd.frag_size;

	alignment = ALIGN(cmd.frag_size, FIFO_ALIGNMENT);
	alignment -= cmd.frag_size;
	if (alignment)
		fifo_read(einfo, trash, alignment);

	if (unlikely((cmd_id == TRACER_PKT_CMD ||
		      cmd_id == TRACER_PKT_CONT_CMD) && !cmd.size_remaining)) {
		tracer_pkt_log_event(intent->data, GLINK_XPRT_RX);
		intent->tracer_pkt = true;
	}

	einfo->xprt_if.glink_core_if_ptr->rx_put_pkt_ctx(&einfo->xprt_if,
							 rcid,
							 intent,
							 cmd.size_remaining ?
								false : true);
}

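/*
 * Worked example for process_rx_data(): a 24-byte packet sent as two
 * fragments arrives on the fifo as
 *
 *	TX_DATA_CMD      { frag_size = 16, size_remaining = 8 }
 *	TX_DATA_CONT_CMD { frag_size = 8,  size_remaining = 0 }
 *
 * Each fragment is copied into intent->data at intent->write_offset, and the
 * intent is handed to the core via rx_put_pkt_ctx() with the "complete" flag
 * set only once size_remaining reaches zero.
 */
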
/**
 * queue_cmd() - queue a deferred command for later processing
 * @einfo:	Edge to queue commands on.
 * @cmd:	Command to queue.
 * @data:	Command specific data to queue with the command.
 *
 * Return: True if queuing was successful, false otherwise.
 */
static bool queue_cmd(struct edge_info *einfo, void *cmd, void *data)
{
	struct command {
		uint16_t id;
		uint16_t param1;
		uint32_t param2;
	};
	struct command *_cmd = cmd;
	struct deferred_cmd *d_cmd;

	d_cmd = kmalloc(sizeof(*d_cmd), GFP_ATOMIC);
	if (!d_cmd) {
		GLINK_ERR("%s: Discarding cmd %d\n", __func__, _cmd->id);
		return false;
	}
	d_cmd->id = _cmd->id;
	d_cmd->param1 = _cmd->param1;
	d_cmd->param2 = _cmd->param2;
	d_cmd->data = data;
	list_add_tail(&d_cmd->list_node, &einfo->deferred_cmds);
	einfo->deferred_cmds_cnt++;
	kthread_queue_work(&einfo->kworker, &einfo->kwork);
	return true;
}

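/*
 * Illustrative sketch (not part of the driver): deferred commands queued
 * here from atomic context are drained later by __rx_worker() running in
 * process context via rx_worker() on @kworker:
 *
 *	tasklet (atomic)   -> queue_cmd() -> deferred_cmds list
 *	kthread (process)  -> __rx_worker(einfo, false) -> glink core callbacks
 */
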
/**
 * get_rx_fifo() - Find the rx fifo for an edge
 * @einfo:	Edge to find the fifo for.
 *
 * Return: True if fifo was found, false otherwise.
 */
static bool get_rx_fifo(struct edge_info *einfo)
{
	if (einfo->mailbox) {
		einfo->rx_fifo = &einfo->mailbox->fifo[einfo->mailbox->tx_size];
		einfo->rx_fifo_size = einfo->mailbox->rx_size;
	} else {
		einfo->rx_fifo = smem_get_entry(SMEM_GLINK_NATIVE_XPRT_FIFO_1,
						&einfo->rx_fifo_size,
						einfo->remote_proc_id,
						SMEM_ITEM_CACHED_FLAG);
		if (!einfo->rx_fifo)
			return false;
	}

	return true;
}

/**
 * __rx_worker() - process received commands on a specific edge
 * @einfo:	Edge to process commands on.
 * @atomic_ctx:	Indicates if the caller is in atomic context and requires any
 *		non-atomic operations to be deferred.
 */
static void __rx_worker(struct edge_info *einfo, bool atomic_ctx)
{
	struct command {
		uint16_t id;
		uint16_t param1;
		uint32_t param2;
	};
	struct intent_desc {
		uint32_t size;
		uint32_t id;
	};
	struct command cmd;
	struct intent_desc intent;
	struct intent_desc *intents;
	int i;
	bool granted;
	unsigned long flags;
	int rcu_id;
	uint16_t rcid;
	uint32_t name_len;
	uint32_t len;
	char *name;
	char trash[FIFO_ALIGNMENT];
	struct deferred_cmd *d_cmd;
	void *cmd_data;

	rcu_id = srcu_read_lock(&einfo->use_ref);

	if (unlikely(!einfo->rx_fifo)) {
		if (!get_rx_fifo(einfo)) {
			srcu_read_unlock(&einfo->use_ref, rcu_id);
			return;
		}
		einfo->in_ssr = false;
		einfo->xprt_if.glink_core_if_ptr->link_up(&einfo->xprt_if);
	}

	if (einfo->in_ssr) {
		srcu_read_unlock(&einfo->use_ref, rcu_id);
		return;
	}

	if ((atomic_ctx) && ((einfo->tx_resume_needed) ||
	    (waitqueue_active(&einfo->tx_blocked_queue)))) /* tx waiting? */
		schedule_work(&einfo->wakeup_work);

	/*
	 * Access to the fifo needs to be synchronized, however only the calls
	 * into the core from process_rx_data() are compatible with an atomic
	 * processing context. For everything else, we need to do all the fifo
	 * processing, then unlock the lock for the call into the core. Data
	 * in the fifo is allowed to be processed immediately instead of being
	 * ordered with the commands because the channel open process prevents
	 * intents from being queued (which prevents data from being sent)
	 * until all the channel open commands are processed by the core, thus
	 * eliminating a race.
	 */
	spin_lock_irqsave(&einfo->rx_lock, flags);
	while (fifo_read_avail(einfo) ||
			(!atomic_ctx && !list_empty(&einfo->deferred_cmds))) {
		if (einfo->in_ssr)
			break;

		if (atomic_ctx && !einfo->intentless &&
		    einfo->deferred_cmds_cnt >= DEFERRED_CMDS_THRESHOLD)
			break;

		if (!atomic_ctx && !list_empty(&einfo->deferred_cmds)) {
			d_cmd = list_first_entry(&einfo->deferred_cmds,
					struct deferred_cmd, list_node);
			list_del(&d_cmd->list_node);
			einfo->deferred_cmds_cnt--;
			cmd.id = d_cmd->id;
			cmd.param1 = d_cmd->param1;
			cmd.param2 = d_cmd->param2;
			cmd_data = d_cmd->data;
			kfree(d_cmd);
		} else {
			fifo_read(einfo, &cmd, sizeof(cmd));
			cmd_data = NULL;
		}

		switch (cmd.id) {
		case VERSION_CMD:
			if (atomic_ctx) {
				queue_cmd(einfo, &cmd, NULL);
				break;
			}
			spin_unlock_irqrestore(&einfo->rx_lock, flags);
			einfo->xprt_if.glink_core_if_ptr->rx_cmd_version(
								&einfo->xprt_if,
								cmd.param1,
								cmd.param2);
			spin_lock_irqsave(&einfo->rx_lock, flags);
			break;
		case VERSION_ACK_CMD:
			if (atomic_ctx) {
				queue_cmd(einfo, &cmd, NULL);
				break;
			}
			spin_unlock_irqrestore(&einfo->rx_lock, flags);
			einfo->xprt_if.glink_core_if_ptr->rx_cmd_version_ack(
								&einfo->xprt_if,
								cmd.param1,
								cmd.param2);
			spin_lock_irqsave(&einfo->rx_lock, flags);
			break;
		case OPEN_CMD:
			rcid = cmd.param1;
			name_len = cmd.param2;

			if (cmd_data) {
				name = cmd_data;
			} else {
				len = ALIGN(name_len, FIFO_ALIGNMENT);
				name = kmalloc(len, GFP_ATOMIC);
				if (!name) {
					pr_err("No memory available to rx ch open cmd name. Discarding cmd.\n");
					while (len) {
						fifo_read(einfo, trash,
							  FIFO_ALIGNMENT);
						len -= FIFO_ALIGNMENT;
					}
					break;
				}
				fifo_read(einfo, name, len);
			}
			if (atomic_ctx) {
				if (!queue_cmd(einfo, &cmd, name))
					kfree(name);
				break;
			}

			spin_unlock_irqrestore(&einfo->rx_lock, flags);
			einfo->xprt_if.glink_core_if_ptr->rx_cmd_ch_remote_open(
								&einfo->xprt_if,
								rcid,
								name,
								SMEM_XPRT_ID);
			kfree(name);
			spin_lock_irqsave(&einfo->rx_lock, flags);
			break;
		case CLOSE_CMD:
			if (atomic_ctx) {
				queue_cmd(einfo, &cmd, NULL);
				break;
			}
			spin_unlock_irqrestore(&einfo->rx_lock, flags);
			einfo->xprt_if.glink_core_if_ptr->
					rx_cmd_ch_remote_close(
								&einfo->xprt_if,
								cmd.param1);
			spin_lock_irqsave(&einfo->rx_lock, flags);
			break;
		case OPEN_ACK_CMD:
			if (atomic_ctx) {
				queue_cmd(einfo, &cmd, NULL);
				break;
			}
			spin_unlock_irqrestore(&einfo->rx_lock, flags);
			einfo->xprt_if.glink_core_if_ptr->rx_cmd_ch_open_ack(
								&einfo->xprt_if,
								cmd.param1,
								SMEM_XPRT_ID);
			spin_lock_irqsave(&einfo->rx_lock, flags);
			break;
		case RX_INTENT_CMD:
			/*
			 * One intent listed with this command. This is the
			 * expected case and can be optimized over the general
			 * case of an array of intents.
			 */
			if (cmd.param2 == 1) {
				if (cmd_data) {
					intent.id = ((struct intent_desc *)
							cmd_data)->id;
					intent.size = ((struct intent_desc *)
							cmd_data)->size;
					kfree(cmd_data);
				} else {
					fifo_read(einfo, &intent,
							sizeof(intent));
				}
				if (atomic_ctx) {
					cmd_data = kmalloc(sizeof(intent),
							   GFP_ATOMIC);
					if (!cmd_data) {
						GLINK_ERR(
							"%s: dropping cmd %d\n",
							__func__, cmd.id);
						break;
					}
					((struct intent_desc *)cmd_data)->id =
								intent.id;
					((struct intent_desc *)cmd_data)->size =
								intent.size;
					if (!queue_cmd(einfo, &cmd, cmd_data))
						kfree(cmd_data);
					break;
				}
				spin_unlock_irqrestore(&einfo->rx_lock, flags);
				einfo->xprt_if.glink_core_if_ptr->
						rx_cmd_remote_rx_intent_put(
								&einfo->xprt_if,
								cmd.param1,
								intent.id,
								intent.size);
				spin_lock_irqsave(&einfo->rx_lock, flags);
				break;
			}

			/* Array of intents to process */
			if (cmd_data) {
				intents = cmd_data;
			} else {
				intents = kmalloc_array(cmd.param2,
						sizeof(*intents), GFP_ATOMIC);
				if (!intents) {
					for (i = 0; i < cmd.param2; ++i)
						fifo_read(einfo, &intent,
							  sizeof(intent));
					break;
				}
				fifo_read(einfo, intents,
					  sizeof(*intents) * cmd.param2);
			}
			if (atomic_ctx) {
				if (!queue_cmd(einfo, &cmd, intents))
					kfree(intents);
				break;
			}
			spin_unlock_irqrestore(&einfo->rx_lock, flags);
			for (i = 0; i < cmd.param2; ++i) {
				einfo->xprt_if.glink_core_if_ptr->
						rx_cmd_remote_rx_intent_put(
								&einfo->xprt_if,
								cmd.param1,
								intents[i].id,
								intents[i].size);
			}
			kfree(intents);
			spin_lock_irqsave(&einfo->rx_lock, flags);
			break;
		case RX_DONE_CMD:
			if (atomic_ctx) {
				queue_cmd(einfo, &cmd, NULL);
				break;
			}
			spin_unlock_irqrestore(&einfo->rx_lock, flags);
			einfo->xprt_if.glink_core_if_ptr->rx_cmd_tx_done(
								&einfo->xprt_if,
								cmd.param1,
								cmd.param2,
								false);
			spin_lock_irqsave(&einfo->rx_lock, flags);
			break;
		case RX_INTENT_REQ_CMD:
			if (atomic_ctx) {
				queue_cmd(einfo, &cmd, NULL);
				break;
			}
			spin_unlock_irqrestore(&einfo->rx_lock, flags);
			einfo->xprt_if.glink_core_if_ptr->
					rx_cmd_remote_rx_intent_req(
								&einfo->xprt_if,
								cmd.param1,
								cmd.param2);
			spin_lock_irqsave(&einfo->rx_lock, flags);
			break;
		case RX_INTENT_REQ_ACK_CMD:
			if (atomic_ctx) {
				queue_cmd(einfo, &cmd, NULL);
				break;
			}
			spin_unlock_irqrestore(&einfo->rx_lock, flags);
			granted = false;
			if (cmd.param2 == 1)
				granted = true;
			einfo->xprt_if.glink_core_if_ptr->
					rx_cmd_rx_intent_req_ack(
								&einfo->xprt_if,
								cmd.param1,
								granted);
			spin_lock_irqsave(&einfo->rx_lock, flags);
			break;
		case TX_DATA_CMD:
		case TX_DATA_CONT_CMD:
		case TRACER_PKT_CMD:
		case TRACER_PKT_CONT_CMD:
			process_rx_data(einfo, cmd.id, cmd.param1, cmd.param2);
			break;
		case CLOSE_ACK_CMD:
			if (atomic_ctx) {
				queue_cmd(einfo, &cmd, NULL);
				break;
			}
			spin_unlock_irqrestore(&einfo->rx_lock, flags);
			einfo->xprt_if.glink_core_if_ptr->rx_cmd_ch_close_ack(
								&einfo->xprt_if,
								cmd.param1);
			spin_lock_irqsave(&einfo->rx_lock, flags);
			break;
		case READ_NOTIF_CMD:
			send_irq(einfo);
			break;
		case SIGNALS_CMD:
			if (atomic_ctx) {
				queue_cmd(einfo, &cmd, NULL);
				break;
			}
			spin_unlock_irqrestore(&einfo->rx_lock, flags);
			einfo->xprt_if.glink_core_if_ptr->rx_cmd_remote_sigs(
								&einfo->xprt_if,
								cmd.param1,
								cmd.param2);
			spin_lock_irqsave(&einfo->rx_lock, flags);
			break;
		case RX_DONE_W_REUSE_CMD:
			if (atomic_ctx) {
				queue_cmd(einfo, &cmd, NULL);
				break;
			}
			spin_unlock_irqrestore(&einfo->rx_lock, flags);
			einfo->xprt_if.glink_core_if_ptr->rx_cmd_tx_done(
								&einfo->xprt_if,
								cmd.param1,
								cmd.param2,
								true);
			spin_lock_irqsave(&einfo->rx_lock, flags);
			break;
		default:
			pr_err("Unrecognized command: %d\n", cmd.id);
			break;
		}
	}
	spin_unlock_irqrestore(&einfo->rx_lock, flags);
	srcu_read_unlock(&einfo->use_ref, rcu_id);
}

/**
 * rx_worker_atomic() - worker function to process received commands in atomic
 *			context
 * @param:	The param parameter passed during initialization of the
 *		tasklet.
 */
static void rx_worker_atomic(unsigned long param)
{
	struct edge_info *einfo = (struct edge_info *)param;

	__rx_worker(einfo, true);
}

/**
 * tx_wakeup_worker() - worker function to wake up tx blocked threads
 * @work:	Work item associated with the edge to process commands on.
 */
static void tx_wakeup_worker(struct work_struct *work)
{
	struct edge_info *einfo;
	bool trigger_wakeup = false;
	unsigned long flags;
	int rcu_id;

	einfo = container_of(work, struct edge_info, wakeup_work);
	rcu_id = srcu_read_lock(&einfo->use_ref);
	if (einfo->in_ssr) {
		srcu_read_unlock(&einfo->use_ref, rcu_id);
		return;
	}
	if (einfo->tx_resume_needed && fifo_write_avail(einfo)) {
		einfo->tx_resume_needed = false;
		einfo->xprt_if.glink_core_if_ptr->tx_resume(
						&einfo->xprt_if);
	}
	spin_lock_irqsave(&einfo->write_lock, flags);
	if (waitqueue_active(&einfo->tx_blocked_queue)) { /* tx waiting? */
		einfo->tx_blocked_signal_sent = false;
		trigger_wakeup = true;
	}
	spin_unlock_irqrestore(&einfo->write_lock, flags);
	if (trigger_wakeup)
		wake_up_all(&einfo->tx_blocked_queue);
	srcu_read_unlock(&einfo->use_ref, rcu_id);
}

/**
 * rx_worker() - worker function to process received commands
 * @work:	kwork associated with the edge to process commands on.
 */
static void rx_worker(struct kthread_work *work)
{
	struct edge_info *einfo;

	einfo = container_of(work, struct edge_info, kwork);
	__rx_worker(einfo, false);
}

irqreturn_t irq_handler(int irq, void *priv)
{
	struct edge_info *einfo = (struct edge_info *)priv;

	if (einfo->rx_reset_reg)
		writel_relaxed(einfo->out_irq_mask, einfo->rx_reset_reg);

	tasklet_hi_schedule(&einfo->tasklet);
	einfo->rx_irq_count++;

	return IRQ_HANDLED;
}

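/*
 * Illustrative sketch (assumption: the actual registration happens in the
 * probe functions outside this excerpt): irq_handler() would be hooked up to
 * the incoming interrupt line roughly as
 *
 *	rc = request_irq(einfo->irq_line, irq_handler,
 *			 IRQF_TRIGGER_RISING | IRQF_SHARED,
 *			 "glink_smem_native", einfo);
 *
 * so that each remote-edge interrupt resets the rx irq line (if present) and
 * schedules @tasklet for atomic rx processing.
 */
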
/**
 * tx_cmd_version() - convert a version cmd to wire format and transmit
 * @if_ptr:	The transport to transmit on.
 * @version:	The version number to encode.
 * @features:	The features information to encode.
 */
static void tx_cmd_version(struct glink_transport_if *if_ptr, uint32_t version,
			   uint32_t features)
{
	struct command {
		uint16_t id;
		uint16_t version;
		uint32_t features;
	};
	struct command cmd;
	struct edge_info *einfo;
	int rcu_id;

	einfo = container_of(if_ptr, struct edge_info, xprt_if);

	rcu_id = srcu_read_lock(&einfo->use_ref);
	if (einfo->in_ssr) {
		srcu_read_unlock(&einfo->use_ref, rcu_id);
		return;
	}

	cmd.id = VERSION_CMD;
	cmd.version = version;
	cmd.features = features;

	fifo_tx(einfo, &cmd, sizeof(cmd));
	srcu_read_unlock(&einfo->use_ref, rcu_id);
}

/**
 * tx_cmd_version_ack() - convert a version ack cmd to wire format and transmit
 * @if_ptr:	The transport to transmit on.
 * @version:	The version number to encode.
 * @features:	The features information to encode.
 */
static void tx_cmd_version_ack(struct glink_transport_if *if_ptr,
			       uint32_t version,
			       uint32_t features)
{
	struct command {
		uint16_t id;
		uint16_t version;
		uint32_t features;
	};
	struct command cmd;
	struct edge_info *einfo;
	int rcu_id;

	einfo = container_of(if_ptr, struct edge_info, xprt_if);

	rcu_id = srcu_read_lock(&einfo->use_ref);
	if (einfo->in_ssr) {
		srcu_read_unlock(&einfo->use_ref, rcu_id);
		return;
	}

	cmd.id = VERSION_ACK_CMD;
	cmd.version = version;
	cmd.features = features;

	fifo_tx(einfo, &cmd, sizeof(cmd));
	srcu_read_unlock(&einfo->use_ref, rcu_id);
}

/**
 * set_version() - activate a negotiated version and feature set
 * @if_ptr:	The transport to configure.
 * @version:	The version to use.
 * @features:	The features to use.
 *
 * Return: The supported capabilities of the transport.
 */
static uint32_t set_version(struct glink_transport_if *if_ptr, uint32_t version,
			    uint32_t features)
{
	struct edge_info *einfo;
	uint32_t ret;
	int rcu_id;

	einfo = container_of(if_ptr, struct edge_info, xprt_if);

	rcu_id = srcu_read_lock(&einfo->use_ref);
	if (einfo->in_ssr) {
		srcu_read_unlock(&einfo->use_ref, rcu_id);
		return 0;
	}

	ret = einfo->intentless ?
				GCAP_INTENTLESS | GCAP_SIGNALS : GCAP_SIGNALS;

	if (features & TRACER_PKT_FEATURE)
		ret |= GCAP_TRACER_PKT;

	srcu_read_unlock(&einfo->use_ref, rcu_id);
	return ret;
}

/**
 * tx_cmd_ch_open() - convert a channel open cmd to wire format and transmit
 * @if_ptr:	The transport to transmit on.
 * @lcid:	The local channel id to encode.
 * @name:	The channel name to encode.
 * @req_xprt:	The transport the core would like to migrate this channel to.
 *
 * Return: 0 on success or standard Linux error code.
 */
static int tx_cmd_ch_open(struct glink_transport_if *if_ptr, uint32_t lcid,
			  const char *name, uint16_t req_xprt)
{
	struct command {
		uint16_t id;
		uint16_t lcid;
		uint32_t length;
	};
	struct command cmd;
	struct edge_info *einfo;
	uint32_t buf_size;
	void *buf;
	int rcu_id;

	einfo = container_of(if_ptr, struct edge_info, xprt_if);

	rcu_id = srcu_read_lock(&einfo->use_ref);
	if (einfo->in_ssr) {
		srcu_read_unlock(&einfo->use_ref, rcu_id);
		return -EFAULT;
	}

	cmd.id = OPEN_CMD;
	cmd.lcid = lcid;
	cmd.length = strlen(name) + 1;

	buf_size = ALIGN(sizeof(cmd) + cmd.length, FIFO_ALIGNMENT);

	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf) {
		GLINK_ERR("%s: malloc fail for %d size buf\n",
			  __func__, buf_size);
		srcu_read_unlock(&einfo->use_ref, rcu_id);
		return -ENOMEM;
	}

	memcpy(buf, &cmd, sizeof(cmd));
	memcpy(buf + sizeof(cmd), name, cmd.length);

	fifo_tx(einfo, buf, buf_size);

	kfree(buf);

	srcu_read_unlock(&einfo->use_ref, rcu_id);
	return 0;
}

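/*
 * Worked example for tx_cmd_ch_open(): opening channel "loopback" (lcid = 3)
 * produces a 24-byte, FIFO_ALIGNMENT-padded frame:
 *
 *	bytes 0-7:	{ id = OPEN_CMD, lcid = 3, length = 9 }
 *	bytes 8-16:	"loopback\0"
 *	bytes 17-23:	zero padding from kzalloc()
 */
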
/**
 * tx_cmd_ch_close() - convert a channel close cmd to wire format and transmit
 * @if_ptr:	The transport to transmit on.
 * @lcid:	The local channel id to encode.
 *
 * Return: 0 on success or standard Linux error code.
 */
static int tx_cmd_ch_close(struct glink_transport_if *if_ptr, uint32_t lcid)
{
	struct command {
		uint16_t id;
		uint16_t lcid;
		uint32_t reserved;
	};
	struct command cmd;
	struct edge_info *einfo;
	int rcu_id;

	einfo = container_of(if_ptr, struct edge_info, xprt_if);

	rcu_id = srcu_read_lock(&einfo->use_ref);
	if (einfo->in_ssr) {
		srcu_read_unlock(&einfo->use_ref, rcu_id);
		return -EFAULT;
	}

	cmd.id = CLOSE_CMD;
	cmd.lcid = lcid;
	cmd.reserved = 0;

	fifo_tx(einfo, &cmd, sizeof(cmd));

	srcu_read_unlock(&einfo->use_ref, rcu_id);
	return 0;
}

/**
 * tx_cmd_ch_remote_open_ack() - convert a channel open ack cmd to wire format
 *				 and transmit
 * @if_ptr:	The transport to transmit on.
 * @rcid:	The remote channel id to encode.
 * @xprt_resp:	The response to a transport migration request.
 */
static void tx_cmd_ch_remote_open_ack(struct glink_transport_if *if_ptr,
				      uint32_t rcid, uint16_t xprt_resp)
{
	struct command {
		uint16_t id;
		uint16_t rcid;
		uint32_t reserved;
	};
	struct command cmd;
	struct edge_info *einfo;
	int rcu_id;

	einfo = container_of(if_ptr, struct edge_info, xprt_if);

	rcu_id = srcu_read_lock(&einfo->use_ref);
	if (einfo->in_ssr) {
		srcu_read_unlock(&einfo->use_ref, rcu_id);
		return;
	}

	cmd.id = OPEN_ACK_CMD;
	cmd.rcid = rcid;
	cmd.reserved = 0;

	fifo_tx(einfo, &cmd, sizeof(cmd));
	srcu_read_unlock(&einfo->use_ref, rcu_id);
}

/**
 * tx_cmd_ch_remote_close_ack() - convert a channel close ack cmd to wire
 *				  format and transmit
 * @if_ptr:	The transport to transmit on.
 * @rcid:	The remote channel id to encode.
 */
static void tx_cmd_ch_remote_close_ack(struct glink_transport_if *if_ptr,
				       uint32_t rcid)
{
	struct command {
		uint16_t id;
		uint16_t rcid;
		uint32_t reserved;
	};
	struct command cmd;
	struct edge_info *einfo;
	int rcu_id;

	einfo = container_of(if_ptr, struct edge_info, xprt_if);

	rcu_id = srcu_read_lock(&einfo->use_ref);
	if (einfo->in_ssr) {
		srcu_read_unlock(&einfo->use_ref, rcu_id);
		return;
	}

	cmd.id = CLOSE_ACK_CMD;
	cmd.rcid = rcid;
	cmd.reserved = 0;

	fifo_tx(einfo, &cmd, sizeof(cmd));
	srcu_read_unlock(&einfo->use_ref, rcu_id);
}

/**
 * ssr() - process a subsystem restart notification of a transport
 * @if_ptr:	The transport to restart.
 *
 * Return: 0 on success or standard Linux error code.
 */
static int ssr(struct glink_transport_if *if_ptr)
{
	struct edge_info *einfo;
	struct deferred_cmd *cmd;

	einfo = container_of(if_ptr, struct edge_info, xprt_if);

	BUG_ON(einfo->remote_proc_id == SMEM_RPM);

	einfo->in_ssr = true;
	wake_up_all(&einfo->tx_blocked_queue);

	synchronize_srcu(&einfo->use_ref);

	while (!list_empty(&einfo->deferred_cmds)) {
		cmd = list_first_entry(&einfo->deferred_cmds,
				       struct deferred_cmd, list_node);
		list_del(&cmd->list_node);
		kfree(cmd->data);
		kfree(cmd);
	}

	einfo->tx_resume_needed = false;
	einfo->tx_blocked_signal_sent = false;
	einfo->rx_fifo = NULL;
	einfo->rx_fifo_size = 0;
	einfo->tx_ch_desc->write_index = 0;
	einfo->rx_ch_desc->read_index = 0;
	einfo->xprt_if.glink_core_if_ptr->link_down(&einfo->xprt_if);

	return 0;
}

/**
 * wait_link_down() - Check status of read/write indices
 * @if_ptr:	The transport to check.
 *
 * Return: 1 if indices are all zero, 0 otherwise.
 */
int wait_link_down(struct glink_transport_if *if_ptr)
{
	struct edge_info *einfo;

	einfo = container_of(if_ptr, struct edge_info, xprt_if);

	if (einfo->tx_ch_desc->write_index == 0 &&
	    einfo->tx_ch_desc->read_index == 0 &&
	    einfo->rx_ch_desc->write_index == 0 &&
	    einfo->rx_ch_desc->read_index == 0)
		return 1;
	else
		return 0;
}

/**
 * allocate_rx_intent() - allocate/reserve space for RX Intent
 * @if_ptr:	The transport the intent is associated with.
 * @size:	Size of the intent.
 * @intent:	Pointer to the intent structure.
 *
 * Assigns "data" to the buffer created, since the transport creates a linear
 * buffer, and "iovec" to the "intent" itself, so that the data can be passed
 * to a client that receives only vector buffers. Note that returning NULL for
 * the pointer is valid (it means that space has been reserved, but the actual
 * pointer will be provided later).
 *
 * Return: 0 on success or standard Linux error code.
 */
static int allocate_rx_intent(struct glink_transport_if *if_ptr, size_t size,
			      struct glink_core_rx_intent *intent)
{
	void *t;

	t = kmalloc(size, GFP_KERNEL);
	if (!t)
		return -ENOMEM;

	intent->data = t;
	intent->iovec = (void *)intent;
	intent->vprovider = rx_linear_vbuf_provider;
	intent->pprovider = NULL;
	return 0;
}

/**
 * deallocate_rx_intent() - Deallocate space created for RX Intent
 * @if_ptr:	The transport the intent is associated with.
 * @intent:	Pointer to the intent structure.
 *
 * Return: 0 on success or standard Linux error code.
 */
static int deallocate_rx_intent(struct glink_transport_if *if_ptr,
				struct glink_core_rx_intent *intent)
{
	if (!intent || !intent->data)
		return -EINVAL;

	kfree(intent->data);
	intent->data = NULL;
	intent->iovec = NULL;
	intent->vprovider = NULL;
	return 0;
}

/**
 * tx_cmd_local_rx_intent() - convert an rx intent cmd to wire format and
 *			      transmit
 * @if_ptr:	The transport to transmit on.
 * @lcid:	The local channel id to encode.
 * @size:	The intent size to encode.
 * @liid:	The local intent id to encode.
 *
 * Return: 0 on success or standard Linux error code.
 */
static int tx_cmd_local_rx_intent(struct glink_transport_if *if_ptr,
				  uint32_t lcid, size_t size, uint32_t liid)
{
	struct command {
		uint16_t id;
		uint16_t lcid;
		uint32_t count;
		uint32_t size;
		uint32_t liid;
	};
	struct command cmd;
	struct edge_info *einfo;
	int rcu_id;

	if (size > UINT_MAX) {
		pr_err("%s: size %zu is too large to encode\n", __func__, size);
		return -EMSGSIZE;
	}

	einfo = container_of(if_ptr, struct edge_info, xprt_if);

	if (einfo->intentless)
		return -EOPNOTSUPP;

	rcu_id = srcu_read_lock(&einfo->use_ref);
	if (einfo->in_ssr) {
		srcu_read_unlock(&einfo->use_ref, rcu_id);
		return -EFAULT;
	}

	cmd.id = RX_INTENT_CMD;
	cmd.lcid = lcid;
	cmd.count = 1;
	cmd.size = size;
	cmd.liid = liid;

	fifo_tx(einfo, &cmd, sizeof(cmd));

	srcu_read_unlock(&einfo->use_ref, rcu_id);
	return 0;
}

/**
 * tx_cmd_local_rx_done() - convert an rx done cmd to wire format and transmit
 * @if_ptr:	The transport to transmit on.
 * @lcid:	The local channel id to encode.
 * @liid:	The local intent id to encode.
 * @reuse:	Reuse the consumed intent.
 */
static void tx_cmd_local_rx_done(struct glink_transport_if *if_ptr,
				 uint32_t lcid, uint32_t liid, bool reuse)
{
	struct command {
		uint16_t id;
		uint16_t lcid;
		uint32_t liid;
	};
	struct command cmd;
	struct edge_info *einfo;
	int rcu_id;

	einfo = container_of(if_ptr, struct edge_info, xprt_if);

	if (einfo->intentless)
		return;

	rcu_id = srcu_read_lock(&einfo->use_ref);
	if (einfo->in_ssr) {
		srcu_read_unlock(&einfo->use_ref, rcu_id);
		return;
	}

	cmd.id = reuse ? RX_DONE_W_REUSE_CMD : RX_DONE_CMD;
	cmd.lcid = lcid;
	cmd.liid = liid;

	fifo_tx(einfo, &cmd, sizeof(cmd));
	srcu_read_unlock(&einfo->use_ref, rcu_id);
}

/**
 * tx_cmd_rx_intent_req() - convert an rx intent request cmd to wire format
 *			    and transmit
 * @if_ptr:	The transport to transmit on.
 * @lcid:	The local channel id to encode.
 * @size:	The requested intent size to encode.
 *
 * Return: 0 on success or standard Linux error code.
 */
static int tx_cmd_rx_intent_req(struct glink_transport_if *if_ptr,
				uint32_t lcid, size_t size)
{
	struct command {
		uint16_t id;
		uint16_t lcid;
		uint32_t size;
	};
	struct command cmd;
	struct edge_info *einfo;
	int rcu_id;

	if (size > UINT_MAX) {
		pr_err("%s: size %zu is too large to encode\n", __func__, size);
		return -EMSGSIZE;
	}

	einfo = container_of(if_ptr, struct edge_info, xprt_if);

	if (einfo->intentless)
		return -EOPNOTSUPP;

	rcu_id = srcu_read_lock(&einfo->use_ref);
	if (einfo->in_ssr) {
		srcu_read_unlock(&einfo->use_ref, rcu_id);
		return -EFAULT;
	}

	cmd.id = RX_INTENT_REQ_CMD;
	cmd.lcid = lcid;
	cmd.size = size;

	fifo_tx(einfo, &cmd, sizeof(cmd));

	srcu_read_unlock(&einfo->use_ref, rcu_id);
	return 0;
}

/**
 * tx_cmd_remote_rx_intent_req_ack() - convert an rx intent request ack cmd to
 *				       wire format and transmit
 * @if_ptr:	The transport to transmit on.
 * @lcid:	The local channel id to encode.
 * @granted:	The request response to encode.
 *
 * Return: 0 on success or standard Linux error code.
 */
static int tx_cmd_remote_rx_intent_req_ack(struct glink_transport_if *if_ptr,
					   uint32_t lcid, bool granted)
{
	struct command {
		uint16_t id;
		uint16_t lcid;
		uint32_t response;
	};
	struct command cmd;
	struct edge_info *einfo;
	int rcu_id;

	einfo = container_of(if_ptr, struct edge_info, xprt_if);

	if (einfo->intentless)
		return -EOPNOTSUPP;

	rcu_id = srcu_read_lock(&einfo->use_ref);
	if (einfo->in_ssr) {
		srcu_read_unlock(&einfo->use_ref, rcu_id);
		return -EFAULT;
	}

	cmd.id = RX_INTENT_REQ_ACK_CMD;
	cmd.lcid = lcid;
	if (granted)
		cmd.response = 1;
	else
		cmd.response = 0;

	fifo_tx(einfo, &cmd, sizeof(cmd));

	srcu_read_unlock(&einfo->use_ref, rcu_id);
	return 0;
}

1801/**
1802 * tx_cmd_set_sigs() - convert a signals ack cmd to wire format and transmit
1803 * @if_ptr: The transport to transmit on.
1804 * @lcid: The local channel id to encode.
1805 * @sigs: The signals to encode.
1806 *
1807 * Return: 0 on success or standard Linux error code.
1808 */
1809static int tx_cmd_set_sigs(struct glink_transport_if *if_ptr, uint32_t lcid,
1810 uint32_t sigs)
1811{
1812 struct command {
1813 uint16_t id;
1814 uint16_t lcid;
1815 uint32_t sigs;
1816 };
1817 struct command cmd;
1818 struct edge_info *einfo;
1819 int rcu_id;
1820
1821 einfo = container_of(if_ptr, struct edge_info, xprt_if);
1822
1823 rcu_id = srcu_read_lock(&einfo->use_ref);
1824 if (einfo->in_ssr) {
1825 srcu_read_unlock(&einfo->use_ref, rcu_id);
1826 return -EFAULT;
1827 }
1828
1829 cmd.id = SIGNALS_CMD,
1830 cmd.lcid = lcid;
1831 cmd.sigs = sigs;
1832
1833 fifo_tx(einfo, &cmd, sizeof(cmd));
1834
1835 srcu_read_unlock(&einfo->use_ref, rcu_id);
1836 return 0;
1837}
1838
1839/**
1840 * poll() - poll for data on a channel
1841 * @if_ptr: The transport the channel exists on.
1842 * @lcid: The local channel id.
1843 *
1844 * Return: 0 if no data available, 1 if data available.
1845 */
1846static int poll(struct glink_transport_if *if_ptr, uint32_t lcid)
1847{
1848 struct edge_info *einfo;
1849 int rcu_id;
1850
1851 einfo = container_of(if_ptr, struct edge_info, xprt_if);
1852
1853 rcu_id = srcu_read_lock(&einfo->use_ref);
1854 if (einfo->in_ssr) {
1855 srcu_read_unlock(&einfo->use_ref, rcu_id);
1856 return -EFAULT;
1857 }
1858
1859 if (fifo_read_avail(einfo)) {
1860 __rx_worker(einfo, true);
1861 srcu_read_unlock(&einfo->use_ref, rcu_id);
1862 return 1;
1863 }
1864
1865 srcu_read_unlock(&einfo->use_ref, rcu_id);
1866 return 0;
1867}
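/*
 * Usage sketch (hypothetical caller, not part of this driver): a
 * low-latency client can spin on poll() and bail out on the negative
 * return that appears once SSR tears the edge down:
 *
 *	int ret;
 *
 *	do {
 *		ret = xprt_if->poll(xprt_if, lcid);
 *	} while (ret == 0);
 *	if (ret < 0)
 *		return ret;
 *
 * A return of 1 means fifo data was found and already dispatched through
 * __rx_worker() in the caller's context.
 */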
1868
1869/**
1870 * mask_rx_irq() - mask the receive irq for a channel
1871 * @if_ptr: The transport the channel exists on.
1872 * @lcid: The local channel id for the channel.
1873 * @mask: True to mask the irq, false to unmask.
1874 * @pstruct: Platform defined structure for handling the masking.
1875 *
1876 * Return: 0 on success or standard Linux error code.
1877 */
1878static int mask_rx_irq(struct glink_transport_if *if_ptr, uint32_t lcid,
1879 bool mask, void *pstruct)
1880{
1881 struct edge_info *einfo;
1882 struct irq_chip *irq_chip;
1883 struct irq_data *irq_data;
1884 int rcu_id;
1885
1886 einfo = container_of(if_ptr, struct edge_info, xprt_if);
1887
1888 rcu_id = srcu_read_lock(&einfo->use_ref);
1889 if (einfo->in_ssr) {
1890 srcu_read_unlock(&einfo->use_ref, rcu_id);
1891 return -EFAULT;
1892 }
1893
1894 irq_chip = irq_get_chip(einfo->irq_line);
1895 if (!irq_chip) {
1896 srcu_read_unlock(&einfo->use_ref, rcu_id);
1897 return -ENODEV;
1898 }
1899
1900 irq_data = irq_get_irq_data(einfo->irq_line);
1901 if (!irq_data) {
1902 srcu_read_unlock(&einfo->use_ref, rcu_id);
1903 return -ENODEV;
1904 }
1905
1906 if (mask) {
1907 irq_chip->irq_mask(irq_data);
1908 einfo->irq_disabled = true;
1909 if (pstruct)
1910 irq_set_affinity(einfo->irq_line, pstruct);
1911 } else {
1912 irq_chip->irq_unmask(irq_data);
1913 einfo->irq_disabled = false;
1914 }
1915
1916 srcu_read_unlock(&einfo->use_ref, rcu_id);
1917 return 0;
1918}
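/*
 * Usage sketch (hypothetical, assuming the caller owns the cpumask): mask
 * the edge interrupt and steer it to CPU 2 around a polled receive
 * window, then unmask:
 *
 *	mask_rx_irq(if_ptr, lcid, true, (void *)cpumask_of(2));
 *	... poll()-driven receive ...
 *	mask_rx_irq(if_ptr, lcid, false, NULL);
 *
 * @pstruct is only consulted on mask, where it is handed to
 * irq_set_affinity() as the new affinity mask.
 */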
1919
1920/**
1921 * tx_data() - convert a data/tracer_pkt to wire format and transmit
1922 * @if_ptr: The transport to transmit on.
1923 * @cmd_id: The command ID to transmit.
1924 * @lcid: The local channel id to encode.
1925 * @pctx: The data to encode.
1926 *
1927 * Return: Number of bytes written or standard Linux error code.
1928 */
1929static int tx_data(struct glink_transport_if *if_ptr, uint16_t cmd_id,
1930 uint32_t lcid, struct glink_core_tx_pkt *pctx)
1931{
1932 struct command {
1933 uint16_t id;
1934 uint16_t lcid;
1935 uint32_t riid;
1936 uint32_t size;
1937 uint32_t size_left;
1938 };
1939 struct command cmd;
1940 struct edge_info *einfo;
1941 uint32_t size;
1942 uint32_t zeros_size;
1943 const void *data_start;
1944 char zeros[FIFO_ALIGNMENT] = { 0 };
1945 unsigned long flags;
1946 size_t tx_size = 0;
1947 int rcu_id;
1948 int ret;
1949
1950 if (pctx->size < pctx->size_remaining) {
1951 GLINK_ERR("%s: size remaining exceeds size. Resetting.\n",
1952 __func__);
1953 pctx->size_remaining = pctx->size;
1954 }
1955 if (!pctx->size_remaining)
1956 return 0;
1957
1958 einfo = container_of(if_ptr, struct edge_info, xprt_if);
1959
1960 rcu_id = srcu_read_lock(&einfo->use_ref);
1961 if (einfo->in_ssr) {
1962 srcu_read_unlock(&einfo->use_ref, rcu_id);
1963 return -EFAULT;
1964 }
1965
1966 if (einfo->intentless &&
1967 (pctx->size_remaining != pctx->size || cmd_id == TRACER_PKT_CMD)) {
1968 srcu_read_unlock(&einfo->use_ref, rcu_id);
1969 return -EINVAL;
1970 }
1971
1972 if (cmd_id == TX_DATA_CMD) {
1973 if (pctx->size_remaining == pctx->size)
1974 cmd.id = TX_DATA_CMD;
1975 else
1976 cmd.id = TX_DATA_CONT_CMD;
1977 } else {
1978 if (pctx->size_remaining == pctx->size)
1979 cmd.id = TRACER_PKT_CMD;
1980 else
1981 cmd.id = TRACER_PKT_CONT_CMD;
1982 }
1983 cmd.lcid = lcid;
1984 cmd.riid = pctx->riid;
1985 data_start = get_tx_vaddr(pctx, pctx->size - pctx->size_remaining,
1986 &tx_size);
1987 if (!data_start) {
1988 GLINK_ERR("%s: invalid data_start\n", __func__);
1989 srcu_read_unlock(&einfo->use_ref, rcu_id);
1990 return -EINVAL;
1991 }
1992
1993 spin_lock_irqsave(&einfo->write_lock, flags);
1994 size = fifo_write_avail(einfo);
1995
1996 /* Intentless clients expect a complete commit or instant failure */
1997 if (einfo->intentless && size < sizeof(cmd) + pctx->size) {
1998 spin_unlock_irqrestore(&einfo->write_lock, flags);
1999 srcu_read_unlock(&einfo->use_ref, rcu_id);
2000 return -ENOSPC;
2001 }
2002
2003 /* Need enough space to write the command and some data */
2004 if (size <= sizeof(cmd)) {
2005 einfo->tx_resume_needed = true;
2006 spin_unlock_irqrestore(&einfo->write_lock, flags);
2007 srcu_read_unlock(&einfo->use_ref, rcu_id);
2008 return -EAGAIN;
2009 }
2010 size -= sizeof(cmd);
2011 if (size > tx_size)
2012 size = tx_size;
2013
2014 cmd.size = size;
2015 pctx->size_remaining -= size;
2016 cmd.size_left = pctx->size_remaining;
2017 zeros_size = ALIGN(size, FIFO_ALIGNMENT) - cmd.size;
2018 if (cmd.id == TRACER_PKT_CMD)
2019 tracer_pkt_log_event((void *)(pctx->data), GLINK_XPRT_TX);
2020
2021 ret = fifo_write_complex(einfo, &cmd, sizeof(cmd), data_start, size,
2022 zeros, zeros_size);
2023 if (ret < 0) {
2024 spin_unlock_irqrestore(&einfo->write_lock, flags);
2025 srcu_read_unlock(&einfo->use_ref, rcu_id);
2026 return ret;
2027 }
2028
2029 GLINK_DBG("%s %s: lcid[%u] riid[%u] cmd[%d], size[%d], size_left[%d]\n",
2030 "<SMEM>", __func__, cmd.lcid, cmd.riid, cmd.id, cmd.size,
2031 cmd.size_left);
2032 spin_unlock_irqrestore(&einfo->write_lock, flags);
2033
2034	/* Fake tx_done for intentless since it's not supported over the wire */
2035 if (einfo->intentless) {
2036 spin_lock_irqsave(&einfo->rx_lock, flags);
2037 cmd.id = RX_DONE_CMD;
2038 cmd.lcid = pctx->rcid;
2039 queue_cmd(einfo, &cmd, NULL);
2040 spin_unlock_irqrestore(&einfo->rx_lock, flags);
2041 }
2042
2043 srcu_read_unlock(&einfo->use_ref, rcu_id);
2044 return cmd.size;
2045}
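/*
 * Worked example (illustrative numbers): sizeof(cmd) is 16 bytes, so with
 * 4096 bytes free in the FIFO a 6000-byte packet on an intent-based edge
 * goes out in two calls:
 *
 *	call 1: cmd.id = TX_DATA_CMD,      cmd.size = 4080, size_left = 1920
 *	call 2: cmd.id = TX_DATA_CONT_CMD, cmd.size = 1920, size_left = 0
 *
 * Payloads are zero-padded up to FIFO_ALIGNMENT. An intentless edge would
 * instead fail the first call with -ENOSPC, since it must commit all
 * 6016 bytes (command plus payload) in one shot.
 */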
2046
2047/**
2048 * tx() - convert a data transmit cmd to wire format and transmit
2049 * @if_ptr: The transport to transmit on.
2050 * @lcid: The local channel id to encode.
2051 * @pctx: The data to encode.
2052 *
2053 * Return: Number of bytes written or standard Linux error code.
2054 */
2055static int tx(struct glink_transport_if *if_ptr, uint32_t lcid,
2056 struct glink_core_tx_pkt *pctx)
2057{
2058 return tx_data(if_ptr, TX_DATA_CMD, lcid, pctx);
2059}
2060
2061/**
2062 * tx_cmd_tracer_pkt() - convert a tracer packet cmd to wire format and transmit
2063 * @if_ptr: The transport to transmit on.
2064 * @lcid: The local channel id to encode.
2065 * @pctx: The data to encode.
2066 *
2067 * Return: Number of bytes written or standard Linux error code.
2068 */
2069static int tx_cmd_tracer_pkt(struct glink_transport_if *if_ptr, uint32_t lcid,
2070 struct glink_core_tx_pkt *pctx)
2071{
2072 return tx_data(if_ptr, TRACER_PKT_CMD, lcid, pctx);
2073}
2074
2075/**
2076 * get_power_vote_ramp_time() - Get the ramp time required for the power
2077 * votes to be applied
2078 * @if_ptr: The transport interface on which power voting is requested.
2079 * @state: The power state for which ramp time is required.
2080 *
2081 * Return: The ramp time specific to the power state, standard error otherwise.
2082 */
2083static unsigned long get_power_vote_ramp_time(
2084 struct glink_transport_if *if_ptr,
2085 uint32_t state)
2086{
2087 struct edge_info *einfo;
2088
2089 einfo = container_of(if_ptr, struct edge_info, xprt_if);
2090
2091 if (state >= einfo->num_pw_states || !(einfo->ramp_time_us))
2092 return (unsigned long)ERR_PTR(-EINVAL);
2093
2094 return einfo->ramp_time_us[state];
2095}
2096
2097/**
2098 * power_vote() - Update the power votes to meet qos requirement
2099 * @if_ptr: The transport interface on which power voting is requested.
2100 * @state: The power state for which the voting should be done.
2101 *
2102 * Return: 0 on Success, standard error otherwise.
2103 */
2104static int power_vote(struct glink_transport_if *if_ptr, uint32_t state)
2105{
2106 return 0;
2107}
2108
2109/**
2110 * power_unvote() - Remove all the power votes
2111 * @if_ptr: The transport interface on which power voting is requested.
2112 *
2113 * Return: 0 on Success, standard error otherwise.
2114 */
2115static int power_unvote(struct glink_transport_if *if_ptr)
2116{
2117 return 0;
2118}
2119
2120/**
2121 * negotiate_features_v1() - determine what features of a version can be used
2122 * @if_ptr: The transport for which features are negotiated.
2123 * @version: The version negotiated.
2124 * @features: The set of requested features.
2125 *
2126 * Return: What set of the requested features can be supported.
2127 */
2128static uint32_t negotiate_features_v1(struct glink_transport_if *if_ptr,
2129 const struct glink_core_version *version,
2130 uint32_t features)
2131{
2132 return features & version->features;
2133}
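/*
 * Worked example (illustrative masks): with version->features == 0x5 and
 * a requested feature set of 0x7,
 *
 *	negotiate_features_v1(if_ptr, version, 0x7) == (0x7 & 0x5) == 0x5
 *
 * i.e. only the intersection of the two bitmasks is granted; a requested
 * bit the local version does not advertise is silently dropped.
 */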
2134
2135/**
2136 * init_xprt_if() - initialize the xprt_if for an edge
2137 * @einfo: The edge to initialize.
2138 */
2139static void init_xprt_if(struct edge_info *einfo)
2140{
2141 einfo->xprt_if.tx_cmd_version = tx_cmd_version;
2142 einfo->xprt_if.tx_cmd_version_ack = tx_cmd_version_ack;
2143 einfo->xprt_if.set_version = set_version;
2144 einfo->xprt_if.tx_cmd_ch_open = tx_cmd_ch_open;
2145 einfo->xprt_if.tx_cmd_ch_close = tx_cmd_ch_close;
2146 einfo->xprt_if.tx_cmd_ch_remote_open_ack = tx_cmd_ch_remote_open_ack;
2147 einfo->xprt_if.tx_cmd_ch_remote_close_ack = tx_cmd_ch_remote_close_ack;
2148 einfo->xprt_if.ssr = ssr;
2149 einfo->xprt_if.allocate_rx_intent = allocate_rx_intent;
2150 einfo->xprt_if.deallocate_rx_intent = deallocate_rx_intent;
2151 einfo->xprt_if.tx_cmd_local_rx_intent = tx_cmd_local_rx_intent;
2152 einfo->xprt_if.tx_cmd_local_rx_done = tx_cmd_local_rx_done;
2153 einfo->xprt_if.tx = tx;
2154 einfo->xprt_if.tx_cmd_rx_intent_req = tx_cmd_rx_intent_req;
2155 einfo->xprt_if.tx_cmd_remote_rx_intent_req_ack =
2156 tx_cmd_remote_rx_intent_req_ack;
2157 einfo->xprt_if.tx_cmd_set_sigs = tx_cmd_set_sigs;
2158 einfo->xprt_if.poll = poll;
2159 einfo->xprt_if.mask_rx_irq = mask_rx_irq;
2160 einfo->xprt_if.wait_link_down = wait_link_down;
2161 einfo->xprt_if.tx_cmd_tracer_pkt = tx_cmd_tracer_pkt;
2162 einfo->xprt_if.get_power_vote_ramp_time = get_power_vote_ramp_time;
2163 einfo->xprt_if.power_vote = power_vote;
2164 einfo->xprt_if.power_unvote = power_unvote;
2165}
2166
2167/**
2168 * init_xprt_cfg() - initialize the xprt_cfg for an edge
2169 * @einfo: The edge to initialize.
2170 * @name: The name of the remote side this edge communicates to.
2171 */
2172static void init_xprt_cfg(struct edge_info *einfo, const char *name)
2173{
2174 einfo->xprt_cfg.name = XPRT_NAME;
2175 einfo->xprt_cfg.edge = name;
2176 einfo->xprt_cfg.versions = versions;
2177 einfo->xprt_cfg.versions_entries = ARRAY_SIZE(versions);
2178 einfo->xprt_cfg.max_cid = SZ_64K;
2179 einfo->xprt_cfg.max_iid = SZ_2G;
2180}
2181
2182/**
2183 * parse_qos_dt_params() - Parse the power states from DT
2184 * @node: Reference to the device tree node for a specific edge.
2185 * @einfo: Edge information for the edge whose probe function was called.
2186 *
2187 * Return: 0 on success, standard error code otherwise.
2188 */
2189static int parse_qos_dt_params(struct device_node *node,
2190 struct edge_info *einfo)
2191{
2192 int rc;
2193 int i;
2194 char *key;
2195 uint32_t *arr32;
2196 uint32_t num_states;
2197
2198 key = "qcom,ramp-time";
2199 if (!of_find_property(node, key, &num_states))
2200 return -ENODEV;
2201
2202 num_states /= sizeof(uint32_t);
2203
2204 einfo->num_pw_states = num_states;
2205
2206 arr32 = kmalloc_array(num_states, sizeof(uint32_t), GFP_KERNEL);
2207 if (!arr32)
2208 return -ENOMEM;
2209
2210 einfo->ramp_time_us = kmalloc_array(num_states, sizeof(unsigned long),
2211 GFP_KERNEL);
2212 if (!einfo->ramp_time_us) {
2213 rc = -ENOMEM;
2214 goto mem_alloc_fail;
2215 }
2216
2217 rc = of_property_read_u32_array(node, key, arr32, num_states);
2218 if (rc) {
2219 rc = -ENODEV;
2220 goto invalid_key;
2221 }
2222 for (i = 0; i < num_states; i++)
2223 einfo->ramp_time_us[i] = arr32[i];
2224
2225 rc = 0;
2226 return rc;
2227
2228invalid_key:
2229 kfree(einfo->ramp_time_us);
2230mem_alloc_fail:
2231 kfree(arr32);
2232 return rc;
2233}
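/*
 * Illustrative DT fragment (example values): a node carrying
 *
 *	qcom,ramp-time = <10 50 100>;
 *
 * yields num_pw_states == 3 and ramp_time_us == {10, 50, 100}, so
 * get_power_vote_ramp_time() for state 1 reports 50 microseconds.
 */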
2234
2235/**
2236 * subsys_name_to_id() - translate a subsystem name to a processor id
2237 * @name: The subsystem name to look up.
2238 *
2239 * Return: The processor id corresponding to @name or standard Linux error code.
2240 */
2241static int subsys_name_to_id(const char *name)
2242{
2243 if (!name)
2244 return -ENODEV;
2245
2246 if (!strcmp(name, "apss"))
2247 return SMEM_APPS;
2248 if (!strcmp(name, "dsps"))
2249 return SMEM_DSPS;
2250 if (!strcmp(name, "lpass"))
2251 return SMEM_Q6;
2252 if (!strcmp(name, "mpss"))
2253 return SMEM_MODEM;
2254 if (!strcmp(name, "rpm"))
2255 return SMEM_RPM;
2256 if (!strcmp(name, "wcnss"))
2257 return SMEM_WCNSS;
2258 if (!strcmp(name, "spss"))
2259 return SMEM_SPSS;
2260 if (!strcmp(name, "cdsp"))
2261 return SMEM_CDSP;
2262 return -ENODEV;
2263}
2264
2265static int glink_smem_native_probe(struct platform_device *pdev)
2266{
2267 struct device_node *node;
2268 struct device_node *phandle_node;
2269 struct edge_info *einfo;
2270 int rc;
2271 char *key;
2272 const char *subsys_name;
2273 uint32_t irq_line;
2274 uint32_t irq_mask;
2275 struct resource *r;
2276
2277 node = pdev->dev.of_node;
2278
2279 einfo = kzalloc(sizeof(*einfo), GFP_KERNEL);
2280 if (!einfo) {
2281 rc = -ENOMEM;
2282 goto edge_info_alloc_fail;
2283 }
2284
2285 key = "label";
2286 subsys_name = of_get_property(node, key, NULL);
2287 if (!subsys_name) {
2288 pr_err("%s: missing key %s\n", __func__, key);
2289 rc = -ENODEV;
2290 goto missing_key;
2291 }
2292
2293 key = "interrupts";
2294 irq_line = irq_of_parse_and_map(node, 0);
2295 if (!irq_line) {
2296 pr_err("%s: missing key %s\n", __func__, key);
2297 rc = -ENODEV;
2298 goto missing_key;
2299 }
2300
2301 key = "qcom,irq-mask";
2302 rc = of_property_read_u32(node, key, &irq_mask);
2303 if (rc) {
2304 pr_err("%s: missing key %s\n", __func__, key);
2305 rc = -ENODEV;
2306 goto missing_key;
2307 }
2308
2309 key = "irq-reg-base";
2310 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
2311 if (!r) {
2312 pr_err("%s: missing key %s\n", __func__, key);
2313 rc = -ENODEV;
2314 goto missing_key;
2315 }
2316
2317 if (subsys_name_to_id(subsys_name) == -ENODEV) {
2318 pr_err("%s: unknown subsystem: %s\n", __func__, subsys_name);
2319 rc = -ENODEV;
2320 goto invalid_key;
2321 }
2322 einfo->remote_proc_id = subsys_name_to_id(subsys_name);
2323
2324 init_xprt_cfg(einfo, subsys_name);
2325 init_xprt_if(einfo);
2326 spin_lock_init(&einfo->write_lock);
2327 init_waitqueue_head(&einfo->tx_blocked_queue);
2328	kthread_init_work(&einfo->kwork, rx_worker);
2329	kthread_init_worker(&einfo->kworker);
2330	INIT_WORK(&einfo->wakeup_work, tx_wakeup_worker);
2331	tasklet_init(&einfo->tasklet, rx_worker_atomic, (unsigned long)einfo);
2332 einfo->read_from_fifo = read_from_fifo;
2333 einfo->write_to_fifo = write_to_fifo;
2334 init_srcu_struct(&einfo->use_ref);
2335 spin_lock_init(&einfo->rx_lock);
2336 INIT_LIST_HEAD(&einfo->deferred_cmds);
2337
2338 mutex_lock(&probe_lock);
2339 if (edge_infos[einfo->remote_proc_id]) {
2340 pr_err("%s: duplicate subsys %s is not valid\n", __func__,
2341 subsys_name);
2342 rc = -ENODEV;
2343 mutex_unlock(&probe_lock);
2344 goto invalid_key;
2345 }
2346 edge_infos[einfo->remote_proc_id] = einfo;
2347 mutex_unlock(&probe_lock);
2348
2349 einfo->out_irq_mask = irq_mask;
2350 einfo->out_irq_reg = ioremap_nocache(r->start, resource_size(r));
2351 if (!einfo->out_irq_reg) {
2352 pr_err("%s: unable to map irq reg\n", __func__);
2353 rc = -ENOMEM;
2354 goto ioremap_fail;
2355 }
2356
2357 einfo->task = kthread_run(kthread_worker_fn, &einfo->kworker,
2358 "smem_native_%s", subsys_name);
2359 if (IS_ERR(einfo->task)) {
2360 rc = PTR_ERR(einfo->task);
2361 pr_err("%s: kthread_run failed %d\n", __func__, rc);
2362 goto kthread_fail;
2363 }
2364
2365 einfo->tx_ch_desc = smem_alloc(SMEM_GLINK_NATIVE_XPRT_DESCRIPTOR,
2366 SMEM_CH_DESC_SIZE,
2367 einfo->remote_proc_id,
2368 0);
2369 if (PTR_ERR(einfo->tx_ch_desc) == -EPROBE_DEFER) {
2370 rc = -EPROBE_DEFER;
2371 goto smem_alloc_fail;
2372 }
2373 if (!einfo->tx_ch_desc) {
2374 pr_err("%s: smem alloc of ch descriptor failed\n", __func__);
2375 rc = -ENOMEM;
2376 goto smem_alloc_fail;
2377 }
2378 einfo->rx_ch_desc = einfo->tx_ch_desc + 1;
2379
2380 einfo->tx_fifo_size = SZ_16K;
2381 einfo->tx_fifo = smem_alloc(SMEM_GLINK_NATIVE_XPRT_FIFO_0,
2382 einfo->tx_fifo_size,
2383 einfo->remote_proc_id,
2384 SMEM_ITEM_CACHED_FLAG);
2385 if (!einfo->tx_fifo) {
2386 pr_err("%s: smem alloc of tx fifo failed\n", __func__);
2387 rc = -ENOMEM;
2388 goto smem_alloc_fail;
2389 }
2390
2391 key = "qcom,qos-config";
2392 phandle_node = of_parse_phandle(node, key, 0);
2393 if (phandle_node && !(of_get_glink_core_qos_cfg(phandle_node,
2394 &einfo->xprt_cfg)))
2395 parse_qos_dt_params(node, einfo);
2396
2397 rc = glink_core_register_transport(&einfo->xprt_if, &einfo->xprt_cfg);
2398 if (rc == -EPROBE_DEFER)
2399 goto reg_xprt_fail;
2400 if (rc) {
2401 pr_err("%s: glink core register transport failed: %d\n",
2402 __func__, rc);
2403 goto reg_xprt_fail;
2404 }
2405
2406 einfo->irq_line = irq_line;
2407 rc = request_irq(irq_line, irq_handler,
2408 IRQF_TRIGGER_RISING | IRQF_NO_SUSPEND | IRQF_SHARED,
2409 node->name, einfo);
2410 if (rc < 0) {
2411 pr_err("%s: request_irq on %d failed: %d\n", __func__, irq_line,
2412 rc);
2413 goto request_irq_fail;
2414 }
2415 rc = enable_irq_wake(irq_line);
2416 if (rc < 0)
2417 pr_err("%s: enable_irq_wake() failed on %d\n", __func__,
2418 irq_line);
2419
2420 register_debugfs_info(einfo);
2421 /* fake an interrupt on this edge to see if the remote side is up */
2422 irq_handler(0, einfo);
2423 return 0;
2424
2425request_irq_fail:
2426 glink_core_unregister_transport(&einfo->xprt_if);
2427reg_xprt_fail:
2428smem_alloc_fail:
2429	kthread_flush_worker(&einfo->kworker);
2430	flush_work(&einfo->wakeup_work);
2431	kthread_stop(einfo->task);
2432 einfo->task = NULL;
2433 tasklet_kill(&einfo->tasklet);
2434kthread_fail:
2435 iounmap(einfo->out_irq_reg);
2436ioremap_fail:
2437 mutex_lock(&probe_lock);
2438 edge_infos[einfo->remote_proc_id] = NULL;
2439 mutex_unlock(&probe_lock);
2440invalid_key:
2441missing_key:
2442 kfree(einfo);
2443edge_info_alloc_fail:
2444 return rc;
2445}
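/*
 * Illustrative DT node (example addresses and values) covering the
 * properties glink_smem_native_probe() parses:
 *
 *	qcom,glink-smem-xprt-modem {
 *		compatible = "qcom,glink-smem-native-xprt";
 *		reg = <0x9820010 0x4>;
 *		reg-names = "irq-reg-base";
 *		interrupts = <GIC_SPI 449 IRQ_TYPE_EDGE_RISING>;
 *		label = "mpss";
 *		qcom,irq-mask = <0x8000>;
 *		qcom,qos-config = <&glink_qos_modem>;
 *	};
 *
 * qcom,qos-config is optional; when present, the QoS table comes from the
 * phandle while the qcom,ramp-time array is read from this node by
 * parse_qos_dt_params().
 */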
2446
2447static int glink_rpm_native_probe(struct platform_device *pdev)
2448{
2449 struct device_node *node;
2450 struct edge_info *einfo;
2451 int rc;
2452 char *key;
2453 const char *subsys_name;
2454 uint32_t irq_line;
2455 uint32_t irq_mask;
2456 struct resource *irq_r;
2457 struct resource *msgram_r;
2458 void __iomem *msgram;
2459 char toc[RPM_TOC_SIZE];
2460 uint32_t *tocp;
2461 uint32_t num_toc_entries;
2462
2463 node = pdev->dev.of_node;
2464
2465 einfo = kzalloc(sizeof(*einfo), GFP_KERNEL);
2466 if (!einfo) {
2467 rc = -ENOMEM;
2468 goto edge_info_alloc_fail;
2469 }
2470
2471 subsys_name = "rpm";
2472
2473 key = "interrupts";
2474 irq_line = irq_of_parse_and_map(node, 0);
2475 if (!irq_line) {
2476 pr_err("%s: missing key %s\n", __func__, key);
2477 rc = -ENODEV;
2478 goto missing_key;
2479 }
2480
2481 key = "qcom,irq-mask";
2482 rc = of_property_read_u32(node, key, &irq_mask);
2483 if (rc) {
2484 pr_err("%s: missing key %s\n", __func__, key);
2485 rc = -ENODEV;
2486 goto missing_key;
2487 }
2488
2489 key = "irq-reg-base";
2490 irq_r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
2491 if (!irq_r) {
2492 pr_err("%s: missing key %s\n", __func__, key);
2493 rc = -ENODEV;
2494 goto missing_key;
2495 }
2496
2497 key = "msgram";
2498 msgram_r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
2499 if (!msgram_r) {
2500 pr_err("%s: missing key %s\n", __func__, key);
2501 rc = -ENODEV;
2502 goto missing_key;
2503 }
2504
2505 if (subsys_name_to_id(subsys_name) == -ENODEV) {
2506 pr_err("%s: unknown subsystem: %s\n", __func__, subsys_name);
2507 rc = -ENODEV;
2508 goto invalid_key;
2509 }
2510 einfo->remote_proc_id = subsys_name_to_id(subsys_name);
2511
2512 init_xprt_cfg(einfo, subsys_name);
2513 init_xprt_if(einfo);
2514 spin_lock_init(&einfo->write_lock);
2515 init_waitqueue_head(&einfo->tx_blocked_queue);
2516	kthread_init_work(&einfo->kwork, rx_worker);
2517	kthread_init_worker(&einfo->kworker);
2518	INIT_WORK(&einfo->wakeup_work, tx_wakeup_worker);
2519	tasklet_init(&einfo->tasklet, rx_worker_atomic, (unsigned long)einfo);
2520 einfo->intentless = true;
2521 einfo->read_from_fifo = memcpy32_fromio;
2522 einfo->write_to_fifo = memcpy32_toio;
2523 init_srcu_struct(&einfo->use_ref);
2524 spin_lock_init(&einfo->rx_lock);
2525 INIT_LIST_HEAD(&einfo->deferred_cmds);
2526
2527 mutex_lock(&probe_lock);
2528 if (edge_infos[einfo->remote_proc_id]) {
2529 pr_err("%s: duplicate subsys %s is not valid\n", __func__,
2530 subsys_name);
2531 rc = -ENODEV;
2532 mutex_unlock(&probe_lock);
2533 goto invalid_key;
2534 }
2535 edge_infos[einfo->remote_proc_id] = einfo;
2536 mutex_unlock(&probe_lock);
2537
2538 einfo->out_irq_mask = irq_mask;
2539 einfo->out_irq_reg = ioremap_nocache(irq_r->start,
2540 resource_size(irq_r));
2541 if (!einfo->out_irq_reg) {
2542 pr_err("%s: unable to map irq reg\n", __func__);
2543 rc = -ENOMEM;
2544 goto irq_ioremap_fail;
2545 }
2546
2547 msgram = ioremap_nocache(msgram_r->start, resource_size(msgram_r));
2548 if (!msgram) {
2549 pr_err("%s: unable to map msgram\n", __func__);
2550 rc = -ENOMEM;
2551 goto msgram_ioremap_fail;
2552 }
2553
2554 einfo->task = kthread_run(kthread_worker_fn, &einfo->kworker,
2555 "smem_native_%s", subsys_name);
2556 if (IS_ERR(einfo->task)) {
2557 rc = PTR_ERR(einfo->task);
2558 pr_err("%s: kthread_run failed %d\n", __func__, rc);
2559 goto kthread_fail;
2560 }
2561
2562 memcpy32_fromio(toc, msgram + resource_size(msgram_r) - RPM_TOC_SIZE,
2563 RPM_TOC_SIZE);
2564 tocp = (uint32_t *)toc;
2565 if (*tocp != RPM_TOC_ID) {
2566 rc = -ENODEV;
2567 pr_err("%s: TOC id %d is not valid\n", __func__, *tocp);
2568 goto toc_init_fail;
2569 }
2570 ++tocp;
2571 num_toc_entries = *tocp;
2572 if (num_toc_entries > RPM_MAX_TOC_ENTRIES) {
2573 rc = -ENODEV;
2574 pr_err("%s: %d is too many toc entries\n", __func__,
2575 num_toc_entries);
2576 goto toc_init_fail;
2577 }
2578 ++tocp;
2579
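	/*
	 * TOC layout in msgram, as consumed below (reconstruction from the
	 * parsing code): word 0 holds RPM_TOC_ID, word 1 the entry count,
	 * then up to RPM_MAX_TOC_ENTRIES entries of three words each:
	 *
	 *	{ fifo id, offset of the channel descriptor, fifo size }
	 *
	 * The two scans locate RPM_TX_FIFO_ID and RPM_RX_FIFO_ID, place each
	 * FIFO directly after its descriptor, and bounds-check the results
	 * against the msgram resource.
	 */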
2580 for (rc = 0; rc < num_toc_entries; ++rc) {
2581 if (*tocp != RPM_TX_FIFO_ID) {
2582 tocp += 3;
2583 continue;
2584 }
2585 ++tocp;
2586 einfo->tx_ch_desc = msgram + *tocp;
2587 einfo->tx_fifo = einfo->tx_ch_desc + 1;
2588 if ((uintptr_t)einfo->tx_fifo >
2589 (uintptr_t)(msgram + resource_size(msgram_r))) {
2590 pr_err("%s: invalid tx fifo address\n", __func__);
2591 einfo->tx_fifo = NULL;
2592 break;
2593 }
2594 ++tocp;
2595 einfo->tx_fifo_size = *tocp;
2596 if (einfo->tx_fifo_size > resource_size(msgram_r) ||
2597 (uintptr_t)(einfo->tx_fifo + einfo->tx_fifo_size) >
2598 (uintptr_t)(msgram + resource_size(msgram_r))) {
2599 pr_err("%s: invalid tx fifo size\n", __func__);
2600 einfo->tx_fifo = NULL;
2601 break;
2602 }
2603 break;
2604 }
2605 if (!einfo->tx_fifo) {
2606 rc = -ENODEV;
2607 pr_err("%s: tx fifo not found\n", __func__);
2608 goto toc_init_fail;
2609 }
2610
2611 tocp = (uint32_t *)toc;
2612 tocp += 2;
2613 for (rc = 0; rc < num_toc_entries; ++rc) {
2614 if (*tocp != RPM_RX_FIFO_ID) {
2615 tocp += 3;
2616 continue;
2617 }
2618 ++tocp;
2619 einfo->rx_ch_desc = msgram + *tocp;
2620 einfo->rx_fifo = einfo->rx_ch_desc + 1;
2621 if ((uintptr_t)einfo->rx_fifo >
2622 (uintptr_t)(msgram + resource_size(msgram_r))) {
2623 pr_err("%s: invalid rx fifo address\n", __func__);
2624 einfo->rx_fifo = NULL;
2625 break;
2626 }
2627 ++tocp;
2628 einfo->rx_fifo_size = *tocp;
2629 if (einfo->rx_fifo_size > resource_size(msgram_r) ||
2630 (uintptr_t)(einfo->rx_fifo + einfo->rx_fifo_size) >
2631 (uintptr_t)(msgram + resource_size(msgram_r))) {
2632 pr_err("%s: invalid rx fifo size\n", __func__);
2633 einfo->rx_fifo = NULL;
2634 break;
2635 }
2636 break;
2637 }
2638 if (!einfo->rx_fifo) {
2639 rc = -ENODEV;
2640 pr_err("%s: rx fifo not found\n", __func__);
2641 goto toc_init_fail;
2642 }
2643
2644 einfo->tx_ch_desc->write_index = 0;
2645 einfo->rx_ch_desc->read_index = 0;
2646
2647 rc = glink_core_register_transport(&einfo->xprt_if, &einfo->xprt_cfg);
2648 if (rc == -EPROBE_DEFER)
2649 goto reg_xprt_fail;
2650 if (rc) {
2651 pr_err("%s: glink core register transport failed: %d\n",
2652 __func__, rc);
2653 goto reg_xprt_fail;
2654 }
2655
2656 einfo->irq_line = irq_line;
2657 rc = request_irq(irq_line, irq_handler,
2658 IRQF_TRIGGER_RISING | IRQF_NO_SUSPEND | IRQF_SHARED,
2659 node->name, einfo);
2660 if (rc < 0) {
2661 pr_err("%s: request_irq on %d failed: %d\n", __func__, irq_line,
2662 rc);
2663 goto request_irq_fail;
2664 }
2665 rc = enable_irq_wake(irq_line);
2666 if (rc < 0)
2667 pr_err("%s: enable_irq_wake() failed on %d\n", __func__,
2668 irq_line);
2669
2670 register_debugfs_info(einfo);
2671 einfo->xprt_if.glink_core_if_ptr->link_up(&einfo->xprt_if);
2672 return 0;
2673
2674request_irq_fail:
2675 glink_core_unregister_transport(&einfo->xprt_if);
2676reg_xprt_fail:
2677toc_init_fail:
2678	kthread_flush_worker(&einfo->kworker);
2679	flush_work(&einfo->wakeup_work);
2680	kthread_stop(einfo->task);
2681 einfo->task = NULL;
2682 tasklet_kill(&einfo->tasklet);
2683kthread_fail:
2684 iounmap(msgram);
2685msgram_ioremap_fail:
2686 iounmap(einfo->out_irq_reg);
2687irq_ioremap_fail:
2688 mutex_lock(&probe_lock);
2689 edge_infos[einfo->remote_proc_id] = NULL;
2690 mutex_unlock(&probe_lock);
2691invalid_key:
2692missing_key:
2693 kfree(einfo);
2694edge_info_alloc_fail:
2695 return rc;
2696}
2697
2698static int glink_mailbox_probe(struct platform_device *pdev)
2699{
2700 struct device_node *node;
2701 struct edge_info *einfo;
2702 int rc;
2703 char *key;
2704 const char *subsys_name;
2705 uint32_t irq_line;
2706 uint32_t irq_mask;
2707 struct resource *irq_r;
2708 struct resource *mbox_loc_r;
2709 struct resource *mbox_size_r;
2710 struct resource *rx_reset_r;
2711 void *mbox_loc;
2712 void *mbox_size;
2713 struct mailbox_config_info *mbox_cfg;
2714 uint32_t mbox_cfg_size;
2715 phys_addr_t cfg_p_addr;
2716
2717 node = pdev->dev.of_node;
2718
2719 einfo = kzalloc(sizeof(*einfo), GFP_KERNEL);
2720 if (!einfo) {
2721 rc = -ENOMEM;
2722 goto edge_info_alloc_fail;
2723 }
2724
2725 key = "label";
2726 subsys_name = of_get_property(node, key, NULL);
2727 if (!subsys_name) {
2728 pr_err("%s: missing key %s\n", __func__, key);
2729 rc = -ENODEV;
2730 goto missing_key;
2731 }
2732
2733 key = "interrupts";
2734 irq_line = irq_of_parse_and_map(node, 0);
2735 if (!irq_line) {
2736 pr_err("%s: missing key %s\n", __func__, key);
2737 rc = -ENODEV;
2738 goto missing_key;
2739 }
2740
2741 key = "qcom,irq-mask";
2742 rc = of_property_read_u32(node, key, &irq_mask);
2743 if (rc) {
2744 pr_err("%s: missing key %s\n", __func__, key);
2745 rc = -ENODEV;
2746 goto missing_key;
2747 }
2748
2749 key = "irq-reg-base";
2750 irq_r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
2751 if (!irq_r) {
2752 pr_err("%s: missing key %s\n", __func__, key);
2753 rc = -ENODEV;
2754 goto missing_key;
2755 }
2756
2757 key = "mbox-loc-addr";
2758 mbox_loc_r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
2759 if (!mbox_loc_r) {
2760 pr_err("%s: missing key %s\n", __func__, key);
2761 rc = -ENODEV;
2762 goto missing_key;
2763 }
2764
2765 key = "mbox-loc-size";
2766 mbox_size_r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
2767 if (!mbox_size_r) {
2768 pr_err("%s: missing key %s\n", __func__, key);
2769 rc = -ENODEV;
2770 goto missing_key;
2771 }
2772
2773 key = "irq-rx-reset";
2774 rx_reset_r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
2775 if (!rx_reset_r) {
2776 pr_err("%s: missing key %s\n", __func__, key);
2777 rc = -ENODEV;
2778 goto missing_key;
2779 }
2780
2781 key = "qcom,tx-ring-size";
2782 rc = of_property_read_u32(node, key, &einfo->tx_fifo_size);
2783 if (rc) {
2784 pr_err("%s: missing key %s\n", __func__, key);
2785 rc = -ENODEV;
2786 goto missing_key;
2787 }
2788
2789 key = "qcom,rx-ring-size";
2790 rc = of_property_read_u32(node, key, &einfo->rx_fifo_size);
2791 if (rc) {
2792 pr_err("%s: missing key %s\n", __func__, key);
2793 rc = -ENODEV;
2794 goto missing_key;
2795 }
2796
2797 if (subsys_name_to_id(subsys_name) == -ENODEV) {
2798 pr_err("%s: unknown subsystem: %s\n", __func__, subsys_name);
2799 rc = -ENODEV;
2800 goto invalid_key;
2801 }
2802 einfo->remote_proc_id = subsys_name_to_id(subsys_name);
2803
2804 init_xprt_cfg(einfo, subsys_name);
2805 einfo->xprt_cfg.name = "mailbox";
2806 init_xprt_if(einfo);
2807 spin_lock_init(&einfo->write_lock);
2808 init_waitqueue_head(&einfo->tx_blocked_queue);
2809	kthread_init_work(&einfo->kwork, rx_worker);
2810	kthread_init_worker(&einfo->kworker);
2811	INIT_WORK(&einfo->wakeup_work, tx_wakeup_worker);
2812	tasklet_init(&einfo->tasklet, rx_worker_atomic, (unsigned long)einfo);
2813 einfo->read_from_fifo = read_from_fifo;
2814 einfo->write_to_fifo = write_to_fifo;
2815 init_srcu_struct(&einfo->use_ref);
2816 spin_lock_init(&einfo->rx_lock);
2817 INIT_LIST_HEAD(&einfo->deferred_cmds);
2818
2819 mutex_lock(&probe_lock);
2820 if (edge_infos[einfo->remote_proc_id]) {
2821 pr_err("%s: duplicate subsys %s is not valid\n", __func__,
2822 subsys_name);
2823 rc = -ENODEV;
2824 mutex_unlock(&probe_lock);
2825 goto invalid_key;
2826 }
2827 edge_infos[einfo->remote_proc_id] = einfo;
2828 mutex_unlock(&probe_lock);
2829
2830 einfo->out_irq_mask = irq_mask;
2831 einfo->out_irq_reg = ioremap_nocache(irq_r->start,
2832 resource_size(irq_r));
2833 if (!einfo->out_irq_reg) {
2834 pr_err("%s: unable to map irq reg\n", __func__);
2835 rc = -ENOMEM;
2836 goto irq_ioremap_fail;
2837 }
2838
2839 mbox_loc = ioremap_nocache(mbox_loc_r->start,
2840 resource_size(mbox_loc_r));
2841 if (!mbox_loc) {
2842 pr_err("%s: unable to map mailbox location reg\n", __func__);
2843 rc = -ENOMEM;
2844 goto mbox_loc_ioremap_fail;
2845 }
2846
2847 mbox_size = ioremap_nocache(mbox_size_r->start,
2848 resource_size(mbox_size_r));
2849 if (!mbox_size) {
2850 pr_err("%s: unable to map mailbox size reg\n", __func__);
2851 rc = -ENOMEM;
2852 goto mbox_size_ioremap_fail;
2853 }
2854
2855 einfo->rx_reset_reg = ioremap_nocache(rx_reset_r->start,
2856 resource_size(rx_reset_r));
2857 if (!einfo->rx_reset_reg) {
2858 pr_err("%s: unable to map rx reset reg\n", __func__);
2859 rc = -ENOMEM;
2860 goto rx_reset_ioremap_fail;
2861 }
2862
2863 einfo->task = kthread_run(kthread_worker_fn, &einfo->kworker,
2864 "smem_native_%s", subsys_name);
2865 if (IS_ERR(einfo->task)) {
2866 rc = PTR_ERR(einfo->task);
2867 pr_err("%s: kthread_run failed %d\n", __func__, rc);
2868 goto kthread_fail;
2869 }
2870
2871 mbox_cfg_size = sizeof(*mbox_cfg) + einfo->tx_fifo_size +
2872 einfo->rx_fifo_size;
2873 mbox_cfg = smem_alloc(SMEM_GLINK_NATIVE_XPRT_DESCRIPTOR,
2874 mbox_cfg_size,
2875 einfo->remote_proc_id,
2876 0);
2877 if (PTR_ERR(mbox_cfg) == -EPROBE_DEFER) {
2878 rc = -EPROBE_DEFER;
2879 goto smem_alloc_fail;
2880 }
2881 if (!mbox_cfg) {
2882 pr_err("%s: smem alloc of mailbox struct failed\n", __func__);
2883 rc = -ENOMEM;
2884 goto smem_alloc_fail;
2885 }
2886 einfo->mailbox = mbox_cfg;
2887 einfo->tx_ch_desc = (struct channel_desc *)(&mbox_cfg->tx_read_index);
2888 einfo->rx_ch_desc = (struct channel_desc *)(&mbox_cfg->rx_read_index);
2889 mbox_cfg->tx_size = einfo->tx_fifo_size;
2890 mbox_cfg->rx_size = einfo->rx_fifo_size;
2891 einfo->tx_fifo = &mbox_cfg->fifo[0];
2892
2893 rc = glink_core_register_transport(&einfo->xprt_if, &einfo->xprt_cfg);
2894 if (rc == -EPROBE_DEFER)
2895 goto reg_xprt_fail;
2896 if (rc) {
2897 pr_err("%s: glink core register transport failed: %d\n",
2898 __func__, rc);
2899 goto reg_xprt_fail;
2900 }
2901
2902 einfo->irq_line = irq_line;
2903 rc = request_irq(irq_line, irq_handler,
2904 IRQF_TRIGGER_HIGH | IRQF_NO_SUSPEND | IRQF_SHARED,
2905 node->name, einfo);
2906 if (rc < 0) {
2907 pr_err("%s: request_irq on %d failed: %d\n", __func__, irq_line,
2908 rc);
2909 goto request_irq_fail;
2910 }
2911 rc = enable_irq_wake(irq_line);
2912 if (rc < 0)
2913 pr_err("%s: enable_irq_wake() failed on %d\n", __func__,
2914 irq_line);
2915
2916 register_debugfs_info(einfo);
2917
2918 writel_relaxed(mbox_cfg_size, mbox_size);
2919 cfg_p_addr = smem_virt_to_phys(mbox_cfg);
2920 writel_relaxed(lower_32_bits(cfg_p_addr), mbox_loc);
2921 writel_relaxed(upper_32_bits(cfg_p_addr), mbox_loc + 4);
2922 send_irq(einfo);
2923 iounmap(mbox_size);
2924 iounmap(mbox_loc);
2925 return 0;
2926
2927request_irq_fail:
2928 glink_core_unregister_transport(&einfo->xprt_if);
2929reg_xprt_fail:
2930smem_alloc_fail:
2931	kthread_flush_worker(&einfo->kworker);
2932	flush_work(&einfo->wakeup_work);
2933	kthread_stop(einfo->task);
2934 einfo->task = NULL;
2935 tasklet_kill(&einfo->tasklet);
2936kthread_fail:
2937 iounmap(einfo->rx_reset_reg);
2938rx_reset_ioremap_fail:
2939 iounmap(mbox_size);
2940mbox_size_ioremap_fail:
2941 iounmap(mbox_loc);
2942mbox_loc_ioremap_fail:
2943 iounmap(einfo->out_irq_reg);
2944irq_ioremap_fail:
2945 mutex_lock(&probe_lock);
2946 edge_infos[einfo->remote_proc_id] = NULL;
2947 mutex_unlock(&probe_lock);
2948invalid_key:
2949missing_key:
2950 kfree(einfo);
2951edge_info_alloc_fail:
2952 return rc;
2953}
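/*
 * Illustrative DT node (example values) for the mailbox flavor of this
 * transport; the ring sizes feed the smem allocation of struct
 * mailbox_config_info:
 *
 *	qcom,glink-mailbox-xprt-spss {
 *		compatible = "qcom,glink-mailbox-xprt";
 *		reg = <...>, <...>, <...>, <...>;
 *		reg-names = "irq-reg-base", "mbox-loc-addr",
 *			    "mbox-loc-size", "irq-rx-reset";
 *		interrupts = <...>;
 *		label = "spss";
 *		qcom,irq-mask = <0x1>;
 *		qcom,tx-ring-size = <0x800>;
 *		qcom,rx-ring-size = <0x800>;
 *	};
 *
 * The four reg entries are deliberately elided; their order only matters
 * through reg-names, which is what the probe looks up.
 */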
2954
2955#if defined(CONFIG_DEBUG_FS)
2956/**
2957 * debug_edge() - generates formatted text output displaying current edge state
2958 * @s: File to send the output to.
2959 */
2960static void debug_edge(struct seq_file *s)
2961{
2962 struct edge_info *einfo;
2963 struct glink_dbgfs_data *dfs_d;
2964
2965 dfs_d = s->private;
2966 einfo = dfs_d->priv_data;
2967
2968/*
2969 * formatted, human readable edge state output, ie:
2970 * TX/RX fifo information:
2971ID|EDGE      |TX READ   |TX WRITE  |TX SIZE   |RX READ   |RX WRITE  |RX SIZE
2972-------------------------------------------------------------------------------
297301|mpss      |0x00000128|0x00000128|0x00000800|0x00000256|0x00000256|0x00001000
2974 *
2975 * Interrupt information:
2976 * EDGE |TX INT |RX INT
2977 * --------------------------------
2978 * mpss |0x00000006|0x00000008
2979 */
2980 seq_puts(s, "TX/RX fifo information:\n");
2981 seq_printf(s, "%2s|%-10s|%-10s|%-10s|%-10s|%-10s|%-10s|%-10s\n",
2982 "ID",
2983 "EDGE",
2984 "TX READ",
2985 "TX WRITE",
2986 "TX SIZE",
2987 "RX READ",
2988 "RX WRITE",
2989 "RX SIZE");
2990 seq_puts(s,
2991 "-------------------------------------------------------------------------------\n");
2992 if (!einfo)
2993 return;
2994
2995 seq_printf(s, "%02i|%-10s|", einfo->remote_proc_id,
2996 einfo->xprt_cfg.edge);
2997 if (!einfo->rx_fifo)
2998 seq_puts(s, "Link Not Up\n");
2999 else
3000 seq_printf(s, "0x%08X|0x%08X|0x%08X|0x%08X|0x%08X|0x%08X\n",
3001 einfo->tx_ch_desc->read_index,
3002 einfo->tx_ch_desc->write_index,
3003 einfo->tx_fifo_size,
3004 einfo->rx_ch_desc->read_index,
3005 einfo->rx_ch_desc->write_index,
3006 einfo->rx_fifo_size);
3007
3008 seq_puts(s, "\nInterrupt information:\n");
3009 seq_printf(s, "%-10s|%-10s|%-10s\n", "EDGE", "TX INT", "RX INT");
3010 seq_puts(s, "--------------------------------\n");
3011 seq_printf(s, "%-10s|0x%08X|0x%08X\n", einfo->xprt_cfg.edge,
3012 einfo->tx_irq_count,
3013 einfo->rx_irq_count);
3014}
3015
3016/**
3017 * register_debugfs_info() - initialize debugfs device entries
3018 * @einfo: Pointer to specific edge_info for which register is called.
3019 */
3020static void register_debugfs_info(struct edge_info *einfo)
3021{
3022 struct glink_dbgfs dfs;
3023 char *curr_dir_name;
3024 int dir_name_len;
3025
3026 dir_name_len = strlen(einfo->xprt_cfg.edge) +
3027 strlen(einfo->xprt_cfg.name) + 2;
3028 curr_dir_name = kmalloc(dir_name_len, GFP_KERNEL);
3029 if (!curr_dir_name) {
3030 GLINK_ERR("%s: Memory allocation failed\n", __func__);
3031 return;
3032 }
3033
3034 snprintf(curr_dir_name, dir_name_len, "%s_%s",
3035 einfo->xprt_cfg.edge, einfo->xprt_cfg.name);
3036 dfs.curr_name = curr_dir_name;
3037 dfs.par_name = "xprt";
3038 dfs.b_dir_create = false;
3039 glink_debugfs_create("XPRT_INFO", debug_edge,
3040 &dfs, einfo, false);
3041 kfree(curr_dir_name);
3042}
3043
3044#else
3045static void register_debugfs_info(struct edge_info *einfo)
3046{
3047}
3048#endif /* CONFIG_DEBUG_FS */
3049
3050static const struct of_device_id smem_match_table[] = {
3051 { .compatible = "qcom,glink-smem-native-xprt" },
3052 {},
3053};
3054
3055static struct platform_driver glink_smem_native_driver = {
3056 .probe = glink_smem_native_probe,
3057 .driver = {
3058 .name = "msm_glink_smem_native_xprt",
3059 .owner = THIS_MODULE,
3060 .of_match_table = smem_match_table,
3061 },
3062};
3063
3064static const struct of_device_id rpm_match_table[] = {
3065 { .compatible = "qcom,glink-rpm-native-xprt" },
3066 {},
3067};
3068
3069static struct platform_driver glink_rpm_native_driver = {
3070 .probe = glink_rpm_native_probe,
3071 .driver = {
3072 .name = "msm_glink_rpm_native_xprt",
3073 .owner = THIS_MODULE,
3074 .of_match_table = rpm_match_table,
3075 },
3076};
3077
3078static const struct of_device_id mailbox_match_table[] = {
3079 { .compatible = "qcom,glink-mailbox-xprt" },
3080 {},
3081};
3082
3083static struct platform_driver glink_mailbox_driver = {
3084 .probe = glink_mailbox_probe,
3085 .driver = {
3086 .name = "msm_glink_mailbox_xprt",
3087 .owner = THIS_MODULE,
3088 .of_match_table = mailbox_match_table,
3089 },
3090};
3091
3092static int __init glink_smem_native_xprt_init(void)
3093{
3094 int rc;
3095
3096 rc = platform_driver_register(&glink_smem_native_driver);
3097 if (rc) {
3098 pr_err("%s: glink_smem_native_driver register failed %d\n",
3099 __func__, rc);
3100 return rc;
3101 }
3102
3103 rc = platform_driver_register(&glink_rpm_native_driver);
3104 if (rc) {
3105 pr_err("%s: glink_rpm_native_driver register failed %d\n",
3106 __func__, rc);
3107 return rc;
3108 }
3109
3110 rc = platform_driver_register(&glink_mailbox_driver);
3111 if (rc) {
3112 pr_err("%s: glink_mailbox_driver register failed %d\n",
3113 __func__, rc);
3114 return rc;
3115 }
3116
3117 return 0;
3118}
3119arch_initcall(glink_smem_native_xprt_init);
3120
3121MODULE_DESCRIPTION("MSM G-Link SMEM Native Transport");
3122MODULE_LICENSE("GPL v2");