/* Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ipc_logging.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/srcu.h>
#include <linux/wait.h>
#include <linux/cpumask.h>
#include <soc/qcom/smem.h>
#include <soc/qcom/tracer_pkt.h>
#include "glink_core_if.h"
#include "glink_private.h"
#include "glink_xprt_if.h"

#define XPRT_NAME "smem"
#define FIFO_FULL_RESERVE 8
#define FIFO_ALIGNMENT 8
#define TX_BLOCKED_CMD_RESERVE 8 /* size of struct read_notif_request */
#define SMEM_CH_DESC_SIZE 32
#define RPM_TOC_ID 0x67727430
#define RPM_TX_FIFO_ID 0x61703272
#define RPM_RX_FIFO_ID 0x72326170
#define RPM_TOC_SIZE 256
#define RPM_MAX_TOC_ENTRIES 20
#define RPM_FIFO_ADDR_ALIGN_BYTES 3
#define TRACER_PKT_FEATURE BIT(2)
#define DEFERRED_CMDS_THRESHOLD 25
#define NUM_LOG_PAGES 4

/**
 * enum command_types - definition of the types of commands sent/received
 * @VERSION_CMD:		Version and feature set supported
 * @VERSION_ACK_CMD:		Response for @VERSION_CMD
 * @OPEN_CMD:			Open a channel
 * @CLOSE_CMD:			Close a channel
 * @OPEN_ACK_CMD:		Response to @OPEN_CMD
 * @RX_INTENT_CMD:		RX intent for a channel was queued
 * @RX_DONE_CMD:		Use of RX intent for a channel is complete
 * @RX_INTENT_REQ_CMD:		Request to have RX intent queued
 * @RX_INTENT_REQ_ACK_CMD:	Response for @RX_INTENT_REQ_CMD
 * @TX_DATA_CMD:		Start of a data transfer
 * @ZERO_COPY_TX_DATA_CMD:	Start of a data transfer with zero copy
 * @CLOSE_ACK_CMD:		Response for @CLOSE_CMD
 * @TX_DATA_CONT_CMD:		Continuation or end of a data transfer
 * @READ_NOTIF_CMD:		Request for a notification when this cmd is read
 * @RX_DONE_W_REUSE_CMD:	Same as @RX_DONE_CMD, but also reuse the used
 *				intent
 * @SIGNALS_CMD:		Sideband signals
 * @TRACER_PKT_CMD:		Start of a Tracer Packet Command
 * @TRACER_PKT_CONT_CMD:	Continuation or end of a Tracer Packet Command
 */
enum command_types {
	VERSION_CMD,
	VERSION_ACK_CMD,
	OPEN_CMD,
	CLOSE_CMD,
	OPEN_ACK_CMD,
	RX_INTENT_CMD,
	RX_DONE_CMD,
	RX_INTENT_REQ_CMD,
	RX_INTENT_REQ_ACK_CMD,
	TX_DATA_CMD,
	ZERO_COPY_TX_DATA_CMD,
	CLOSE_ACK_CMD,
	TX_DATA_CONT_CMD,
	READ_NOTIF_CMD,
	RX_DONE_W_REUSE_CMD,
	SIGNALS_CMD,
	TRACER_PKT_CMD,
	TRACER_PKT_CONT_CMD,
};
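
/*
 * Wire-format note (a sketch inferred from the command structs used in this
 * file, not an authoritative protocol spec): every command begins with an
 * 8-byte header -- a 16-bit id from enum command_types, a 16-bit param1 and
 * a 32-bit param2 -- optionally followed by a payload padded out to
 * FIFO_ALIGNMENT.
 */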

/**
 * struct channel_desc - description of a channel fifo with a remote entity
 * @read_index:		The read index for the fifo where data should be
 *			consumed from.
 * @write_index:	The write index for the fifo where data should be
 *			produced to.
 *
 * This structure resides in SMEM and contains the control information for the
 * fifo data pipes of the channel. There is one physical channel between us
 * and a remote entity.
 */
struct channel_desc {
	uint32_t read_index;
	uint32_t write_index;
};

/**
 * struct mailbox_config_info - description of a mailbox transport channel
 * @tx_read_index:	Offset into the tx fifo where data should be read from.
 * @tx_write_index:	Offset into the tx fifo where new data will be placed.
 * @tx_size:		Size of the transmit fifo in bytes.
 * @rx_read_index:	Offset into the rx fifo where data should be read from.
 * @rx_write_index:	Offset into the rx fifo where new data will be placed.
 * @rx_size:		Size of the receive fifo in bytes.
 * @fifo:		The fifos for the channel.
 */
struct mailbox_config_info {
	uint32_t tx_read_index;
	uint32_t tx_write_index;
	uint32_t tx_size;
	uint32_t rx_read_index;
	uint32_t rx_write_index;
	uint32_t rx_size;
	char fifo[]; /* tx fifo, then rx fifo */
};
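
/*
 * Layout sketch, based on how get_rx_fifo() indexes @fifo (illustrative
 * only): the flexible array holds both rings back to back --
 *   fifo[0 .. tx_size - 1]                  local -> remote (tx) fifo
 *   fifo[tx_size .. tx_size + rx_size - 1]  remote -> local (rx) fifo
 */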

/**
 * struct edge_info - local information for managing a single complete edge
 * @xprt_if:			The transport interface registered with the
 *				glink core associated with this edge.
 * @xprt_cfg:			The transport configuration for the glink core
 *				associated with this edge.
 * @intentless:			True if this edge runs in intentless mode.
 * @irq_disabled:		Flag indicating whether the interrupt is
 *				enabled or disabled.
 * @remote_proc_id:		The SMEM processor id for the remote side.
 * @rx_reset_reg:		Reference to the register to reset the rx irq
 *				line, if applicable.
 * @out_irq_reg:		Reference to the register to send an irq to the
 *				remote side.
 * @out_irq_mask:		Mask written to @out_irq_reg to trigger the
 *				correct irq.
 * @irq_line:			The incoming interrupt line.
 * @tx_irq_count:		Number of interrupts triggered.
 * @rx_irq_count:		Number of interrupts received.
 * @tx_ch_desc:			Reference to the channel description structure
 *				for tx in SMEM for this edge.
 * @rx_ch_desc:			Reference to the channel description structure
 *				for rx in SMEM for this edge.
 * @tx_fifo:			Reference to the transmit fifo in SMEM.
 * @rx_fifo:			Reference to the receive fifo in SMEM.
 * @tx_fifo_size:		Total size of @tx_fifo.
 * @rx_fifo_size:		Total size of @rx_fifo.
 * @read_from_fifo:		Memcpy for this edge.
 * @write_to_fifo:		Memcpy for this edge.
 * @write_lock:			Lock to serialize access to @tx_fifo.
 * @tx_blocked_queue:		Queue of entities waiting for the remote side
 *				to signal @tx_fifo has flushed and is now
 *				empty.
 * @tx_resume_needed:		A tx resume signal needs to be sent to the
 *				glink core once the remote side indicates
 *				@tx_fifo has flushed.
 * @tx_blocked_signal_sent:	Flag to indicate the flush signal has already
 *				been sent, and a response is pending from the
 *				remote side. Protected by @write_lock.
 * @debug_mask:			Mask to set debugging level.
 * @kwork:			Work to be executed when an irq is received.
 * @kworker:			Handle to the entity processing deferred
 *				commands.
 * @task:			Handle to the task context used to run
 *				@kworker.
 * @use_ref:			Active uses of this transport use this to grab
 *				a reference. Used for ssr synchronization.
 * @in_ssr:			Signals if this transport is in ssr.
 * @rx_lock:			Used to serialize concurrent instances of rx
 *				processing.
 * @deferred_cmds:		List of deferred commands that need to be
 *				processed in process context.
 * @deferred_cmds_cnt:		Number of deferred commands in queue.
 * @rt_vote_lock:		Serialize access to RT rx votes.
 * @rt_votes:			Vote count for RT rx thread priority.
 * @num_pw_states:		Size of @ramp_time_us.
 * @readback:			Scratch field written with
 *				@tx_ch_desc->write_index in send_irq() to help
 *				commit index updates before interrupting the
 *				remote side.
 * @ramp_time_us:		Array of ramp times in microseconds where array
 *				index position represents a power state.
 * @mailbox:			Mailbox transport channel description reference.
 * @log_ctx:			Pointer to log context.
 */
struct edge_info {
	struct glink_transport_if xprt_if;
	struct glink_core_transport_cfg xprt_cfg;
	bool intentless;
	bool irq_disabled;
	uint32_t remote_proc_id;
	void __iomem *rx_reset_reg;
	void __iomem *out_irq_reg;
	uint32_t out_irq_mask;
	uint32_t irq_line;
	uint32_t tx_irq_count;
	uint32_t rx_irq_count;
	struct channel_desc *tx_ch_desc;
	struct channel_desc *rx_ch_desc;
	void __iomem *tx_fifo;
	void __iomem *rx_fifo;
	uint32_t tx_fifo_size;
	uint32_t rx_fifo_size;
	void * (*read_from_fifo)(void *dest, const void *src, size_t num_bytes);
	void * (*write_to_fifo)(void *dest, const void *src, size_t num_bytes);
	spinlock_t write_lock;
	wait_queue_head_t tx_blocked_queue;
	bool tx_resume_needed;
	bool tx_blocked_signal_sent;
	unsigned int debug_mask;
	struct kthread_work kwork;
	struct kthread_worker kworker;
	struct task_struct *task;
	struct srcu_struct use_ref;
	bool in_ssr;
	spinlock_t rx_lock;
	struct list_head deferred_cmds;
	uint32_t deferred_cmds_cnt;
	spinlock_t rt_vote_lock;
	uint32_t rt_votes;
	uint32_t num_pw_states;
	uint32_t readback;
	unsigned long *ramp_time_us;
	struct mailbox_config_info *mailbox;
	void *log_ctx;
};

/**
 * struct deferred_cmd - description of a command to be processed later
 * @list_node:	Used to put this command on a list in the edge.
 * @id:		ID of the command.
 * @param1:	Parameter one of the command.
 * @param2:	Parameter two of the command.
 * @data:	Extra data associated with the command, if applicable.
 *
 * This structure stores the relevant information of a command that was removed
 * from the fifo but needs to be processed at a later time.
 */
struct deferred_cmd {
	struct list_head list_node;
	uint16_t id;
	uint16_t param1;
	uint32_t param2;
	void *data;
};
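
/*
 * Commands that arrive in atomic (irq) context but require process context
 * are parked in a struct deferred_cmd and replayed later by rx_worker();
 * __rx_worker() stops draining the fifo in atomic context once
 * DEFERRED_CMDS_THRESHOLD such commands are pending (see the check in its
 * main loop).
 */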

static uint32_t negotiate_features_v1(struct glink_transport_if *if_ptr,
				      const struct glink_core_version *version,
				      uint32_t features);
static void register_debugfs_info(struct edge_info *einfo);

static struct edge_info *edge_infos[NUM_SMEM_SUBSYSTEMS];
static DEFINE_MUTEX(probe_lock);
static struct glink_core_version versions[] = {
	{1, TRACER_PKT_FEATURE, negotiate_features_v1},
};

#define SMEM_IPC_LOG(einfo, str, id, param1, param2) do { \
	if ((glink_xprt_debug_mask & QCOM_GLINK_DEBUG_ENABLE) \
	    && (einfo->debug_mask & QCOM_GLINK_DEBUG_ENABLE)) \
		ipc_log_string(einfo->log_ctx, \
			       "%s: Rx:%x:%x Tx:%x:%x Cmd:%x P1:%x P2:%x\n", \
			       str, einfo->rx_ch_desc->read_index, \
			       einfo->rx_ch_desc->write_index, \
			       einfo->tx_ch_desc->read_index, \
			       einfo->tx_ch_desc->write_index, \
			       id, param1, param2); \
} while (0)

enum {
	QCOM_GLINK_DEBUG_ENABLE = 1U << 0,
	QCOM_GLINK_DEBUG_DISABLE = 1U << 1,
};

static unsigned int glink_xprt_debug_mask = QCOM_GLINK_DEBUG_ENABLE;
module_param_named(debug_mask, glink_xprt_debug_mask,
		   uint, 0664);

/**
 * send_irq() - send an irq to a remote entity as an event signal
 * @einfo:	Which remote entity should receive the irq.
 */
static void send_irq(struct edge_info *einfo)
{
	/*
	 * Any data associated with this event must be visible to the remote
	 * before the interrupt is triggered.
	 */
	einfo->readback = einfo->tx_ch_desc->write_index;
	wmb();
	writel_relaxed(einfo->out_irq_mask, einfo->out_irq_reg);
	if (einfo->remote_proc_id != SMEM_SPSS)
		writel_relaxed(0, einfo->out_irq_reg);
	einfo->tx_irq_count++;
}
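
/*
 * A note on the einfo->readback store in send_irq() above: copying the
 * current write index to an ordinary memory location before the wmb() is
 * how this driver forces the index update to be committed before the
 * outgoing interrupt register is written. Read it as a description of the
 * code as written, not a general memory-ordering recipe.
 */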

/**
 * read_from_fifo() - memcpy from fifo memory
 * @dest:	Destination address.
 * @src:	Source address.
 * @num_bytes:	Number of bytes to copy.
 *
 * Return: Destination address.
 */
static void *read_from_fifo(void *dest, const void *src, size_t num_bytes)
{
	memcpy_fromio(dest, src, num_bytes);
	return dest;
}

/**
 * write_to_fifo() - memcpy to fifo memory
 * @dest:	Destination address.
 * @src:	Source address.
 * @num_bytes:	Number of bytes to copy.
 *
 * Return: Destination address.
 */
static void *write_to_fifo(void *dest, const void *src, size_t num_bytes)
{
	memcpy_toio(dest, src, num_bytes);
	return dest;
}

/**
 * memcpy32_toio() - memcpy to word access only memory
 * @dest:	Destination address.
 * @src:	Source address.
 * @num_bytes:	Number of bytes to copy.
 *
 * Return: Destination address.
 */
static void *memcpy32_toio(void *dest, const void *src, size_t num_bytes)
{
	uint32_t *dest_local = (uint32_t *)dest;
	uint32_t *src_local = (uint32_t *)src;

	if (WARN_ON(num_bytes & RPM_FIFO_ADDR_ALIGN_BYTES))
		return ERR_PTR(-EINVAL);
	if (WARN_ON(!dest_local ||
		    ((uintptr_t)dest_local & RPM_FIFO_ADDR_ALIGN_BYTES)))
		return ERR_PTR(-EINVAL);
	if (WARN_ON(!src_local ||
		    ((uintptr_t)src_local & RPM_FIFO_ADDR_ALIGN_BYTES)))
		return ERR_PTR(-EINVAL);
	num_bytes /= sizeof(uint32_t);

	while (num_bytes--)
		__raw_writel_no_log(*src_local++, dest_local++);

	return dest;
}

/**
 * memcpy32_fromio() - memcpy from word access only memory
 * @dest:	Destination address.
 * @src:	Source address.
 * @num_bytes:	Number of bytes to copy.
 *
 * Return: Destination address.
 */
static void *memcpy32_fromio(void *dest, const void *src, size_t num_bytes)
{
	uint32_t *dest_local = (uint32_t *)dest;
	uint32_t *src_local = (uint32_t *)src;

	if (WARN_ON(num_bytes & RPM_FIFO_ADDR_ALIGN_BYTES))
		return ERR_PTR(-EINVAL);
	if (WARN_ON(!dest_local ||
		    ((uintptr_t)dest_local & RPM_FIFO_ADDR_ALIGN_BYTES)))
		return ERR_PTR(-EINVAL);
	if (WARN_ON(!src_local ||
		    ((uintptr_t)src_local & RPM_FIFO_ADDR_ALIGN_BYTES)))
		return ERR_PTR(-EINVAL);
	num_bytes /= sizeof(uint32_t);

	while (num_bytes--)
		*dest_local++ = __raw_readl_no_log(src_local++);

	return dest;
}

/**
 * fifo_read_avail() - how many bytes are available to be read from an edge
 * @einfo:	The concerned edge to query.
 *
 * Return: The number of bytes available to be read from edge.
 */
static uint32_t fifo_read_avail(struct edge_info *einfo)
{
	uint32_t read_index = einfo->rx_ch_desc->read_index;
	uint32_t write_index = einfo->rx_ch_desc->write_index;
	uint32_t fifo_size = einfo->rx_fifo_size;
	uint32_t bytes_avail;

	bytes_avail = write_index - read_index;
	if (write_index < read_index)
		/*
		 * Case: W < R - Write has wrapped
		 * --------------------------------
		 * In this case, the write operation has wrapped past the end
		 * of the FIFO which means that now calculating the amount of
		 * data in the FIFO results in a negative number. This can be
		 * easily fixed by adding the fifo_size to the value. Even
		 * though the values are unsigned, subtraction is always done
		 * using 2's complement which means that the result will still
		 * be correct once the FIFO size has been added to the negative
		 * result.
		 *
		 * Example:
		 * '-' = data in fifo
		 * '.' = empty
		 *
		 *  0         1
		 *  0123456789012345
		 * |-----w.....r----|
		 *  0              N
		 *
		 * write = 5 = 101b
		 * read = 11 = 1011b
		 * Data in FIFO
		 *   (write - read) + fifo_size = (101b - 1011b) + 10000b
		 *   = 11111010b + 10000b = 1010b = 10
		 */
		bytes_avail += fifo_size;

	return bytes_avail;
}

/**
 * fifo_write_avail() - how many bytes can be written to the edge
 * @einfo:	The concerned edge to query.
 *
 * Calculates the number of bytes that can be transmitted at this time.
 * Automatically reserves some space to maintain alignment when the fifo is
 * completely full, and reserves space so that the flush command can always be
 * transmitted when needed.
 *
 * Return: The number of bytes available to be written to the edge.
 */
static uint32_t fifo_write_avail(struct edge_info *einfo)
{
	uint32_t read_index = einfo->tx_ch_desc->read_index;
	uint32_t write_index = einfo->tx_ch_desc->write_index;
	uint32_t fifo_size = einfo->tx_fifo_size;
	uint32_t bytes_avail = read_index - write_index;

	if (read_index <= write_index)
		bytes_avail += fifo_size;
	if (bytes_avail < FIFO_FULL_RESERVE + TX_BLOCKED_CMD_RESERVE)
		bytes_avail = 0;
	else
		bytes_avail -= FIFO_FULL_RESERVE + TX_BLOCKED_CMD_RESERVE;

	return bytes_avail;
}
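
/*
 * Worked example with hypothetical values: for tx_fifo_size = 1024,
 * read_index = 100 and write_index = 200, the raw space is
 * (100 - 200) + 1024 = 924 bytes; subtracting the combined
 * FIFO_FULL_RESERVE + TX_BLOCKED_CMD_RESERVE reserve (8 + 8) leaves
 * 908 reported bytes.
 */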

/**
 * fifo_read() - read data from an edge
 * @einfo:	The concerned edge to read from.
 * @_data:	Buffer to copy the read data into.
 * @len:	The amount of data to read in bytes.
 *
 * Return: The number of bytes read.
 */
static int fifo_read(struct edge_info *einfo, void *_data, int len)
{
	void *ptr;
	void *ret;
	void *data = _data;
	int orig_len = len;
	uint32_t read_index = einfo->rx_ch_desc->read_index;
	uint32_t write_index = einfo->rx_ch_desc->write_index;
	uint32_t fifo_size = einfo->rx_fifo_size;
	uint32_t n;

	if (read_index > fifo_size && write_index > fifo_size)
		return 0;
	while (len) {
		ptr = einfo->rx_fifo + read_index;
		if (read_index <= write_index)
			n = write_index - read_index;
		else
			n = fifo_size - read_index;

		if (n == 0)
			break;
		if (n > len)
			n = len;

		ret = einfo->read_from_fifo(data, ptr, n);
		if (IS_ERR(ret))
			return PTR_ERR(ret);

		data += n;
		len -= n;
		read_index += n;
		if (read_index >= fifo_size)
			read_index -= fifo_size;
	}
	einfo->rx_ch_desc->read_index = read_index;

	return orig_len - len;
}

/**
 * fifo_write_body() - Copy transmit data into an edge
 * @einfo:	The concerned edge to copy into.
 * @_data:	Buffer of data to copy from.
 * @len:	Size of data to copy in bytes.
 * @write_index: Index into the channel where the data should be copied.
 *
 * Return: Number of bytes remaining to be copied into the edge.
 */
static int fifo_write_body(struct edge_info *einfo, const void *_data,
			   int len, uint32_t *write_index)
{
	void *ptr;
	void *ret;
	const void *data = _data;
	uint32_t read_index = einfo->tx_ch_desc->read_index;
	uint32_t fifo_size = einfo->tx_fifo_size;
	uint32_t n;

	if (read_index > fifo_size && *write_index > fifo_size)
		return 0;
	while (len) {
		ptr = einfo->tx_fifo + *write_index;
		if (*write_index < read_index) {
			n = read_index - *write_index - FIFO_FULL_RESERVE;
		} else {
			if (read_index < FIFO_FULL_RESERVE)
				n = fifo_size + read_index - *write_index -
				    FIFO_FULL_RESERVE;
			else
				n = fifo_size - *write_index;
		}

		if (n == 0)
			break;
		if (n > len)
			n = len;

		ret = einfo->write_to_fifo(ptr, data, n);
		if (IS_ERR(ret))
			return PTR_ERR(ret);

		data += n;
		len -= n;
		*write_index += n;
		if (*write_index >= fifo_size)
			*write_index -= fifo_size;
	}
	return len;
}
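
/*
 * Illustration with hypothetical indices: for fifo_size = 1024,
 * read_index = 100 and *write_index = 900, the first pass copies
 * 1024 - 900 = 124 bytes at the tail and wraps *write_index to 0; a
 * second pass can then copy up to 100 - 0 - FIFO_FULL_RESERVE = 92
 * more bytes before the loop stops.
 */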

/**
 * fifo_write() - Write data into an edge
 * @einfo:	The concerned edge to write to.
 * @data:	Buffer of data to write.
 * @len:	Length of data to write, in bytes.
 *
 * Wrapper around fifo_write_body() to manage additional details that are
 * necessary for a complete write event. Does not manage concurrency. Clients
 * should use fifo_write_avail() to check if there is sufficient space before
 * calling fifo_write().
 *
 * Return: Number of bytes written to the edge.
 */
static int fifo_write(struct edge_info *einfo, const void *data, int len)
{
	int orig_len = len;
	uint32_t write_index = einfo->tx_ch_desc->write_index;

	len = fifo_write_body(einfo, data, len, &write_index);
	if (unlikely(len < 0))
		return len;

	/* All data writes need to be flushed to memory before the write index
	 * is updated. This protects against a race condition where the remote
	 * reads stale data because the write index was written before the data.
	 */
	wmb();
	einfo->tx_ch_desc->write_index = write_index;
	send_irq(einfo);

	return orig_len - len;
}

/**
 * fifo_write_complex() - writes a transaction of multiple buffers to an edge
 * @einfo:	The concerned edge to write to.
 * @data1:	The first buffer of data to write.
 * @len1:	The length of the first buffer in bytes.
 * @data2:	The second buffer of data to write.
 * @len2:	The length of the second buffer in bytes.
 * @data3:	The third buffer of data to write.
 * @len3:	The length of the third buffer in bytes.
 *
 * A variant of fifo_write() which optimizes the usecase found in tx(). The
 * remote side expects all or none of the transmitted data to be available.
 * This prevents the tx() usecase from calling fifo_write() multiple times. The
 * alternative would be an allocation and additional memcpy to create a buffer
 * to copy all the data segments into one location before calling fifo_write().
 *
 * Return: Number of bytes written to the edge.
 */
static int fifo_write_complex(struct edge_info *einfo,
			      const void *data1, int len1,
			      const void *data2, int len2,
			      const void *data3, int len3)
{
	int orig_len = len1 + len2 + len3;
	uint32_t write_index = einfo->tx_ch_desc->write_index;

	len1 = fifo_write_body(einfo, data1, len1, &write_index);
	if (unlikely(len1 < 0))
		return len1;
	len2 = fifo_write_body(einfo, data2, len2, &write_index);
	if (unlikely(len2 < 0))
		return len2;
	len3 = fifo_write_body(einfo, data3, len3, &write_index);
	if (unlikely(len3 < 0))
		return len3;

	/* All data writes need to be flushed to memory before the write index
	 * is updated. This protects against a race condition where the remote
	 * reads stale data because the write index was written before the data.
	 */
	wmb();
	einfo->tx_ch_desc->write_index = write_index;
	send_irq(einfo);

	return orig_len - len1 - len2 - len3;
}

/**
 * send_tx_blocked_signal() - send the flush command as we are blocked from tx
 * @einfo:	The concerned edge which is blocked.
 *
 * Used to send a signal to the remote side that we have no more space to
 * transmit data and therefore need the remote side to signal us when they
 * have cleared some space by reading some data. This function relies upon the
 * assumption that fifo_write_avail() will reserve some space so that the
 * flush signal command can always be put into the transmit fifo, even when
 * "everyone" else thinks that the transmit fifo is truly full. This function
 * assumes that it is called with the write_lock already locked.
 */
static void send_tx_blocked_signal(struct edge_info *einfo)
{
	struct read_notif_request {
		uint16_t cmd;
		uint16_t reserved;
		uint32_t reserved2;
	};
	struct read_notif_request read_notif_req;

	read_notif_req.cmd = READ_NOTIF_CMD;
	read_notif_req.reserved = 0;
	read_notif_req.reserved2 = 0;

	SMEM_IPC_LOG(einfo, __func__, READ_NOTIF_CMD, 0, 0);
	if (!einfo->tx_blocked_signal_sent) {
		einfo->tx_blocked_signal_sent = true;
		fifo_write(einfo, &read_notif_req, sizeof(read_notif_req));
	}
}

/**
 * fifo_tx() - transmit data on an edge
 * @einfo:	The concerned edge to transmit on.
 * @data:	Buffer of data to transmit.
 * @len:	Length of data to transmit in bytes.
 *
 * This helper function is the preferred interface to fifo_write() and should
 * be used in the normal case for transmitting entities. fifo_tx() will block
 * until there is sufficient room to transmit the requested amount of data.
 * fifo_tx() will manage any concurrency between multiple transmitters on a
 * channel.
 *
 * Return: Number of bytes transmitted.
 */
static int fifo_tx(struct edge_info *einfo, const void *data, int len)
{
	unsigned long flags;
	int ret;

	DEFINE_WAIT(wait);

	spin_lock_irqsave(&einfo->write_lock, flags);
	while (fifo_write_avail(einfo) < len) {
		send_tx_blocked_signal(einfo);
		prepare_to_wait(&einfo->tx_blocked_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		if (fifo_write_avail(einfo) < len && !einfo->in_ssr) {
			spin_unlock_irqrestore(&einfo->write_lock, flags);
			schedule();
			spin_lock_irqsave(&einfo->write_lock, flags);
		}
		finish_wait(&einfo->tx_blocked_queue, &wait);
		if (einfo->in_ssr) {
			spin_unlock_irqrestore(&einfo->write_lock, flags);
			return -EFAULT;
		}
	}
	ret = fifo_write(einfo, data, len);
	spin_unlock_irqrestore(&einfo->write_lock, flags);

	return ret;
}
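
/*
 * Flow-control sketch, summarizing fifo_tx() above and tx_wakeup_worker()
 * below: when the fifo lacks space, fifo_tx() queues a READ_NOTIF_CMD via
 * send_tx_blocked_signal() and sleeps on tx_blocked_queue; once the remote
 * side drains the fifo and interrupts us, tx_wakeup_worker() wakes the
 * sleepers and, if a resume was requested, tells the glink core to resume tx.
 */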

/**
 * process_rx_data() - process received data from an edge
 * @einfo:	The edge the data was received on.
 * @cmd_id:	ID to specify the type of data.
 * @rcid:	The remote channel id associated with the data.
 * @intent_id:	The intent the data should be put in.
 */
static void process_rx_data(struct edge_info *einfo, uint16_t cmd_id,
			    uint32_t rcid, uint32_t intent_id)
{
	struct command {
		uint32_t frag_size;
		uint32_t size_remaining;
	};
	struct command cmd;
	struct glink_core_rx_intent *intent;
	char trash[FIFO_ALIGNMENT];
	int alignment;
	bool err = false;

	fifo_read(einfo, &cmd, sizeof(cmd));

	intent = einfo->xprt_if.glink_core_if_ptr->rx_get_pkt_ctx(
			&einfo->xprt_if, rcid, intent_id);
	if (intent == NULL) {
		GLINK_ERR("%s: no intent for ch %d liid %d\n", __func__, rcid,
			  intent_id);
		err = true;
	} else if (intent->data == NULL) {
		if (einfo->intentless) {
			intent->data = kmalloc(cmd.frag_size,
					       __GFP_ATOMIC | __GFP_HIGH);
			if (!intent->data) {
				err = true;
				GLINK_ERR(
				"%s: atomic alloc fail ch %d liid %d size %d\n",
					__func__, rcid, intent_id,
					cmd.frag_size);
			} else {
				intent->intent_size = cmd.frag_size;
			}
		} else {
			GLINK_ERR(
				"%s: intent for ch %d liid %d has no data buff\n",
				__func__, rcid, intent_id);
			err = true;
		}
	}

	if (!err &&
	    (intent->intent_size - intent->write_offset < cmd.frag_size ||
	     intent->write_offset + cmd.size_remaining > intent->intent_size)) {
		GLINK_ERR("%s: rx data size:%d and remaining:%d %s %d %s:%d\n",
			  __func__,
			  cmd.frag_size,
			  cmd.size_remaining,
			  "will overflow ch",
			  rcid,
			  "intent",
			  intent_id);
		err = true;
	}

	if (err) {
		alignment = ALIGN(cmd.frag_size, FIFO_ALIGNMENT);
		alignment -= cmd.frag_size;
		while (cmd.frag_size) {
			if (cmd.frag_size > FIFO_ALIGNMENT) {
				fifo_read(einfo, trash, FIFO_ALIGNMENT);
				cmd.frag_size -= FIFO_ALIGNMENT;
			} else {
				fifo_read(einfo, trash, cmd.frag_size);
				cmd.frag_size = 0;
			}
		}
		if (alignment)
			fifo_read(einfo, trash, alignment);
		return;
	}
	fifo_read(einfo, intent->data + intent->write_offset, cmd.frag_size);
	intent->write_offset += cmd.frag_size;
	intent->pkt_size += cmd.frag_size;

	alignment = ALIGN(cmd.frag_size, FIFO_ALIGNMENT);
	alignment -= cmd.frag_size;
	if (alignment)
		fifo_read(einfo, trash, alignment);

	if (unlikely((cmd_id == TRACER_PKT_CMD ||
		      cmd_id == TRACER_PKT_CONT_CMD) && !cmd.size_remaining)) {
		tracer_pkt_log_event(intent->data, GLINK_XPRT_RX);
		intent->tracer_pkt = true;
	}

	einfo->xprt_if.glink_core_if_ptr->rx_put_pkt_ctx(&einfo->xprt_if,
							 rcid,
							 intent,
							 cmd.size_remaining ?
								false : true);
}
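
/*
 * Padding example: payload fragments are padded to FIFO_ALIGNMENT on the
 * wire, so for a hypothetical frag_size of 13 the code above drains
 * ALIGN(13, 8) - 13 = 3 trailing pad bytes into the scratch buffer.
 */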

/**
 * queue_cmd() - queue a deferred command for later processing
 * @einfo:	Edge to queue commands on.
 * @cmd:	Command to queue.
 * @data:	Command specific data to queue with the command.
 *
 * Return: True if queuing was successful, false otherwise.
 */
static bool queue_cmd(struct edge_info *einfo, void *cmd, void *data)
{
	struct command {
		uint16_t id;
		uint16_t param1;
		uint32_t param2;
	};
	struct command *_cmd = cmd;
	struct deferred_cmd *d_cmd;

	d_cmd = kmalloc(sizeof(*d_cmd), GFP_ATOMIC);
	if (!d_cmd) {
		GLINK_ERR("%s: Discarding cmd %d\n", __func__, _cmd->id);
		return false;
	}
	d_cmd->id = _cmd->id;
	d_cmd->param1 = _cmd->param1;
	d_cmd->param2 = _cmd->param2;
	d_cmd->data = data;
	list_add_tail(&d_cmd->list_node, &einfo->deferred_cmds);
	einfo->deferred_cmds_cnt++;
	kthread_queue_work(&einfo->kworker, &einfo->kwork);
	return true;
}

/**
 * get_rx_fifo() - Find the rx fifo for an edge
 * @einfo:	Edge to find the fifo for.
 *
 * Return: True if fifo was found, false otherwise.
 */
static bool get_rx_fifo(struct edge_info *einfo)
{
	if (einfo->mailbox) {
		einfo->rx_fifo = &einfo->mailbox->fifo[einfo->mailbox->tx_size];
		einfo->rx_fifo_size = einfo->mailbox->rx_size;
	} else {
		einfo->rx_fifo = smem_get_entry(SMEM_GLINK_NATIVE_XPRT_FIFO_1,
						&einfo->rx_fifo_size,
						einfo->remote_proc_id,
						SMEM_ITEM_CACHED_FLAG);
		if (!einfo->rx_fifo)
			einfo->rx_fifo = smem_get_entry(
						SMEM_GLINK_NATIVE_XPRT_FIFO_1,
						&einfo->rx_fifo_size,
						einfo->remote_proc_id,
						0);
		if (!einfo->rx_fifo)
			return false;
	}

	return true;
}

/**
 * tx_wakeup_worker() - worker function to wake up a tx blocked thread
 * @einfo:	Edge to process the tx wakeup on.
 */
static void tx_wakeup_worker(struct edge_info *einfo)
{
	struct glink_transport_if xprt_if = einfo->xprt_if;
	bool trigger_wakeup = false;
	bool trigger_resume = false;
	unsigned long flags;

	if (einfo->in_ssr)
		return;

	spin_lock_irqsave(&einfo->write_lock, flags);
	if (fifo_write_avail(einfo)) {
		if (einfo->tx_blocked_signal_sent)
			einfo->tx_blocked_signal_sent = false;
		if (einfo->tx_resume_needed) {
			einfo->tx_resume_needed = false;
			trigger_resume = true;
		}
	}
	if (waitqueue_active(&einfo->tx_blocked_queue)) { /* tx waiting? */
		trigger_wakeup = true;
	}
	spin_unlock_irqrestore(&einfo->write_lock, flags);
	if (trigger_wakeup)
		wake_up_all(&einfo->tx_blocked_queue);
	if (trigger_resume)
		xprt_if.glink_core_if_ptr->tx_resume(&xprt_if);
}

/**
 * __rx_worker() - process received commands on a specific edge
 * @einfo:	Edge to process commands on.
 * @atomic_ctx:	Indicates if the caller is in atomic context and requires any
 *		non-atomic operations to be deferred.
 */
static void __rx_worker(struct edge_info *einfo, bool atomic_ctx)
{
	struct command {
		uint16_t id;
		uint16_t param1;
		uint32_t param2;
	};
	struct intent_desc {
		uint32_t size;
		uint32_t id;
	};
	struct command cmd;
	struct intent_desc intent;
	struct intent_desc *intents;
	int i;
	bool granted;
	unsigned long flags;
	int rcu_id;
	uint16_t rcid;
	uint32_t name_len;
	uint32_t len;
	char *name;
	char trash[FIFO_ALIGNMENT];
	struct deferred_cmd *d_cmd;
	void *cmd_data;

	rcu_id = srcu_read_lock(&einfo->use_ref);

	if (einfo->in_ssr) {
		srcu_read_unlock(&einfo->use_ref, rcu_id);
		return;
	}

	if (!einfo->rx_fifo) {
		if (!get_rx_fifo(einfo)) {
			srcu_read_unlock(&einfo->use_ref, rcu_id);
			return;
		}
		einfo->xprt_if.glink_core_if_ptr->link_up(&einfo->xprt_if);
	}

	if (atomic_ctx && (einfo->tx_resume_needed ||
	    waitqueue_active(&einfo->tx_blocked_queue))) /* tx waiting? */
		tx_wakeup_worker(einfo);

	/*
	 * Access to the fifo needs to be synchronized, however only the calls
	 * into the core from process_rx_data() are compatible with an atomic
	 * processing context. For everything else, we need to do all the fifo
	 * processing, then unlock the lock for the call into the core. Data
	 * in the fifo is allowed to be processed immediately instead of being
	 * ordered with the commands because the channel open process prevents
	 * intents from being queued (which prevents data from being sent)
	 * until all the channel open commands are processed by the core, thus
	 * eliminating a race.
	 */
	spin_lock_irqsave(&einfo->rx_lock, flags);
	while (fifo_read_avail(einfo) ||
	       (!atomic_ctx && !list_empty(&einfo->deferred_cmds))) {
		if (einfo->in_ssr)
			break;

		if (atomic_ctx && !einfo->intentless &&
		    einfo->deferred_cmds_cnt >= DEFERRED_CMDS_THRESHOLD)
			break;

		if (!atomic_ctx && !list_empty(&einfo->deferred_cmds)) {
			d_cmd = list_first_entry(&einfo->deferred_cmds,
						 struct deferred_cmd,
						 list_node);
			list_del(&d_cmd->list_node);
			einfo->deferred_cmds_cnt--;
			cmd.id = d_cmd->id;
			cmd.param1 = d_cmd->param1;
			cmd.param2 = d_cmd->param2;
			cmd_data = d_cmd->data;
			kfree(d_cmd);
			SMEM_IPC_LOG(einfo, "kthread", cmd.id, cmd.param1,
				     cmd.param2);
		} else {
			fifo_read(einfo, &cmd, sizeof(cmd));
			SMEM_IPC_LOG(einfo, "IRQ", cmd.id, cmd.param1,
				     cmd.param2);
			cmd_data = NULL;
		}

		switch (cmd.id) {
		case VERSION_CMD:
			if (atomic_ctx) {
				queue_cmd(einfo, &cmd, NULL);
				break;
			}
			spin_unlock_irqrestore(&einfo->rx_lock, flags);
			einfo->xprt_if.glink_core_if_ptr->rx_cmd_version(
							&einfo->xprt_if,
							cmd.param1,
							cmd.param2);
			spin_lock_irqsave(&einfo->rx_lock, flags);
			break;
		case VERSION_ACK_CMD:
			if (atomic_ctx) {
				queue_cmd(einfo, &cmd, NULL);
				break;
			}
			spin_unlock_irqrestore(&einfo->rx_lock, flags);
			einfo->xprt_if.glink_core_if_ptr->rx_cmd_version_ack(
							&einfo->xprt_if,
							cmd.param1,
							cmd.param2);
			spin_lock_irqsave(&einfo->rx_lock, flags);
			break;
		case OPEN_CMD:
			rcid = cmd.param1;
			name_len = cmd.param2;

			if (cmd_data) {
				name = cmd_data;
			} else {
				len = ALIGN(name_len, FIFO_ALIGNMENT);
				name = kmalloc(len, GFP_ATOMIC);
				if (!name) {
					pr_err("No memory available to rx ch open cmd name. Discarding cmd.\n");
					while (len) {
						fifo_read(einfo, trash,
							  FIFO_ALIGNMENT);
						len -= FIFO_ALIGNMENT;
					}
					break;
				}
				fifo_read(einfo, name, len);
			}
			if (atomic_ctx) {
				if (!queue_cmd(einfo, &cmd, name))
					kfree(name);
				break;
			}

			spin_unlock_irqrestore(&einfo->rx_lock, flags);
			einfo->xprt_if.glink_core_if_ptr->rx_cmd_ch_remote_open(
							&einfo->xprt_if,
							rcid,
							name,
							SMEM_XPRT_ID);
			kfree(name);
			spin_lock_irqsave(&einfo->rx_lock, flags);
			break;
		case CLOSE_CMD:
			if (atomic_ctx) {
				queue_cmd(einfo, &cmd, NULL);
				break;
			}
			spin_unlock_irqrestore(&einfo->rx_lock, flags);
			einfo->xprt_if.glink_core_if_ptr->
						rx_cmd_ch_remote_close(
							&einfo->xprt_if,
							cmd.param1);
			spin_lock_irqsave(&einfo->rx_lock, flags);
			break;
		case OPEN_ACK_CMD:
			if (atomic_ctx) {
				queue_cmd(einfo, &cmd, NULL);
				break;
			}
			spin_unlock_irqrestore(&einfo->rx_lock, flags);
			einfo->xprt_if.glink_core_if_ptr->rx_cmd_ch_open_ack(
							&einfo->xprt_if,
							cmd.param1,
							SMEM_XPRT_ID);
			spin_lock_irqsave(&einfo->rx_lock, flags);
			break;
		case RX_INTENT_CMD:
			/*
			 * One intent listed with this command. This is the
			 * expected case and can be optimized over the general
			 * case of an array of intents.
			 */
			if (cmd.param2 == 1) {
				if (cmd_data) {
					intent.id = ((struct intent_desc *)
							cmd_data)->id;
					intent.size = ((struct intent_desc *)
							cmd_data)->size;
					kfree(cmd_data);
				} else {
					fifo_read(einfo, &intent,
						  sizeof(intent));
				}
				if (atomic_ctx) {
					cmd_data = kmalloc(sizeof(intent),
							   GFP_ATOMIC);
					if (!cmd_data) {
						GLINK_ERR(
							"%s: dropping cmd %d\n",
							__func__, cmd.id);
						break;
					}
					((struct intent_desc *)cmd_data)->id =
								intent.id;
					((struct intent_desc *)cmd_data)->size =
								intent.size;
					if (!queue_cmd(einfo, &cmd, cmd_data))
						kfree(cmd_data);
					break;
				}
				spin_unlock_irqrestore(&einfo->rx_lock, flags);
				einfo->xprt_if.glink_core_if_ptr->
						rx_cmd_remote_rx_intent_put(
							&einfo->xprt_if,
							cmd.param1,
							intent.id,
							intent.size);
				spin_lock_irqsave(&einfo->rx_lock, flags);
				break;
			}

			/* Array of intents to process */
			if (cmd_data) {
				intents = cmd_data;
			} else {
				intents = kmalloc_array(cmd.param2,
						sizeof(*intents), GFP_ATOMIC);
				if (!intents) {
					for (i = 0; i < cmd.param2; ++i)
						fifo_read(einfo, &intent,
							  sizeof(intent));
					break;
				}
				fifo_read(einfo, intents,
					  sizeof(*intents) * cmd.param2);
			}
			if (atomic_ctx) {
				if (!queue_cmd(einfo, &cmd, intents))
					kfree(intents);
				break;
			}
			spin_unlock_irqrestore(&einfo->rx_lock, flags);
			for (i = 0; i < cmd.param2; ++i) {
				einfo->xprt_if.glink_core_if_ptr->
						rx_cmd_remote_rx_intent_put(
							&einfo->xprt_if,
							cmd.param1,
							intents[i].id,
							intents[i].size);
			}
			kfree(intents);
			spin_lock_irqsave(&einfo->rx_lock, flags);
			break;
		case RX_DONE_CMD:
			if (atomic_ctx) {
				queue_cmd(einfo, &cmd, NULL);
				break;
			}
			spin_unlock_irqrestore(&einfo->rx_lock, flags);
			einfo->xprt_if.glink_core_if_ptr->rx_cmd_tx_done(
							&einfo->xprt_if,
							cmd.param1,
							cmd.param2,
							false);
			spin_lock_irqsave(&einfo->rx_lock, flags);
			break;
		case RX_INTENT_REQ_CMD:
			if (atomic_ctx) {
				queue_cmd(einfo, &cmd, NULL);
				break;
			}
			spin_unlock_irqrestore(&einfo->rx_lock, flags);
			einfo->xprt_if.glink_core_if_ptr->
						rx_cmd_remote_rx_intent_req(
							&einfo->xprt_if,
							cmd.param1,
							cmd.param2);
			spin_lock_irqsave(&einfo->rx_lock, flags);
			break;
		case RX_INTENT_REQ_ACK_CMD:
			if (atomic_ctx) {
				queue_cmd(einfo, &cmd, NULL);
				break;
			}
			spin_unlock_irqrestore(&einfo->rx_lock, flags);
			granted = false;
			if (cmd.param2 == 1)
				granted = true;
			einfo->xprt_if.glink_core_if_ptr->
						rx_cmd_rx_intent_req_ack(
							&einfo->xprt_if,
							cmd.param1,
							granted);
			spin_lock_irqsave(&einfo->rx_lock, flags);
			break;
		case TX_DATA_CMD:
		case TX_DATA_CONT_CMD:
		case TRACER_PKT_CMD:
		case TRACER_PKT_CONT_CMD:
			process_rx_data(einfo, cmd.id, cmd.param1, cmd.param2);
			break;
		case CLOSE_ACK_CMD:
			if (atomic_ctx) {
				queue_cmd(einfo, &cmd, NULL);
				break;
			}
			spin_unlock_irqrestore(&einfo->rx_lock, flags);
			einfo->xprt_if.glink_core_if_ptr->rx_cmd_ch_close_ack(
							&einfo->xprt_if,
							cmd.param1);
			spin_lock_irqsave(&einfo->rx_lock, flags);
			break;
		case READ_NOTIF_CMD:
			send_irq(einfo);
			break;
		case SIGNALS_CMD:
			if (atomic_ctx) {
				queue_cmd(einfo, &cmd, NULL);
				break;
			}
			spin_unlock_irqrestore(&einfo->rx_lock, flags);
			einfo->xprt_if.glink_core_if_ptr->rx_cmd_remote_sigs(
							&einfo->xprt_if,
							cmd.param1,
							cmd.param2);
			spin_lock_irqsave(&einfo->rx_lock, flags);
			break;
		case RX_DONE_W_REUSE_CMD:
			if (atomic_ctx) {
				queue_cmd(einfo, &cmd, NULL);
				break;
			}
			spin_unlock_irqrestore(&einfo->rx_lock, flags);
			einfo->xprt_if.glink_core_if_ptr->rx_cmd_tx_done(
							&einfo->xprt_if,
							cmd.param1,
							cmd.param2,
							true);
			spin_lock_irqsave(&einfo->rx_lock, flags);
			break;
		default:
			pr_err("Unrecognized command: %d\n", cmd.id);
			break;
		}
	}
	spin_unlock_irqrestore(&einfo->rx_lock, flags);
	srcu_read_unlock(&einfo->use_ref, rcu_id);
}
1263/**
Chris Lewfa6135e2016-08-01 13:29:46 -07001264 * rx_worker() - worker function to process received commands
1265 * @work: kwork associated with the edge to process commands on.
1266 */
1267static void rx_worker(struct kthread_work *work)
1268{
1269 struct edge_info *einfo;
1270
1271 einfo = container_of(work, struct edge_info, kwork);
1272 __rx_worker(einfo, false);
1273}

irqreturn_t irq_handler(int irq, void *priv)
{
	struct edge_info *einfo = (struct edge_info *)priv;

	if (einfo->rx_reset_reg)
		writel_relaxed(einfo->out_irq_mask, einfo->rx_reset_reg);

	__rx_worker(einfo, true);
	einfo->rx_irq_count++;

	return IRQ_HANDLED;
}

/**
 * tx_cmd_version() - convert a version cmd to wire format and transmit
 * @if_ptr:	The transport to transmit on.
 * @version:	The version number to encode.
 * @features:	The features information to encode.
 */
static void tx_cmd_version(struct glink_transport_if *if_ptr, uint32_t version,
			   uint32_t features)
{
	struct command {
		uint16_t id;
		uint16_t version;
		uint32_t features;
	};
	struct command cmd;
	struct edge_info *einfo;
	int rcu_id;

	einfo = container_of(if_ptr, struct edge_info, xprt_if);

	rcu_id = srcu_read_lock(&einfo->use_ref);
	if (einfo->in_ssr) {
		srcu_read_unlock(&einfo->use_ref, rcu_id);
		return;
	}

	cmd.id = VERSION_CMD;
	cmd.version = version;
	cmd.features = features;

	SMEM_IPC_LOG(einfo, __func__, cmd.id, cmd.version, cmd.features);
	fifo_tx(einfo, &cmd, sizeof(cmd));
	srcu_read_unlock(&einfo->use_ref, rcu_id);
}

/**
 * tx_cmd_version_ack() - convert a version ack cmd to wire format and transmit
 * @if_ptr:	The transport to transmit on.
 * @version:	The version number to encode.
 * @features:	The features information to encode.
 */
static void tx_cmd_version_ack(struct glink_transport_if *if_ptr,
			       uint32_t version,
			       uint32_t features)
{
	struct command {
		uint16_t id;
		uint16_t version;
		uint32_t features;
	};
	struct command cmd;
	struct edge_info *einfo;
	int rcu_id;

	einfo = container_of(if_ptr, struct edge_info, xprt_if);

	rcu_id = srcu_read_lock(&einfo->use_ref);
	if (einfo->in_ssr) {
		srcu_read_unlock(&einfo->use_ref, rcu_id);
		return;
	}

	cmd.id = VERSION_ACK_CMD;
	cmd.version = version;
	cmd.features = features;

	SMEM_IPC_LOG(einfo, __func__, cmd.id, cmd.version, cmd.features);
	fifo_tx(einfo, &cmd, sizeof(cmd));
	srcu_read_unlock(&einfo->use_ref, rcu_id);
}

/**
 * set_version() - activate a negotiated version and feature set
 * @if_ptr:	The transport to configure.
 * @version:	The version to use.
 * @features:	The features to use.
 *
 * Return: The supported capabilities of the transport.
 */
static uint32_t set_version(struct glink_transport_if *if_ptr, uint32_t version,
			    uint32_t features)
{
	struct edge_info *einfo;
	uint32_t ret;
	int rcu_id;

	einfo = container_of(if_ptr, struct edge_info, xprt_if);

	rcu_id = srcu_read_lock(&einfo->use_ref);
	if (einfo->in_ssr) {
		srcu_read_unlock(&einfo->use_ref, rcu_id);
		return 0;
	}

	ret = einfo->intentless ?
	      GCAP_INTENTLESS | GCAP_SIGNALS : GCAP_SIGNALS;

	if (features & TRACER_PKT_FEATURE)
		ret |= GCAP_TRACER_PKT;

	srcu_read_unlock(&einfo->use_ref, rcu_id);
	return ret;
}

/**
 * tx_cmd_ch_open() - convert a channel open cmd to wire format and transmit
 * @if_ptr:	The transport to transmit on.
 * @lcid:	The local channel id to encode.
 * @name:	The channel name to encode.
 * @req_xprt:	The transport the core would like to migrate this channel to.
 *
 * Return: 0 on success or standard Linux error code.
 */
static int tx_cmd_ch_open(struct glink_transport_if *if_ptr, uint32_t lcid,
			  const char *name, uint16_t req_xprt)
{
	struct command {
		uint16_t id;
		uint16_t lcid;
		uint32_t length;
	};
	struct command cmd;
	struct edge_info *einfo;
	uint32_t buf_size;
	void *buf;
	int rcu_id;

	einfo = container_of(if_ptr, struct edge_info, xprt_if);

	rcu_id = srcu_read_lock(&einfo->use_ref);
	if (einfo->in_ssr) {
		srcu_read_unlock(&einfo->use_ref, rcu_id);
		return -EFAULT;
	}

	cmd.id = OPEN_CMD;
	cmd.lcid = lcid;
	cmd.length = strlen(name) + 1;

	buf_size = ALIGN(sizeof(cmd) + cmd.length, FIFO_ALIGNMENT);

	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf) {
		GLINK_ERR("%s: malloc fail for %d size buf\n",
			  __func__, buf_size);
		srcu_read_unlock(&einfo->use_ref, rcu_id);
		return -ENOMEM;
	}

	memcpy(buf, &cmd, sizeof(cmd));
	memcpy(buf + sizeof(cmd), name, cmd.length);

	SMEM_IPC_LOG(einfo, __func__, cmd.id, cmd.lcid, cmd.length);
	fifo_tx(einfo, buf, buf_size);

	kfree(buf);

	srcu_read_unlock(&einfo->use_ref, rcu_id);
	return 0;
}

/**
 * tx_cmd_ch_close() - convert a channel close cmd to wire format and transmit
 * @if_ptr:	The transport to transmit on.
 * @lcid:	The local channel id to encode.
 *
 * Return: 0 on success or standard Linux error code.
 */
static int tx_cmd_ch_close(struct glink_transport_if *if_ptr, uint32_t lcid)
{
	struct command {
		uint16_t id;
		uint16_t lcid;
		uint32_t reserved;
	};
	struct command cmd;
	struct edge_info *einfo;
	int rcu_id;

	einfo = container_of(if_ptr, struct edge_info, xprt_if);

	rcu_id = srcu_read_lock(&einfo->use_ref);
	if (einfo->in_ssr) {
		srcu_read_unlock(&einfo->use_ref, rcu_id);
		return -EFAULT;
	}

	cmd.id = CLOSE_CMD;
	cmd.lcid = lcid;
	cmd.reserved = 0;

	SMEM_IPC_LOG(einfo, __func__, cmd.id, cmd.lcid, cmd.reserved);
	fifo_tx(einfo, &cmd, sizeof(cmd));

	srcu_read_unlock(&einfo->use_ref, rcu_id);
	return 0;
}

/**
 * tx_cmd_ch_remote_open_ack() - convert a channel open ack cmd to wire format
 *				 and transmit
 * @if_ptr:	The transport to transmit on.
 * @rcid:	The remote channel id to encode.
 * @xprt_resp:	The response to a transport migration request.
 */
static void tx_cmd_ch_remote_open_ack(struct glink_transport_if *if_ptr,
				      uint32_t rcid, uint16_t xprt_resp)
{
	struct command {
		uint16_t id;
		uint16_t rcid;
		uint32_t reserved;
	};
	struct command cmd;
	struct edge_info *einfo;
	int rcu_id;

	einfo = container_of(if_ptr, struct edge_info, xprt_if);

	rcu_id = srcu_read_lock(&einfo->use_ref);
	if (einfo->in_ssr) {
		srcu_read_unlock(&einfo->use_ref, rcu_id);
		return;
	}

	cmd.id = OPEN_ACK_CMD;
	cmd.rcid = rcid;
	cmd.reserved = 0;

	SMEM_IPC_LOG(einfo, __func__, cmd.id, cmd.rcid, cmd.reserved);
	fifo_tx(einfo, &cmd, sizeof(cmd));
	srcu_read_unlock(&einfo->use_ref, rcu_id);
}

/**
 * tx_cmd_ch_remote_close_ack() - convert a channel close ack cmd to wire
 *				  format and transmit
 * @if_ptr:	The transport to transmit on.
 * @rcid:	The remote channel id to encode.
 */
static void tx_cmd_ch_remote_close_ack(struct glink_transport_if *if_ptr,
				       uint32_t rcid)
{
	struct command {
		uint16_t id;
		uint16_t rcid;
		uint32_t reserved;
	};
	struct command cmd;
	struct edge_info *einfo;
	int rcu_id;

	einfo = container_of(if_ptr, struct edge_info, xprt_if);

	rcu_id = srcu_read_lock(&einfo->use_ref);
	if (einfo->in_ssr) {
		srcu_read_unlock(&einfo->use_ref, rcu_id);
		return;
	}

	cmd.id = CLOSE_ACK_CMD;
	cmd.rcid = rcid;
	cmd.reserved = 0;

	SMEM_IPC_LOG(einfo, __func__, cmd.id, cmd.rcid, cmd.reserved);
	fifo_tx(einfo, &cmd, sizeof(cmd));
	srcu_read_unlock(&einfo->use_ref, rcu_id);
}

/**
 * subsys_up() - process a subsystem up notification
 * @if_ptr:	The transport which is up
 */
static void subsys_up(struct glink_transport_if *if_ptr)
{
	struct edge_info *einfo;

	einfo = container_of(if_ptr, struct edge_info, xprt_if);
	einfo->in_ssr = false;
	if (!einfo->rx_fifo) {
		if (!get_rx_fifo(einfo))
			return;
		einfo->xprt_if.glink_core_if_ptr->link_up(&einfo->xprt_if);
	}
}

/**
 * ssr() - process a subsystem restart notification of a transport
 * @if_ptr:	The transport to restart
 *
 * Return: 0 on success or standard Linux error code.
 */
static int ssr(struct glink_transport_if *if_ptr)
{
	struct edge_info *einfo;
	struct deferred_cmd *cmd;

	einfo = container_of(if_ptr, struct edge_info, xprt_if);

	BUG_ON(einfo->remote_proc_id == SMEM_RPM);

	einfo->in_ssr = true;
	wake_up_all(&einfo->tx_blocked_queue);

	synchronize_srcu(&einfo->use_ref);

	while (!list_empty(&einfo->deferred_cmds)) {
		cmd = list_first_entry(&einfo->deferred_cmds,
				       struct deferred_cmd, list_node);
		list_del(&cmd->list_node);
		kfree(cmd->data);
		kfree(cmd);
	}

	einfo->tx_resume_needed = false;
	einfo->tx_blocked_signal_sent = false;
	einfo->rx_fifo = NULL;
	einfo->rx_fifo_size = 0;
	einfo->tx_ch_desc->write_index = 0;
	einfo->rx_ch_desc->read_index = 0;
	einfo->xprt_if.glink_core_if_ptr->link_down(&einfo->xprt_if);

	return 0;
}
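
/*
 * The synchronize_srcu() in ssr() above is what makes in_ssr safe to trust:
 * every transmit path brackets its work with srcu_read_lock(&einfo->use_ref)
 * and checks in_ssr, so once synchronize_srcu() returns no reader is still
 * touching the fifos that are about to be reset.
 */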

/**
 * wait_link_down() - Check status of read/write indices
 * @if_ptr:	The transport to check
 *
 * Return: 1 if indices are all zero, 0 otherwise
 */
int wait_link_down(struct glink_transport_if *if_ptr)
{
	struct edge_info *einfo;

	einfo = container_of(if_ptr, struct edge_info, xprt_if);

	if (einfo->tx_ch_desc->write_index == 0 &&
	    einfo->tx_ch_desc->read_index == 0 &&
	    einfo->rx_ch_desc->write_index == 0 &&
	    einfo->rx_ch_desc->read_index == 0)
		return 1;
	else
		return 0;
}

/**
 * allocate_rx_intent() - allocate/reserve space for RX Intent
 * @if_ptr:	The transport the intent is associated with.
 * @size:	Size of intent.
 * @intent:	Pointer to the intent structure.
 *
 * Assign "data" with the buffer created, since the transport creates
 * a linear buffer and "iovec" with the "intent" itself, so that
 * the data can be passed to a client that receives only vector buffer.
 * Note that returning NULL for the pointer is valid (it means that space has
 * been reserved, but the actual pointer will be provided later).
 *
 * Return: 0 on success or standard Linux error code.
 */
static int allocate_rx_intent(struct glink_transport_if *if_ptr, size_t size,
			      struct glink_core_rx_intent *intent)
{
	void *t;

	t = kmalloc(size, GFP_KERNEL);
	if (!t)
		return -ENOMEM;

	intent->data = t;
	intent->iovec = (void *)intent;
	intent->vprovider = rx_linear_vbuf_provider;
	intent->pprovider = NULL;
	return 0;
}

/**
 * deallocate_rx_intent() - Deallocate space created for RX Intent
 * @if_ptr:	The transport the intent is associated with.
 * @intent:	Pointer to the intent structure.
 *
 * Return: 0 on success or standard Linux error code.
 */
static int deallocate_rx_intent(struct glink_transport_if *if_ptr,
				struct glink_core_rx_intent *intent)
{
	if (!intent || !intent->data)
		return -EINVAL;

	kfree(intent->data);
	intent->data = NULL;
	intent->iovec = NULL;
	intent->vprovider = NULL;
	return 0;
}
1684
1685/**
1686 * tx_cmd_local_rx_intent() - convert an rx intent cmd to wire format and
1687 * transmit
1688 * @if_ptr: The transport to transmit on.
1689 * @lcid: The local channel id to encode.
1690 * @size: The intent size to encode.
1691 * @liid: The local intent id to encode.
1692 *
1693 * Return: 0 on success or standard Linux error code.
1694 */
1695static int tx_cmd_local_rx_intent(struct glink_transport_if *if_ptr,
1696 uint32_t lcid, size_t size, uint32_t liid)
1697{
1698 struct command {
1699 uint16_t id;
1700 uint16_t lcid;
1701 uint32_t count;
1702 uint32_t size;
1703 uint32_t liid;
1704 };
1705 struct command cmd;
1706 struct edge_info *einfo;
1707 int rcu_id;
1708
1709 if (size > UINT_MAX) {
1710 pr_err("%s: size %zu is too large to encode\n", __func__, size);
1711 return -EMSGSIZE;
1712 }
1713
1714 einfo = container_of(if_ptr, struct edge_info, xprt_if);
1715
1716 if (einfo->intentless)
1717 return -EOPNOTSUPP;
1718
1719 rcu_id = srcu_read_lock(&einfo->use_ref);
1720 if (einfo->in_ssr) {
1721 srcu_read_unlock(&einfo->use_ref, rcu_id);
1722 return -EFAULT;
1723 }
1724
1725 cmd.id = RX_INTENT_CMD;
1726 cmd.lcid = lcid;
1727 cmd.count = 1;
1728 cmd.size = size;
1729 cmd.liid = liid;
1730
Dhoat Harpal4a052812017-10-06 22:14:10 +05301731 SMEM_IPC_LOG(einfo, __func__, cmd.id, cmd.lcid, cmd.count);
Chris Lewfa6135e2016-08-01 13:29:46 -07001732 fifo_tx(einfo, &cmd, sizeof(cmd));
1733
1734 srcu_read_unlock(&einfo->use_ref, rcu_id);
1735 return 0;
1736}
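
/*
 * Wire-format note: with the fixed-width members above and natural
 * alignment there is no padding, so fifo_tx() emits exactly 16 bytes:
 * id (2), lcid (2), count (4), size (4), liid (4). The count of 1 means
 * a single intent of the given size is being advertised.
 */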
1737
1738/**
1739 * tx_cmd_local_rx_done() - convert an rx done cmd to wire format and transmit
1740 * @if_ptr: The transport to transmit on.
1741 * @lcid: The local channel id to encode.
1742 * @liid: The local intent id to encode.
1743 * @reuse: Reuse the consumed intent.
1744 */
1745static void tx_cmd_local_rx_done(struct glink_transport_if *if_ptr,
1746 uint32_t lcid, uint32_t liid, bool reuse)
1747{
1748 struct command {
1749 uint16_t id;
1750 uint16_t lcid;
1751 uint32_t liid;
1752 };
1753 struct command cmd;
1754 struct edge_info *einfo;
1755 int rcu_id;
1756
1757 einfo = container_of(if_ptr, struct edge_info, xprt_if);
1758
1759 if (einfo->intentless)
1760 return;
1761
1762 rcu_id = srcu_read_lock(&einfo->use_ref);
1763 if (einfo->in_ssr) {
1764 srcu_read_unlock(&einfo->use_ref, rcu_id);
1765 return;
1766 }
1767
1768 cmd.id = reuse ? RX_DONE_W_REUSE_CMD : RX_DONE_CMD;
1769 cmd.lcid = lcid;
1770 cmd.liid = liid;
1771
Dhoat Harpal4a052812017-10-06 22:14:10 +05301772 SMEM_IPC_LOG(einfo, __func__, cmd.id, cmd.lcid, cmd.liid);
Chris Lewfa6135e2016-08-01 13:29:46 -07001773 fifo_tx(einfo, &cmd, sizeof(cmd));
1774 srcu_read_unlock(&einfo->use_ref, rcu_id);
1775}
1776
1777/**
1778 * tx_cmd_rx_intent_req() - convert an rx intent request cmd to wire format and
1779 * transmit
1780 * @if_ptr: The transport to transmit on.
1781 * @lcid: The local channel id to encode.
1782 * @size: The requested intent size to encode.
1783 *
1784 * Return: 0 on success or standard Linux error code.
1785 */
1786static int tx_cmd_rx_intent_req(struct glink_transport_if *if_ptr,
1787 uint32_t lcid, size_t size)
1788{
1789 struct command {
1790 uint16_t id;
1791 uint16_t lcid;
1792 uint32_t size;
1793 };
1794 struct command cmd;
1795 struct edge_info *einfo;
1796 int rcu_id;
1797
1798 if (size > UINT_MAX) {
1799 pr_err("%s: size %zu is too large to encode\n", __func__, size);
1800 return -EMSGSIZE;
1801 }
1802
1803 einfo = container_of(if_ptr, struct edge_info, xprt_if);
1804
1805 if (einfo->intentless)
1806 return -EOPNOTSUPP;
1807
1808 rcu_id = srcu_read_lock(&einfo->use_ref);
1809 if (einfo->in_ssr) {
1810 srcu_read_unlock(&einfo->use_ref, rcu_id);
1811 return -EFAULT;
1812 }
1813
1814	cmd.id = RX_INTENT_REQ_CMD;
1815 cmd.lcid = lcid;
1816 cmd.size = size;
1817
Dhoat Harpal4a052812017-10-06 22:14:10 +05301818 SMEM_IPC_LOG(einfo, __func__, cmd.id, cmd.lcid, cmd.size);
Chris Lewfa6135e2016-08-01 13:29:46 -07001819 fifo_tx(einfo, &cmd, sizeof(cmd));
1820
1821 srcu_read_unlock(&einfo->use_ref, rcu_id);
1822 return 0;
1823}
1824
1825/**
1826 * tx_cmd_remote_rx_intent_req_ack() - convert an rx intent request ack
1827 * cmd to wire format and transmit
1828 * @if_ptr: The transport to transmit on.
1829 * @lcid: The local channel id to encode.
1830 * @granted: The request response to encode.
1831 *
1832 * Return: 0 on success or standard Linux error code.
1833 */
1834static int tx_cmd_remote_rx_intent_req_ack(struct glink_transport_if *if_ptr,
1835 uint32_t lcid, bool granted)
1836{
1837 struct command {
1838 uint16_t id;
1839 uint16_t lcid;
1840 uint32_t response;
1841 };
1842 struct command cmd;
1843 struct edge_info *einfo;
1844 int rcu_id;
1845
1846 einfo = container_of(if_ptr, struct edge_info, xprt_if);
1847
1848 if (einfo->intentless)
1849 return -EOPNOTSUPP;
1850
1851 rcu_id = srcu_read_lock(&einfo->use_ref);
1852 if (einfo->in_ssr) {
1853 srcu_read_unlock(&einfo->use_ref, rcu_id);
1854 return -EFAULT;
1855 }
1856
1857	cmd.id = RX_INTENT_REQ_ACK_CMD;
1858 cmd.lcid = lcid;
1859 if (granted)
1860 cmd.response = 1;
1861 else
1862 cmd.response = 0;
1863
Dhoat Harpal4a052812017-10-06 22:14:10 +05301864 SMEM_IPC_LOG(einfo, __func__, cmd.id, cmd.lcid, cmd.response);
Chris Lewfa6135e2016-08-01 13:29:46 -07001865 fifo_tx(einfo, &cmd, sizeof(cmd));
1866
1867 srcu_read_unlock(&einfo->use_ref, rcu_id);
1868 return 0;
1869}
1870
1871/**
1872 * tx_cmd_set_sigs() - convert a signals cmd to wire format and transmit
1873 * @if_ptr: The transport to transmit on.
1874 * @lcid: The local channel id to encode.
1875 * @sigs: The signals to encode.
1876 *
1877 * Return: 0 on success or standard Linux error code.
1878 */
1879static int tx_cmd_set_sigs(struct glink_transport_if *if_ptr, uint32_t lcid,
1880 uint32_t sigs)
1881{
1882 struct command {
1883 uint16_t id;
1884 uint16_t lcid;
1885 uint32_t sigs;
1886 };
1887 struct command cmd;
1888 struct edge_info *einfo;
1889 int rcu_id;
1890
1891 einfo = container_of(if_ptr, struct edge_info, xprt_if);
1892
1893 rcu_id = srcu_read_lock(&einfo->use_ref);
1894 if (einfo->in_ssr) {
1895 srcu_read_unlock(&einfo->use_ref, rcu_id);
1896 return -EFAULT;
1897 }
1898
1899	cmd.id = SIGNALS_CMD;
1900 cmd.lcid = lcid;
1901 cmd.sigs = sigs;
1902
Dhoat Harpal4a052812017-10-06 22:14:10 +05301903 SMEM_IPC_LOG(einfo, __func__, cmd.id, cmd.lcid, cmd.sigs);
Chris Lewfa6135e2016-08-01 13:29:46 -07001904 fifo_tx(einfo, &cmd, sizeof(cmd));
1905
1906 srcu_read_unlock(&einfo->use_ref, rcu_id);
1907 return 0;
1908}
1909
1910/**
1911 * poll() - poll for data on a channel
1912 * @if_ptr: The transport the channel exists on.
1913 * @lcid: The local channel id.
1914 *
1915 * Return: 1 if data available, 0 if not, standard Linux error code otherwise.
1916 */
1917static int poll(struct glink_transport_if *if_ptr, uint32_t lcid)
1918{
1919 struct edge_info *einfo;
1920 int rcu_id;
1921
1922 einfo = container_of(if_ptr, struct edge_info, xprt_if);
1923
1924 rcu_id = srcu_read_lock(&einfo->use_ref);
1925 if (einfo->in_ssr) {
1926 srcu_read_unlock(&einfo->use_ref, rcu_id);
1927 return -EFAULT;
1928 }
1929
1930 if (fifo_read_avail(einfo)) {
1931 __rx_worker(einfo, true);
1932 srcu_read_unlock(&einfo->use_ref, rcu_id);
1933 return 1;
1934 }
1935
1936 srcu_read_unlock(&einfo->use_ref, rcu_id);
1937 return 0;
1938}
1939
1940/**
1941 * mask_rx_irq() - mask the receive irq for a channel
1942 * @if_ptr: The transport the channel exists on.
1943 * @lcid: The local channel id for the channel.
1944 * @mask: True to mask the irq, false to unmask.
1945 * @pstruct: Platform defined structure for handling the masking.
1946 *
1947 * Return: 0 on success or standard Linux error code.
1948 */
1949static int mask_rx_irq(struct glink_transport_if *if_ptr, uint32_t lcid,
1950 bool mask, void *pstruct)
1951{
1952 struct edge_info *einfo;
1953 struct irq_chip *irq_chip;
1954 struct irq_data *irq_data;
1955 int rcu_id;
1956
1957 einfo = container_of(if_ptr, struct edge_info, xprt_if);
1958
1959 rcu_id = srcu_read_lock(&einfo->use_ref);
1960 if (einfo->in_ssr) {
1961 srcu_read_unlock(&einfo->use_ref, rcu_id);
1962 return -EFAULT;
1963 }
1964
1965 irq_chip = irq_get_chip(einfo->irq_line);
1966 if (!irq_chip) {
1967 srcu_read_unlock(&einfo->use_ref, rcu_id);
1968 return -ENODEV;
1969 }
1970
1971 irq_data = irq_get_irq_data(einfo->irq_line);
1972 if (!irq_data) {
1973 srcu_read_unlock(&einfo->use_ref, rcu_id);
1974 return -ENODEV;
1975 }
1976
1977 if (mask) {
1978 irq_chip->irq_mask(irq_data);
1979 einfo->irq_disabled = true;
1980 if (pstruct)
1981 irq_set_affinity(einfo->irq_line, pstruct);
1982 } else {
1983 irq_chip->irq_unmask(irq_data);
1984 einfo->irq_disabled = false;
1985 }
1986
1987 srcu_read_unlock(&einfo->use_ref, rcu_id);
1988 return 0;
1989}
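
/*
 * Usage sketch, compiled out: mask the edge RX interrupt and steer it to
 * CPU0 through the optional pstruct argument. The cpumask handling here
 * is an illustrative assumption; pstruct is only consumed when masking.
 */
#if 0
	struct cpumask cpu0;

	cpumask_clear(&cpu0);
	cpumask_set_cpu(0, &cpu0);
	mask_rx_irq(if_ptr, lcid, true, &cpu0);		/* mask + affinity */
	/* ... window with RX interrupts masked ... */
	mask_rx_irq(if_ptr, lcid, false, NULL);		/* unmask */
#endif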
1990
1991/**
1992 * tx_data() - convert a data/tracer_pkt to wire format and transmit
1993 * @if_ptr: The transport to transmit on.
1994 * @cmd_id: The command ID to transmit.
1995 * @lcid: The local channel id to encode.
1996 * @pctx: The data to encode.
1997 *
1998 * Return: Number of bytes written or standard Linux error code.
1999 */
2000static int tx_data(struct glink_transport_if *if_ptr, uint16_t cmd_id,
2001 uint32_t lcid, struct glink_core_tx_pkt *pctx)
2002{
2003 struct command {
2004 uint16_t id;
2005 uint16_t lcid;
2006 uint32_t riid;
2007 uint32_t size;
2008 uint32_t size_left;
2009 };
2010 struct command cmd;
2011 struct edge_info *einfo;
2012 uint32_t size;
2013 uint32_t zeros_size;
2014 const void *data_start;
2015 char zeros[FIFO_ALIGNMENT] = { 0 };
2016 unsigned long flags;
2017 size_t tx_size = 0;
2018 int rcu_id;
2019 int ret;
2020
2021 if (pctx->size < pctx->size_remaining) {
2022 GLINK_ERR("%s: size remaining exceeds size. Resetting.\n",
2023 __func__);
2024 pctx->size_remaining = pctx->size;
2025 }
2026 if (!pctx->size_remaining)
2027 return 0;
2028
2029 einfo = container_of(if_ptr, struct edge_info, xprt_if);
2030
2031 rcu_id = srcu_read_lock(&einfo->use_ref);
2032 if (einfo->in_ssr) {
2033 srcu_read_unlock(&einfo->use_ref, rcu_id);
2034 return -EFAULT;
2035 }
2036
2037 if (einfo->intentless &&
2038 (pctx->size_remaining != pctx->size || cmd_id == TRACER_PKT_CMD)) {
2039 srcu_read_unlock(&einfo->use_ref, rcu_id);
2040 return -EINVAL;
2041 }
2042
2043 if (cmd_id == TX_DATA_CMD) {
2044 if (pctx->size_remaining == pctx->size)
2045 cmd.id = TX_DATA_CMD;
2046 else
2047 cmd.id = TX_DATA_CONT_CMD;
2048 } else {
2049 if (pctx->size_remaining == pctx->size)
2050 cmd.id = TRACER_PKT_CMD;
2051 else
2052 cmd.id = TRACER_PKT_CONT_CMD;
2053 }
2054 cmd.lcid = lcid;
2055 cmd.riid = pctx->riid;
2056 data_start = get_tx_vaddr(pctx, pctx->size - pctx->size_remaining,
2057 &tx_size);
2058 if (!data_start) {
2059 GLINK_ERR("%s: invalid data_start\n", __func__);
2060 srcu_read_unlock(&einfo->use_ref, rcu_id);
2061 return -EINVAL;
2062 }
2063
2064 spin_lock_irqsave(&einfo->write_lock, flags);
2065 size = fifo_write_avail(einfo);
2066
2067 /* Intentless clients expect a complete commit or instant failure */
2068 if (einfo->intentless && size < sizeof(cmd) + pctx->size) {
2069 spin_unlock_irqrestore(&einfo->write_lock, flags);
2070 srcu_read_unlock(&einfo->use_ref, rcu_id);
2071 return -ENOSPC;
2072 }
2073
2074 /* Need enough space to write the command and some data */
2075 if (size <= sizeof(cmd)) {
2076 einfo->tx_resume_needed = true;
Dhoat Harpal54c7fb82017-12-14 17:24:17 +05302077 send_tx_blocked_signal(einfo);
Chris Lewfa6135e2016-08-01 13:29:46 -07002078 spin_unlock_irqrestore(&einfo->write_lock, flags);
2079 srcu_read_unlock(&einfo->use_ref, rcu_id);
2080 return -EAGAIN;
2081 }
2082 size -= sizeof(cmd);
2083 if (size > tx_size)
2084 size = tx_size;
2085
2086 cmd.size = size;
2087 pctx->size_remaining -= size;
2088 cmd.size_left = pctx->size_remaining;
2089 zeros_size = ALIGN(size, FIFO_ALIGNMENT) - cmd.size;
2090 if (cmd.id == TRACER_PKT_CMD)
2091 tracer_pkt_log_event((void *)(pctx->data), GLINK_XPRT_TX);
2092
2093 ret = fifo_write_complex(einfo, &cmd, sizeof(cmd), data_start, size,
2094 zeros, zeros_size);
2095 if (ret < 0) {
2096 spin_unlock_irqrestore(&einfo->write_lock, flags);
2097 srcu_read_unlock(&einfo->use_ref, rcu_id);
2098 return ret;
2099 }
2100
Dhoat Harpal4a052812017-10-06 22:14:10 +05302101 SMEM_IPC_LOG(einfo, __func__, cmd.id, cmd.lcid, cmd.riid);
Chris Lewfa6135e2016-08-01 13:29:46 -07002102 GLINK_DBG("%s %s: lcid[%u] riid[%u] cmd[%d], size[%d], size_left[%d]\n",
2103 "<SMEM>", __func__, cmd.lcid, cmd.riid, cmd.id, cmd.size,
2104 cmd.size_left);
2105 spin_unlock_irqrestore(&einfo->write_lock, flags);
2106
2107	/* Fake tx_done for intentless since it's not supported over the wire */
2108 if (einfo->intentless) {
2109 spin_lock_irqsave(&einfo->rx_lock, flags);
2110 cmd.id = RX_DONE_CMD;
2111 cmd.lcid = pctx->rcid;
2112 queue_cmd(einfo, &cmd, NULL);
2113 spin_unlock_irqrestore(&einfo->rx_lock, flags);
2114 }
2115
2116 srcu_read_unlock(&einfo->use_ref, rcu_id);
2117 return cmd.size;
2118}
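
/*
 * Caller-side sketch, compiled out: tx_data() commits at most one FIFO's
 * worth of payload per call, returns the bytes consumed and decrements
 * pctx->size_remaining; -EAGAIN means the FIFO was full and a blocked
 * signal was sent. The retry loop below is an illustrative assumption of
 * how a caller could drive a multi-fragment send, not the core's code.
 */
#if 0
	int ret;

	while (pctx->size_remaining) {
		ret = tx(if_ptr, lcid, pctx);
		if (ret == -EAGAIN)
			continue;	/* remote read notification pending */
		if (ret < 0)
			break;		/* hard failure, abort the packet */
	}
#endif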
2119
2120/**
2121 * tx() - convert a data transmit cmd to wire format and transmit
2122 * @if_ptr: The transport to transmit on.
2123 * @lcid: The local channel id to encode.
2124 * @pctx: The data to encode.
2125 *
2126 * Return: Number of bytes written or standard Linux error code.
2127 */
2128static int tx(struct glink_transport_if *if_ptr, uint32_t lcid,
2129 struct glink_core_tx_pkt *pctx)
2130{
2131 return tx_data(if_ptr, TX_DATA_CMD, lcid, pctx);
2132}
2133
2134/**
2135 * tx_cmd_tracer_pkt() - convert a tracer packet cmd to wire format and transmit
2136 * @if_ptr: The transport to transmit on.
2137 * @lcid: The local channel id to encode.
2138 * @pctx: The data to encode.
2139 *
2140 * Return: Number of bytes written or standard Linux error code.
2141 */
2142static int tx_cmd_tracer_pkt(struct glink_transport_if *if_ptr, uint32_t lcid,
2143 struct glink_core_tx_pkt *pctx)
2144{
2145 return tx_data(if_ptr, TRACER_PKT_CMD, lcid, pctx);
2146}
2147
2148/**
2149 * get_power_vote_ramp_time() - Get the ramp time required for the power
2150 * votes to be applied
2151 * @if_ptr: The transport interface on which power voting is requested.
2152 * @state: The power state for which ramp time is required.
2153 *
2154 * Return: The ramp time specific to the power state, standard error otherwise.
2155 */
2156static unsigned long get_power_vote_ramp_time(
2157 struct glink_transport_if *if_ptr,
2158 uint32_t state)
2159{
2160 struct edge_info *einfo;
2161
2162 einfo = container_of(if_ptr, struct edge_info, xprt_if);
2163
2164 if (state >= einfo->num_pw_states || !(einfo->ramp_time_us))
2165 return (unsigned long)ERR_PTR(-EINVAL);
2166
2167 return einfo->ramp_time_us[state];
2168}
2169
2170/**
2171 * power_vote() - Update the power votes to meet qos requirement
2172 * @if_ptr: The transport interface on which power voting is requested.
2173 * @state: The power state for which the voting should be done.
2174 *
2175 * Return: 0 on Success, standard error otherwise.
2176 */
2177static int power_vote(struct glink_transport_if *if_ptr, uint32_t state)
2178{
2179 return 0;
2180}
2181
2182/**
2183 * power_unvote() - Remove all the power votes
2184 * @if_ptr: The transport interface on which power voting is requested.
2185 *
2186 * Return: 0 on Success, standard error otherwise.
2187 */
2188static int power_unvote(struct glink_transport_if *if_ptr)
2189{
2190 return 0;
2191}
2192
2193/**
Chris Lewa9a78ae2017-05-11 16:47:37 -07002194 * rx_rt_vote() - Increment the RX thread RT vote count
2195 * @if_ptr: The transport interface on which the RT vote is requested.
2196 *
2197 * Return: 0 on Success, standard error otherwise.
2198 */
2199static int rx_rt_vote(struct glink_transport_if *if_ptr)
2200{
2201 struct edge_info *einfo;
2202 struct sched_param param = { .sched_priority = 1 };
2203 int ret = 0;
2204 unsigned long flags;
2205
2206 einfo = container_of(if_ptr, struct edge_info, xprt_if);
2207 spin_lock_irqsave(&einfo->rt_vote_lock, flags);
2208 if (!einfo->rt_votes)
2209 ret = sched_setscheduler_nocheck(einfo->task, SCHED_FIFO,
2210 &param);
2211 einfo->rt_votes++;
2212 spin_unlock_irqrestore(&einfo->rt_vote_lock, flags);
2213 return ret;
2214}
2215
2216/**
2217 * rx_rt_unvote() - Remove an RX thread RT vote
2218 * @if_ptr: The transport interface on which the RT vote is removed.
2219 *
2220 * Return: 0 on Success, standard error otherwise.
2221 */
2222static int rx_rt_unvote(struct glink_transport_if *if_ptr)
2223{
2224 struct edge_info *einfo;
2225 struct sched_param param = { .sched_priority = 0 };
2226 int ret = 0;
2227 unsigned long flags;
2228
2229 einfo = container_of(if_ptr, struct edge_info, xprt_if);
2230 spin_lock_irqsave(&einfo->rt_vote_lock, flags);
2231 einfo->rt_votes--;
2232 if (!einfo->rt_votes)
2233 ret = sched_setscheduler_nocheck(einfo->task, SCHED_NORMAL,
2234 &param);
2235 spin_unlock_irqrestore(&einfo->rt_vote_lock, flags);
2236 return ret;
2237}
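
/*
 * The RT votes are reference counted: the first rx_rt_vote() moves the
 * RX kthread to SCHED_FIFO priority 1 and only the last rx_rt_unvote()
 * restores SCHED_NORMAL. For example:
 *
 *	rx_rt_vote(if_ptr);	- thread becomes RT, rt_votes = 1
 *	rx_rt_vote(if_ptr);	- no scheduler change, rt_votes = 2
 *	rx_rt_unvote(if_ptr);	- still RT, rt_votes = 1
 *	rx_rt_unvote(if_ptr);	- back to SCHED_NORMAL, rt_votes = 0
 */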
2238
2239/**
Chris Lewfa6135e2016-08-01 13:29:46 -07002240 * negotiate_features_v1() - determine what features of a version can be used
2241 * @if_ptr: The transport for which features are negotiated.
2242 * @version: The version negotiated.
2243 * @features: The set of requested features.
2244 *
2245 * Return: What set of the requested features can be supported.
2246 */
2247static uint32_t negotiate_features_v1(struct glink_transport_if *if_ptr,
2248 const struct glink_core_version *version,
2249 uint32_t features)
2250{
2251 return features & version->features;
2252}
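
/*
 * Worked example: if this side supports features 0x6 (bits 1-2) and the
 * remote requests 0x3 (bits 0-1), the negotiated set is 0x6 & 0x3 = 0x2;
 * only features common to both sides survive negotiation.
 */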
2253
2254/**
2255 * init_xprt_if() - initialize the xprt_if for an edge
2256 * @einfo: The edge to initialize.
2257 */
2258static void init_xprt_if(struct edge_info *einfo)
2259{
2260 einfo->xprt_if.tx_cmd_version = tx_cmd_version;
2261 einfo->xprt_if.tx_cmd_version_ack = tx_cmd_version_ack;
2262 einfo->xprt_if.set_version = set_version;
2263 einfo->xprt_if.tx_cmd_ch_open = tx_cmd_ch_open;
2264 einfo->xprt_if.tx_cmd_ch_close = tx_cmd_ch_close;
2265 einfo->xprt_if.tx_cmd_ch_remote_open_ack = tx_cmd_ch_remote_open_ack;
2266 einfo->xprt_if.tx_cmd_ch_remote_close_ack = tx_cmd_ch_remote_close_ack;
2267 einfo->xprt_if.ssr = ssr;
Dhoat Harpale4811372017-12-18 21:05:20 +05302268 einfo->xprt_if.subsys_up = subsys_up;
Chris Lewfa6135e2016-08-01 13:29:46 -07002269 einfo->xprt_if.allocate_rx_intent = allocate_rx_intent;
2270 einfo->xprt_if.deallocate_rx_intent = deallocate_rx_intent;
2271 einfo->xprt_if.tx_cmd_local_rx_intent = tx_cmd_local_rx_intent;
2272 einfo->xprt_if.tx_cmd_local_rx_done = tx_cmd_local_rx_done;
2273 einfo->xprt_if.tx = tx;
2274 einfo->xprt_if.tx_cmd_rx_intent_req = tx_cmd_rx_intent_req;
2275 einfo->xprt_if.tx_cmd_remote_rx_intent_req_ack =
2276 tx_cmd_remote_rx_intent_req_ack;
2277 einfo->xprt_if.tx_cmd_set_sigs = tx_cmd_set_sigs;
2278 einfo->xprt_if.poll = poll;
2279 einfo->xprt_if.mask_rx_irq = mask_rx_irq;
2280 einfo->xprt_if.wait_link_down = wait_link_down;
2281 einfo->xprt_if.tx_cmd_tracer_pkt = tx_cmd_tracer_pkt;
2282 einfo->xprt_if.get_power_vote_ramp_time = get_power_vote_ramp_time;
2283 einfo->xprt_if.power_vote = power_vote;
2284 einfo->xprt_if.power_unvote = power_unvote;
Chris Lewa9a78ae2017-05-11 16:47:37 -07002285 einfo->xprt_if.rx_rt_vote = rx_rt_vote;
2286 einfo->xprt_if.rx_rt_unvote = rx_rt_unvote;
Chris Lewfa6135e2016-08-01 13:29:46 -07002287}
2288
2289/**
2290 * init_xprt_cfg() - initialize the xprt_cfg for an edge
2291 * @einfo: The edge to initialize.
2292 * @name: The name of the remote side this edge communicates to.
2293 */
2294static void init_xprt_cfg(struct edge_info *einfo, const char *name)
2295{
2296 einfo->xprt_cfg.name = XPRT_NAME;
2297 einfo->xprt_cfg.edge = name;
2298 einfo->xprt_cfg.versions = versions;
2299 einfo->xprt_cfg.versions_entries = ARRAY_SIZE(versions);
2300 einfo->xprt_cfg.max_cid = SZ_64K;
2301 einfo->xprt_cfg.max_iid = SZ_2G;
2302}
2303
2304/**
2305 * parse_qos_dt_params() - Parse the power states from DT
2306 * @node: Device tree node for a specific edge.
2307 * @einfo: Edge information for the edge whose probe function is called.
2308 *
2309 * Return: 0 on success, standard error code otherwise.
2310 */
2311static int parse_qos_dt_params(struct device_node *node,
2312 struct edge_info *einfo)
2313{
2314 int rc;
2315 int i;
2316 char *key;
2317 uint32_t *arr32;
2318 uint32_t num_states;
2319
2320 key = "qcom,ramp-time";
2321 if (!of_find_property(node, key, &num_states))
2322 return -ENODEV;
2323
2324 num_states /= sizeof(uint32_t);
2325
2326 einfo->num_pw_states = num_states;
2327
2328 arr32 = kmalloc_array(num_states, sizeof(uint32_t), GFP_KERNEL);
2329 if (!arr32)
2330 return -ENOMEM;
2331
2332 einfo->ramp_time_us = kmalloc_array(num_states, sizeof(unsigned long),
2333 GFP_KERNEL);
2334 if (!einfo->ramp_time_us) {
2335 rc = -ENOMEM;
2336 goto mem_alloc_fail;
2337 }
2338
2339 rc = of_property_read_u32_array(node, key, arr32, num_states);
2340 if (rc) {
2341 rc = -ENODEV;
2342 goto invalid_key;
2343 }
2344 for (i = 0; i < num_states; i++)
2345 einfo->ramp_time_us[i] = arr32[i];
2346
2347 rc = 0;
Dhoat Harpal650c5302017-08-03 20:27:38 +05302348 kfree(arr32);
Chris Lewfa6135e2016-08-01 13:29:46 -07002349 return rc;
2350
2351invalid_key:
2352 kfree(einfo->ramp_time_us);
2353mem_alloc_fail:
2354 kfree(arr32);
2355 return rc;
2356}
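
/*
 * Example DT fragment (illustrative values only):
 *
 *	qcom,ramp-time = <10 50 100>;
 *
 * parses to num_pw_states = 3 and ramp_time_us = {10, 50, 100}, one
 * entry per power state, indexed by the state argument of
 * get_power_vote_ramp_time().
 */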
2357
2358/**
2359 * subsys_name_to_id() - translate a subsystem name to a processor id
2360 * @name: The subsystem name to look up.
2361 *
2362 * Return: The processor id corresponding to @name or standard Linux error code.
2363 */
2364static int subsys_name_to_id(const char *name)
2365{
2366 if (!name)
2367 return -ENODEV;
2368
2369 if (!strcmp(name, "apss"))
2370 return SMEM_APPS;
2371 if (!strcmp(name, "dsps"))
2372 return SMEM_DSPS;
2373 if (!strcmp(name, "lpass"))
2374 return SMEM_Q6;
2375 if (!strcmp(name, "mpss"))
2376 return SMEM_MODEM;
2377 if (!strcmp(name, "rpm"))
2378 return SMEM_RPM;
2379 if (!strcmp(name, "wcnss"))
2380 return SMEM_WCNSS;
2381 if (!strcmp(name, "spss"))
2382 return SMEM_SPSS;
2383 if (!strcmp(name, "cdsp"))
2384 return SMEM_CDSP;
2385 return -ENODEV;
2386}
2387
Chris Lew490a42a2017-10-02 15:20:54 -07002388static void glink_set_affinity(struct edge_info *einfo, u32 *arr, size_t size)
2389{
2390 struct cpumask cpumask;
2391 pid_t pid;
2392 int i;
2393
2394 cpumask_clear(&cpumask);
2395 for (i = 0; i < size; i++) {
2396 if (arr[i] < num_possible_cpus())
2397 cpumask_set_cpu(arr[i], &cpumask);
2398 }
2399 if (irq_set_affinity(einfo->irq_line, &cpumask))
2400 pr_err("%s: Failed to set irq affinity\n", __func__);
2401
2402 if (sched_setaffinity(einfo->task->pid, &cpumask))
2403 pr_err("%s: Failed to set rx cpu affinity\n", __func__);
2404
2405 pid = einfo->xprt_cfg.tx_task->pid;
2406 if (sched_setaffinity(pid, &cpumask))
2407 pr_err("%s: Failed to set tx cpu affinity\n", __func__);
2408}
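
/*
 * Example DT fragment (illustrative): "cpu-affinity = <0 1>;" builds a
 * cpumask of CPUs 0 and 1 and applies it to the edge interrupt, the RX
 * kthread and the core TX task; entries >= num_possible_cpus() are
 * skipped by the loop above.
 */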
2409
Chris Lewfa6135e2016-08-01 13:29:46 -07002410static int glink_smem_native_probe(struct platform_device *pdev)
2411{
2412 struct device_node *node;
2413 struct device_node *phandle_node;
2414 struct edge_info *einfo;
Chris Lew490a42a2017-10-02 15:20:54 -07002415 int rc, cpu_size;
Chris Lewfa6135e2016-08-01 13:29:46 -07002416 char *key;
2417 const char *subsys_name;
2418 uint32_t irq_line;
2419 uint32_t irq_mask;
2420 struct resource *r;
Chris Lew490a42a2017-10-02 15:20:54 -07002421 u32 *cpu_array;
Dhoat Harpal4a052812017-10-06 22:14:10 +05302422 char log_name[GLINK_NAME_SIZE*2+7] = {0};
Chris Lewfa6135e2016-08-01 13:29:46 -07002423
2424 node = pdev->dev.of_node;
2425
2426 einfo = kzalloc(sizeof(*einfo), GFP_KERNEL);
2427 if (!einfo) {
2428 rc = -ENOMEM;
2429 goto edge_info_alloc_fail;
2430 }
2431
2432 key = "label";
2433 subsys_name = of_get_property(node, key, NULL);
2434 if (!subsys_name) {
2435 pr_err("%s: missing key %s\n", __func__, key);
2436 rc = -ENODEV;
2437 goto missing_key;
2438 }
2439
2440 key = "interrupts";
2441 irq_line = irq_of_parse_and_map(node, 0);
2442 if (!irq_line) {
2443 pr_err("%s: missing key %s\n", __func__, key);
2444 rc = -ENODEV;
2445 goto missing_key;
2446 }
2447
2448 key = "qcom,irq-mask";
2449 rc = of_property_read_u32(node, key, &irq_mask);
2450 if (rc) {
2451 pr_err("%s: missing key %s\n", __func__, key);
2452 rc = -ENODEV;
2453 goto missing_key;
2454 }
2455
2456 key = "irq-reg-base";
2457 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
2458 if (!r) {
2459 pr_err("%s: missing key %s\n", __func__, key);
2460 rc = -ENODEV;
2461 goto missing_key;
2462 }
2463
2464 if (subsys_name_to_id(subsys_name) == -ENODEV) {
2465 pr_err("%s: unknown subsystem: %s\n", __func__, subsys_name);
2466 rc = -ENODEV;
2467 goto invalid_key;
2468 }
2469 einfo->remote_proc_id = subsys_name_to_id(subsys_name);
2470
2471 init_xprt_cfg(einfo, subsys_name);
2472 init_xprt_if(einfo);
2473 spin_lock_init(&einfo->write_lock);
2474 init_waitqueue_head(&einfo->tx_blocked_queue);
Kyle Yan65be4a52016-10-31 15:05:00 -07002475 kthread_init_work(&einfo->kwork, rx_worker);
2476 kthread_init_worker(&einfo->kworker);
Chris Lewfa6135e2016-08-01 13:29:46 -07002477 einfo->read_from_fifo = read_from_fifo;
2478 einfo->write_to_fifo = write_to_fifo;
2479 init_srcu_struct(&einfo->use_ref);
2480 spin_lock_init(&einfo->rx_lock);
2481 INIT_LIST_HEAD(&einfo->deferred_cmds);
Chris Lewa9a78ae2017-05-11 16:47:37 -07002482 spin_lock_init(&einfo->rt_vote_lock);
2483 einfo->rt_votes = 0;
Chris Lewfa6135e2016-08-01 13:29:46 -07002484
2485 mutex_lock(&probe_lock);
2486 if (edge_infos[einfo->remote_proc_id]) {
2487 pr_err("%s: duplicate subsys %s is not valid\n", __func__,
2488 subsys_name);
2489 rc = -ENODEV;
2490 mutex_unlock(&probe_lock);
2491 goto invalid_key;
2492 }
2493 edge_infos[einfo->remote_proc_id] = einfo;
2494 mutex_unlock(&probe_lock);
2495
2496 einfo->out_irq_mask = irq_mask;
2497 einfo->out_irq_reg = ioremap_nocache(r->start, resource_size(r));
2498 if (!einfo->out_irq_reg) {
2499 pr_err("%s: unable to map irq reg\n", __func__);
2500 rc = -ENOMEM;
2501 goto ioremap_fail;
2502 }
2503
2504 einfo->task = kthread_run(kthread_worker_fn, &einfo->kworker,
2505 "smem_native_%s", subsys_name);
2506 if (IS_ERR(einfo->task)) {
2507 rc = PTR_ERR(einfo->task);
2508 pr_err("%s: kthread_run failed %d\n", __func__, rc);
2509 goto kthread_fail;
2510 }
2511
2512 einfo->tx_ch_desc = smem_alloc(SMEM_GLINK_NATIVE_XPRT_DESCRIPTOR,
2513 SMEM_CH_DESC_SIZE,
2514 einfo->remote_proc_id,
2515 0);
2516 if (PTR_ERR(einfo->tx_ch_desc) == -EPROBE_DEFER) {
2517 rc = -EPROBE_DEFER;
2518 goto smem_alloc_fail;
2519 }
2520 if (!einfo->tx_ch_desc) {
2521 pr_err("%s: smem alloc of ch descriptor failed\n", __func__);
2522 rc = -ENOMEM;
2523 goto smem_alloc_fail;
2524 }
2525 einfo->rx_ch_desc = einfo->tx_ch_desc + 1;
2526
2527 einfo->tx_fifo_size = SZ_16K;
2528 einfo->tx_fifo = smem_alloc(SMEM_GLINK_NATIVE_XPRT_FIFO_0,
2529 einfo->tx_fifo_size,
2530 einfo->remote_proc_id,
Dhoat Harpal07eb7032017-04-19 11:46:59 +05302531 0);
Chris Lewfa6135e2016-08-01 13:29:46 -07002532 if (!einfo->tx_fifo) {
2533 pr_err("%s: smem alloc of tx fifo failed\n", __func__);
2534 rc = -ENOMEM;
2535 goto smem_alloc_fail;
2536 }
2537
2538 key = "qcom,qos-config";
2539 phandle_node = of_parse_phandle(node, key, 0);
2540 if (phandle_node && !(of_get_glink_core_qos_cfg(phandle_node,
2541 &einfo->xprt_cfg)))
2542 parse_qos_dt_params(node, einfo);
2543
2544 rc = glink_core_register_transport(&einfo->xprt_if, &einfo->xprt_cfg);
2545 if (rc == -EPROBE_DEFER)
2546 goto reg_xprt_fail;
2547 if (rc) {
2548 pr_err("%s: glink core register transport failed: %d\n",
2549 __func__, rc);
2550 goto reg_xprt_fail;
2551 }
2552
2553 einfo->irq_line = irq_line;
2554 rc = request_irq(irq_line, irq_handler,
Chris Lewdc33e3a2018-04-04 10:19:32 -07002555 IRQF_TRIGGER_RISING | IRQF_SHARED,
Chris Lewfa6135e2016-08-01 13:29:46 -07002556 node->name, einfo);
2557 if (rc < 0) {
2558 pr_err("%s: request_irq on %d failed: %d\n", __func__, irq_line,
2559 rc);
2560 goto request_irq_fail;
2561 }
Dhoat Harpale4811372017-12-18 21:05:20 +05302562 einfo->in_ssr = true;
Chris Lewfa6135e2016-08-01 13:29:46 -07002563 rc = enable_irq_wake(irq_line);
2564 if (rc < 0)
2565 pr_err("%s: enable_irq_wake() failed on %d\n", __func__,
2566 irq_line);
2567
Chris Lew490a42a2017-10-02 15:20:54 -07002568 key = "cpu-affinity";
2569 cpu_size = of_property_count_u32_elems(node, key);
2570 if (cpu_size > 0) {
2571 cpu_array = kmalloc_array(cpu_size, sizeof(u32), GFP_KERNEL);
2572 if (!cpu_array) {
2573 rc = -ENOMEM;
2574 goto request_irq_fail;
2575 }
2576 rc = of_property_read_u32_array(node, key, cpu_array, cpu_size);
2577 if (!rc)
2578 glink_set_affinity(einfo, cpu_array, cpu_size);
2579 kfree(cpu_array);
2580 }
2581
Dhoat Harpal4a052812017-10-06 22:14:10 +05302582 einfo->debug_mask = QCOM_GLINK_DEBUG_ENABLE;
2583 snprintf(log_name, sizeof(log_name), "%s_%s_xprt",
2584 einfo->xprt_cfg.edge, einfo->xprt_cfg.name);
2585 if (einfo->debug_mask & QCOM_GLINK_DEBUG_ENABLE)
2586 einfo->log_ctx =
2587 ipc_log_context_create(NUM_LOG_PAGES, log_name, 0);
2588	if ((einfo->debug_mask & QCOM_GLINK_DEBUG_ENABLE) && !einfo->log_ctx)
2589 GLINK_ERR("%s: unable to create log context for [%s:%s]\n",
2590 __func__, einfo->xprt_cfg.edge,
2591 einfo->xprt_cfg.name);
Chris Lewfa6135e2016-08-01 13:29:46 -07002592 register_debugfs_info(einfo);
2593 /* fake an interrupt on this edge to see if the remote side is up */
2594 irq_handler(0, einfo);
2595 return 0;
2596
2597request_irq_fail:
2598 glink_core_unregister_transport(&einfo->xprt_if);
2599reg_xprt_fail:
2600smem_alloc_fail:
Kyle Yan65be4a52016-10-31 15:05:00 -07002601 kthread_flush_worker(&einfo->kworker);
Chris Lewfa6135e2016-08-01 13:29:46 -07002602 kthread_stop(einfo->task);
2603 einfo->task = NULL;
Chris Lewfa6135e2016-08-01 13:29:46 -07002604kthread_fail:
2605 iounmap(einfo->out_irq_reg);
2606ioremap_fail:
2607 mutex_lock(&probe_lock);
2608 edge_infos[einfo->remote_proc_id] = NULL;
2609 mutex_unlock(&probe_lock);
2610invalid_key:
2611missing_key:
2612 kfree(einfo);
2613edge_info_alloc_fail:
2614 return rc;
2615}
2616
2617static int glink_rpm_native_probe(struct platform_device *pdev)
2618{
2619 struct device_node *node;
2620 struct edge_info *einfo;
2621 int rc;
2622 char *key;
2623 const char *subsys_name;
2624 uint32_t irq_line;
2625 uint32_t irq_mask;
2626 struct resource *irq_r;
2627 struct resource *msgram_r;
2628 void __iomem *msgram;
2629 char toc[RPM_TOC_SIZE];
2630 uint32_t *tocp;
2631 uint32_t num_toc_entries;
Dhoat Harpal4a052812017-10-06 22:14:10 +05302632 char log_name[GLINK_NAME_SIZE*2+7] = {0};
Chris Lewfa6135e2016-08-01 13:29:46 -07002633
2634 node = pdev->dev.of_node;
2635
2636 einfo = kzalloc(sizeof(*einfo), GFP_KERNEL);
2637 if (!einfo) {
2638 rc = -ENOMEM;
2639 goto edge_info_alloc_fail;
2640 }
2641
2642 subsys_name = "rpm";
2643
2644 key = "interrupts";
2645 irq_line = irq_of_parse_and_map(node, 0);
2646 if (!irq_line) {
2647 pr_err("%s: missing key %s\n", __func__, key);
2648 rc = -ENODEV;
2649 goto missing_key;
2650 }
2651
2652 key = "qcom,irq-mask";
2653 rc = of_property_read_u32(node, key, &irq_mask);
2654 if (rc) {
2655 pr_err("%s: missing key %s\n", __func__, key);
2656 rc = -ENODEV;
2657 goto missing_key;
2658 }
2659
2660 key = "irq-reg-base";
2661 irq_r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
2662 if (!irq_r) {
2663 pr_err("%s: missing key %s\n", __func__, key);
2664 rc = -ENODEV;
2665 goto missing_key;
2666 }
2667
2668 key = "msgram";
2669 msgram_r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
2670 if (!msgram_r) {
2671 pr_err("%s: missing key %s\n", __func__, key);
2672 rc = -ENODEV;
2673 goto missing_key;
2674 }
2675
2676 if (subsys_name_to_id(subsys_name) == -ENODEV) {
2677 pr_err("%s: unknown subsystem: %s\n", __func__, subsys_name);
2678 rc = -ENODEV;
2679 goto invalid_key;
2680 }
2681 einfo->remote_proc_id = subsys_name_to_id(subsys_name);
2682
2683 init_xprt_cfg(einfo, subsys_name);
2684 init_xprt_if(einfo);
2685 spin_lock_init(&einfo->write_lock);
2686 init_waitqueue_head(&einfo->tx_blocked_queue);
Kyle Yan65be4a52016-10-31 15:05:00 -07002687 kthread_init_work(&einfo->kwork, rx_worker);
2688 kthread_init_worker(&einfo->kworker);
Chris Lewfa6135e2016-08-01 13:29:46 -07002689 einfo->intentless = true;
2690 einfo->read_from_fifo = memcpy32_fromio;
2691 einfo->write_to_fifo = memcpy32_toio;
2692 init_srcu_struct(&einfo->use_ref);
2693 spin_lock_init(&einfo->rx_lock);
2694 INIT_LIST_HEAD(&einfo->deferred_cmds);
2695
2696 mutex_lock(&probe_lock);
2697 if (edge_infos[einfo->remote_proc_id]) {
2698 pr_err("%s: duplicate subsys %s is not valid\n", __func__,
2699 subsys_name);
2700 rc = -ENODEV;
2701 mutex_unlock(&probe_lock);
2702 goto invalid_key;
2703 }
2704 edge_infos[einfo->remote_proc_id] = einfo;
2705 mutex_unlock(&probe_lock);
2706
2707 einfo->out_irq_mask = irq_mask;
2708 einfo->out_irq_reg = ioremap_nocache(irq_r->start,
2709 resource_size(irq_r));
2710 if (!einfo->out_irq_reg) {
2711 pr_err("%s: unable to map irq reg\n", __func__);
2712 rc = -ENOMEM;
2713 goto irq_ioremap_fail;
2714 }
2715
2716 msgram = ioremap_nocache(msgram_r->start, resource_size(msgram_r));
2717 if (!msgram) {
2718 pr_err("%s: unable to map msgram\n", __func__);
2719 rc = -ENOMEM;
2720 goto msgram_ioremap_fail;
2721 }
2722
2723 einfo->task = kthread_run(kthread_worker_fn, &einfo->kworker,
2724 "smem_native_%s", subsys_name);
2725 if (IS_ERR(einfo->task)) {
2726 rc = PTR_ERR(einfo->task);
2727 pr_err("%s: kthread_run failed %d\n", __func__, rc);
2728 goto kthread_fail;
2729 }
2730
2731 memcpy32_fromio(toc, msgram + resource_size(msgram_r) - RPM_TOC_SIZE,
2732 RPM_TOC_SIZE);
2733 tocp = (uint32_t *)toc;
2734 if (*tocp != RPM_TOC_ID) {
2735 rc = -ENODEV;
2736 pr_err("%s: TOC id %d is not valid\n", __func__, *tocp);
2737 goto toc_init_fail;
2738 }
2739 ++tocp;
2740 num_toc_entries = *tocp;
2741 if (num_toc_entries > RPM_MAX_TOC_ENTRIES) {
2742 rc = -ENODEV;
2743 pr_err("%s: %d is too many toc entries\n", __func__,
2744 num_toc_entries);
2745 goto toc_init_fail;
2746 }
2747 ++tocp;
2748
2749 for (rc = 0; rc < num_toc_entries; ++rc) {
2750 if (*tocp != RPM_TX_FIFO_ID) {
2751 tocp += 3;
2752 continue;
2753 }
2754 ++tocp;
2755 einfo->tx_ch_desc = msgram + *tocp;
2756 einfo->tx_fifo = einfo->tx_ch_desc + 1;
2757 if ((uintptr_t)einfo->tx_fifo >
2758 (uintptr_t)(msgram + resource_size(msgram_r))) {
2759 pr_err("%s: invalid tx fifo address\n", __func__);
2760 einfo->tx_fifo = NULL;
2761 break;
2762 }
2763 ++tocp;
2764 einfo->tx_fifo_size = *tocp;
2765 if (einfo->tx_fifo_size > resource_size(msgram_r) ||
2766 (uintptr_t)(einfo->tx_fifo + einfo->tx_fifo_size) >
2767 (uintptr_t)(msgram + resource_size(msgram_r))) {
2768 pr_err("%s: invalid tx fifo size\n", __func__);
2769 einfo->tx_fifo = NULL;
2770 break;
2771 }
2772 break;
2773 }
2774 if (!einfo->tx_fifo) {
2775 rc = -ENODEV;
2776 pr_err("%s: tx fifo not found\n", __func__);
2777 goto toc_init_fail;
2778 }
2779
2780 tocp = (uint32_t *)toc;
2781 tocp += 2;
2782 for (rc = 0; rc < num_toc_entries; ++rc) {
2783 if (*tocp != RPM_RX_FIFO_ID) {
2784 tocp += 3;
2785 continue;
2786 }
2787 ++tocp;
2788 einfo->rx_ch_desc = msgram + *tocp;
2789 einfo->rx_fifo = einfo->rx_ch_desc + 1;
2790 if ((uintptr_t)einfo->rx_fifo >
2791 (uintptr_t)(msgram + resource_size(msgram_r))) {
2792 pr_err("%s: invalid rx fifo address\n", __func__);
2793 einfo->rx_fifo = NULL;
2794 break;
2795 }
2796 ++tocp;
2797 einfo->rx_fifo_size = *tocp;
2798 if (einfo->rx_fifo_size > resource_size(msgram_r) ||
2799 (uintptr_t)(einfo->rx_fifo + einfo->rx_fifo_size) >
2800 (uintptr_t)(msgram + resource_size(msgram_r))) {
2801 pr_err("%s: invalid rx fifo size\n", __func__);
2802 einfo->rx_fifo = NULL;
2803 break;
2804 }
2805 break;
2806 }
2807 if (!einfo->rx_fifo) {
2808 rc = -ENODEV;
2809 pr_err("%s: rx fifo not found\n", __func__);
2810 goto toc_init_fail;
2811 }
2812
2813 einfo->tx_ch_desc->write_index = 0;
2814 einfo->rx_ch_desc->read_index = 0;
2815
2816 rc = glink_core_register_transport(&einfo->xprt_if, &einfo->xprt_cfg);
2817 if (rc == -EPROBE_DEFER)
2818 goto reg_xprt_fail;
2819 if (rc) {
2820 pr_err("%s: glink core register transport failed: %d\n",
2821 __func__, rc);
2822 goto reg_xprt_fail;
2823 }
2824
2825 einfo->irq_line = irq_line;
2826 rc = request_irq(irq_line, irq_handler,
2827 IRQF_TRIGGER_RISING | IRQF_NO_SUSPEND | IRQF_SHARED,
2828 node->name, einfo);
2829 if (rc < 0) {
2830 pr_err("%s: request_irq on %d failed: %d\n", __func__, irq_line,
2831 rc);
2832 goto request_irq_fail;
2833 }
2834 rc = enable_irq_wake(irq_line);
2835 if (rc < 0)
2836 pr_err("%s: enable_irq_wake() failed on %d\n", __func__,
2837 irq_line);
Dhoat Harpal4a052812017-10-06 22:14:10 +05302838 einfo->debug_mask = QCOM_GLINK_DEBUG_DISABLE;
2839 snprintf(log_name, sizeof(log_name), "%s_%s_xprt",
2840 einfo->xprt_cfg.edge, einfo->xprt_cfg.name);
2841 if (einfo->debug_mask & QCOM_GLINK_DEBUG_ENABLE)
2842 einfo->log_ctx =
2843 ipc_log_context_create(NUM_LOG_PAGES, log_name, 0);
2844	if ((einfo->debug_mask & QCOM_GLINK_DEBUG_ENABLE) && !einfo->log_ctx)
2845 GLINK_ERR("%s: unable to create log context for [%s:%s]\n",
2846 __func__, einfo->xprt_cfg.edge,
2847 einfo->xprt_cfg.name);
Chris Lewfa6135e2016-08-01 13:29:46 -07002848 register_debugfs_info(einfo);
2849 einfo->xprt_if.glink_core_if_ptr->link_up(&einfo->xprt_if);
2850 return 0;
2851
2852request_irq_fail:
2853 glink_core_unregister_transport(&einfo->xprt_if);
2854reg_xprt_fail:
2855toc_init_fail:
Kyle Yan65be4a52016-10-31 15:05:00 -07002856 kthread_flush_worker(&einfo->kworker);
Chris Lewfa6135e2016-08-01 13:29:46 -07002857 kthread_stop(einfo->task);
2858 einfo->task = NULL;
Chris Lewfa6135e2016-08-01 13:29:46 -07002859kthread_fail:
2860 iounmap(msgram);
2861msgram_ioremap_fail:
2862 iounmap(einfo->out_irq_reg);
2863irq_ioremap_fail:
2864 mutex_lock(&probe_lock);
2865 edge_infos[einfo->remote_proc_id] = NULL;
2866 mutex_unlock(&probe_lock);
2867invalid_key:
2868missing_key:
2869 kfree(einfo);
2870edge_info_alloc_fail:
2871 return rc;
2872}
2873
2874static int glink_mailbox_probe(struct platform_device *pdev)
2875{
2876 struct device_node *node;
2877 struct edge_info *einfo;
2878 int rc;
2879 char *key;
2880 const char *subsys_name;
2881 uint32_t irq_line;
2882 uint32_t irq_mask;
2883 struct resource *irq_r;
2884 struct resource *mbox_loc_r;
2885 struct resource *mbox_size_r;
2886 struct resource *rx_reset_r;
2887 void *mbox_loc;
2888 void *mbox_size;
2889 struct mailbox_config_info *mbox_cfg;
2890 uint32_t mbox_cfg_size;
2891 phys_addr_t cfg_p_addr;
Dhoat Harpal4a052812017-10-06 22:14:10 +05302892 char log_name[GLINK_NAME_SIZE*2+7] = {0};
Chris Lewfa6135e2016-08-01 13:29:46 -07002893
2894 node = pdev->dev.of_node;
2895
2896 einfo = kzalloc(sizeof(*einfo), GFP_KERNEL);
2897 if (!einfo) {
2898 rc = -ENOMEM;
2899 goto edge_info_alloc_fail;
2900 }
2901
2902 key = "label";
2903 subsys_name = of_get_property(node, key, NULL);
2904 if (!subsys_name) {
2905 pr_err("%s: missing key %s\n", __func__, key);
2906 rc = -ENODEV;
2907 goto missing_key;
2908 }
2909
2910 key = "interrupts";
2911 irq_line = irq_of_parse_and_map(node, 0);
2912 if (!irq_line) {
2913 pr_err("%s: missing key %s\n", __func__, key);
2914 rc = -ENODEV;
2915 goto missing_key;
2916 }
2917
2918 key = "qcom,irq-mask";
2919 rc = of_property_read_u32(node, key, &irq_mask);
2920 if (rc) {
2921 pr_err("%s: missing key %s\n", __func__, key);
2922 rc = -ENODEV;
2923 goto missing_key;
2924 }
2925
2926 key = "irq-reg-base";
2927 irq_r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
2928 if (!irq_r) {
2929 pr_err("%s: missing key %s\n", __func__, key);
2930 rc = -ENODEV;
2931 goto missing_key;
2932 }
2933
2934 key = "mbox-loc-addr";
2935 mbox_loc_r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
2936 if (!mbox_loc_r) {
2937 pr_err("%s: missing key %s\n", __func__, key);
2938 rc = -ENODEV;
2939 goto missing_key;
2940 }
2941
2942 key = "mbox-loc-size";
2943 mbox_size_r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
2944 if (!mbox_size_r) {
2945 pr_err("%s: missing key %s\n", __func__, key);
2946 rc = -ENODEV;
2947 goto missing_key;
2948 }
2949
2950 key = "irq-rx-reset";
2951 rx_reset_r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
2952 if (!rx_reset_r) {
2953 pr_err("%s: missing key %s\n", __func__, key);
2954 rc = -ENODEV;
2955 goto missing_key;
2956 }
2957
2958 key = "qcom,tx-ring-size";
2959 rc = of_property_read_u32(node, key, &einfo->tx_fifo_size);
2960 if (rc) {
2961 pr_err("%s: missing key %s\n", __func__, key);
2962 rc = -ENODEV;
2963 goto missing_key;
2964 }
2965
2966 key = "qcom,rx-ring-size";
2967 rc = of_property_read_u32(node, key, &einfo->rx_fifo_size);
2968 if (rc) {
2969 pr_err("%s: missing key %s\n", __func__, key);
2970 rc = -ENODEV;
2971 goto missing_key;
2972 }
2973
2974 if (subsys_name_to_id(subsys_name) == -ENODEV) {
2975 pr_err("%s: unknown subsystem: %s\n", __func__, subsys_name);
2976 rc = -ENODEV;
2977 goto invalid_key;
2978 }
2979 einfo->remote_proc_id = subsys_name_to_id(subsys_name);
2980
2981 init_xprt_cfg(einfo, subsys_name);
2982 einfo->xprt_cfg.name = "mailbox";
2983 init_xprt_if(einfo);
2984 spin_lock_init(&einfo->write_lock);
2985 init_waitqueue_head(&einfo->tx_blocked_queue);
Kyle Yan65be4a52016-10-31 15:05:00 -07002986 kthread_init_work(&einfo->kwork, rx_worker);
2987 kthread_init_worker(&einfo->kworker);
Chris Lewfa6135e2016-08-01 13:29:46 -07002988 einfo->read_from_fifo = read_from_fifo;
2989 einfo->write_to_fifo = write_to_fifo;
2990 init_srcu_struct(&einfo->use_ref);
2991 spin_lock_init(&einfo->rx_lock);
2992 INIT_LIST_HEAD(&einfo->deferred_cmds);
2993
2994 mutex_lock(&probe_lock);
2995 if (edge_infos[einfo->remote_proc_id]) {
2996 pr_err("%s: duplicate subsys %s is not valid\n", __func__,
2997 subsys_name);
2998 rc = -ENODEV;
2999 mutex_unlock(&probe_lock);
3000 goto invalid_key;
3001 }
3002 edge_infos[einfo->remote_proc_id] = einfo;
3003 mutex_unlock(&probe_lock);
3004
3005 einfo->out_irq_mask = irq_mask;
3006 einfo->out_irq_reg = ioremap_nocache(irq_r->start,
3007 resource_size(irq_r));
3008 if (!einfo->out_irq_reg) {
3009 pr_err("%s: unable to map irq reg\n", __func__);
3010 rc = -ENOMEM;
3011 goto irq_ioremap_fail;
3012 }
3013
3014 mbox_loc = ioremap_nocache(mbox_loc_r->start,
3015 resource_size(mbox_loc_r));
3016 if (!mbox_loc) {
3017 pr_err("%s: unable to map mailbox location reg\n", __func__);
3018 rc = -ENOMEM;
3019 goto mbox_loc_ioremap_fail;
3020 }
3021
3022 mbox_size = ioremap_nocache(mbox_size_r->start,
3023 resource_size(mbox_size_r));
3024 if (!mbox_size) {
3025 pr_err("%s: unable to map mailbox size reg\n", __func__);
3026 rc = -ENOMEM;
3027 goto mbox_size_ioremap_fail;
3028 }
3029
3030 einfo->rx_reset_reg = ioremap_nocache(rx_reset_r->start,
3031 resource_size(rx_reset_r));
3032 if (!einfo->rx_reset_reg) {
3033 pr_err("%s: unable to map rx reset reg\n", __func__);
3034 rc = -ENOMEM;
3035 goto rx_reset_ioremap_fail;
3036 }
3037
3038 einfo->task = kthread_run(kthread_worker_fn, &einfo->kworker,
3039 "smem_native_%s", subsys_name);
3040 if (IS_ERR(einfo->task)) {
3041 rc = PTR_ERR(einfo->task);
3042 pr_err("%s: kthread_run failed %d\n", __func__, rc);
3043 goto kthread_fail;
3044 }
3045
3046 mbox_cfg_size = sizeof(*mbox_cfg) + einfo->tx_fifo_size +
3047 einfo->rx_fifo_size;
3048 mbox_cfg = smem_alloc(SMEM_GLINK_NATIVE_XPRT_DESCRIPTOR,
3049 mbox_cfg_size,
3050 einfo->remote_proc_id,
3051 0);
3052 if (PTR_ERR(mbox_cfg) == -EPROBE_DEFER) {
3053 rc = -EPROBE_DEFER;
3054 goto smem_alloc_fail;
3055 }
3056 if (!mbox_cfg) {
3057 pr_err("%s: smem alloc of mailbox struct failed\n", __func__);
3058 rc = -ENOMEM;
3059 goto smem_alloc_fail;
3060 }
3061 einfo->mailbox = mbox_cfg;
3062 einfo->tx_ch_desc = (struct channel_desc *)(&mbox_cfg->tx_read_index);
3063 einfo->rx_ch_desc = (struct channel_desc *)(&mbox_cfg->rx_read_index);
3064 mbox_cfg->tx_size = einfo->tx_fifo_size;
3065 mbox_cfg->rx_size = einfo->rx_fifo_size;
3066 einfo->tx_fifo = &mbox_cfg->fifo[0];
3067
3068 rc = glink_core_register_transport(&einfo->xprt_if, &einfo->xprt_cfg);
3069 if (rc == -EPROBE_DEFER)
3070 goto reg_xprt_fail;
3071 if (rc) {
3072 pr_err("%s: glink core register transport failed: %d\n",
3073 __func__, rc);
3074 goto reg_xprt_fail;
3075 }
3076
3077 einfo->irq_line = irq_line;
3078 rc = request_irq(irq_line, irq_handler,
3079 IRQF_TRIGGER_HIGH | IRQF_NO_SUSPEND | IRQF_SHARED,
3080 node->name, einfo);
3081 if (rc < 0) {
3082 pr_err("%s: request_irq on %d failed: %d\n", __func__, irq_line,
3083 rc);
3084 goto request_irq_fail;
3085 }
3086 rc = enable_irq_wake(irq_line);
3087 if (rc < 0)
3088 pr_err("%s: enable_irq_wake() failed on %d\n", __func__,
3089 irq_line);
Dhoat Harpal4a052812017-10-06 22:14:10 +05303090 einfo->debug_mask = QCOM_GLINK_DEBUG_DISABLE;
3091 snprintf(log_name, sizeof(log_name), "%s_%s_xprt",
3092 einfo->xprt_cfg.edge, einfo->xprt_cfg.name);
3093 if (einfo->debug_mask & QCOM_GLINK_DEBUG_ENABLE)
3094 einfo->log_ctx =
3095 ipc_log_context_create(NUM_LOG_PAGES, log_name, 0);
3096	if ((einfo->debug_mask & QCOM_GLINK_DEBUG_ENABLE) && !einfo->log_ctx)
3097 GLINK_ERR("%s: unable to create log context for [%s:%s]\n",
3098 __func__, einfo->xprt_cfg.edge,
3099 einfo->xprt_cfg.name);
Chris Lewfa6135e2016-08-01 13:29:46 -07003100 register_debugfs_info(einfo);
3101
3102 writel_relaxed(mbox_cfg_size, mbox_size);
3103 cfg_p_addr = smem_virt_to_phys(mbox_cfg);
3104 writel_relaxed(lower_32_bits(cfg_p_addr), mbox_loc);
3105 writel_relaxed(upper_32_bits(cfg_p_addr), mbox_loc + 4);
Dhoat Harpale4811372017-12-18 21:05:20 +05303106 einfo->in_ssr = true;
Chris Lewfa6135e2016-08-01 13:29:46 -07003107 send_irq(einfo);
3108 iounmap(mbox_size);
3109 iounmap(mbox_loc);
3110 return 0;
3111
3112request_irq_fail:
3113 glink_core_unregister_transport(&einfo->xprt_if);
3114reg_xprt_fail:
3115smem_alloc_fail:
Kyle Yan65be4a52016-10-31 15:05:00 -07003116 kthread_flush_worker(&einfo->kworker);
Chris Lewfa6135e2016-08-01 13:29:46 -07003117 kthread_stop(einfo->task);
3118 einfo->task = NULL;
Chris Lewfa6135e2016-08-01 13:29:46 -07003119kthread_fail:
3120 iounmap(einfo->rx_reset_reg);
3121rx_reset_ioremap_fail:
3122 iounmap(mbox_size);
3123mbox_size_ioremap_fail:
3124 iounmap(mbox_loc);
3125mbox_loc_ioremap_fail:
3126 iounmap(einfo->out_irq_reg);
3127irq_ioremap_fail:
3128 mutex_lock(&probe_lock);
3129 edge_infos[einfo->remote_proc_id] = NULL;
3130 mutex_unlock(&probe_lock);
3131invalid_key:
3132missing_key:
3133 kfree(einfo);
3134edge_info_alloc_fail:
3135 return rc;
3136}
3137
3138#if defined(CONFIG_DEBUG_FS)
3139/**
3140 * debug_edge() - generates formatted text output displaying current edge state
3141 * @s: File to send the output to.
3142 */
3143static void debug_edge(struct seq_file *s)
3144{
3145 struct edge_info *einfo;
3146 struct glink_dbgfs_data *dfs_d;
3147
3148 dfs_d = s->private;
3149 einfo = dfs_d->priv_data;
3150
3151/*
3152 * formatted, human readable edge state output, ie:
3153 * TX/RX fifo information:
3154ID|EDGE |TX READ |TX WRITE |TX SIZE |RX READ |RX WRITE |RX SIZE
3155-------------------------------------------------------------------------------
315601|mpss |0x00000128|0x00000128|0x00000800|0x00000256|0x00000256|0x00001000
3157 *
3158 * Interrupt information:
3159 * EDGE |TX INT |RX INT
3160 * --------------------------------
3161 * mpss |0x00000006|0x00000008
3162 */
3163 seq_puts(s, "TX/RX fifo information:\n");
3164 seq_printf(s, "%2s|%-10s|%-10s|%-10s|%-10s|%-10s|%-10s|%-10s\n",
3165 "ID",
3166 "EDGE",
3167 "TX READ",
3168 "TX WRITE",
3169 "TX SIZE",
3170 "RX READ",
3171 "RX WRITE",
3172 "RX SIZE");
3173 seq_puts(s,
3174 "-------------------------------------------------------------------------------\n");
3175 if (!einfo)
3176 return;
3177
3178 seq_printf(s, "%02i|%-10s|", einfo->remote_proc_id,
3179 einfo->xprt_cfg.edge);
3180 if (!einfo->rx_fifo)
3181 seq_puts(s, "Link Not Up\n");
3182 else
3183 seq_printf(s, "0x%08X|0x%08X|0x%08X|0x%08X|0x%08X|0x%08X\n",
3184 einfo->tx_ch_desc->read_index,
3185 einfo->tx_ch_desc->write_index,
3186 einfo->tx_fifo_size,
3187 einfo->rx_ch_desc->read_index,
3188 einfo->rx_ch_desc->write_index,
3189 einfo->rx_fifo_size);
3190
3191 seq_puts(s, "\nInterrupt information:\n");
3192 seq_printf(s, "%-10s|%-10s|%-10s\n", "EDGE", "TX INT", "RX INT");
3193 seq_puts(s, "--------------------------------\n");
3194 seq_printf(s, "%-10s|0x%08X|0x%08X\n", einfo->xprt_cfg.edge,
3195 einfo->tx_irq_count,
3196 einfo->rx_irq_count);
3197}
3198
3199/**
3200 * register_debugfs_info() - initialize debugfs device entries
3201 * @einfo: Pointer to specific edge_info for which register is called.
3202 */
3203static void register_debugfs_info(struct edge_info *einfo)
3204{
3205 struct glink_dbgfs dfs;
3206 char *curr_dir_name;
3207 int dir_name_len;
3208
3209 dir_name_len = strlen(einfo->xprt_cfg.edge) +
3210 strlen(einfo->xprt_cfg.name) + 2;
3211 curr_dir_name = kmalloc(dir_name_len, GFP_KERNEL);
3212 if (!curr_dir_name) {
3213 GLINK_ERR("%s: Memory allocation failed\n", __func__);
3214 return;
3215 }
3216
3217 snprintf(curr_dir_name, dir_name_len, "%s_%s",
3218 einfo->xprt_cfg.edge, einfo->xprt_cfg.name);
3219 dfs.curr_name = curr_dir_name;
3220 dfs.par_name = "xprt";
3221 dfs.b_dir_create = false;
3222 glink_debugfs_create("XPRT_INFO", debug_edge,
3223 &dfs, einfo, false);
3224 kfree(curr_dir_name);
3225}
3226
3227#else
3228static void register_debugfs_info(struct edge_info *einfo)
3229{
3230}
3231#endif /* CONFIG_DEBUG_FS */
3232
3233static const struct of_device_id smem_match_table[] = {
3234 { .compatible = "qcom,glink-smem-native-xprt" },
3235 {},
3236};
3237
3238static struct platform_driver glink_smem_native_driver = {
3239 .probe = glink_smem_native_probe,
3240 .driver = {
3241 .name = "msm_glink_smem_native_xprt",
3242 .owner = THIS_MODULE,
3243 .of_match_table = smem_match_table,
3244 },
3245};
3246
3247static const struct of_device_id rpm_match_table[] = {
3248 { .compatible = "qcom,glink-rpm-native-xprt" },
3249 {},
3250};
3251
3252static struct platform_driver glink_rpm_native_driver = {
3253 .probe = glink_rpm_native_probe,
3254 .driver = {
3255 .name = "msm_glink_rpm_native_xprt",
3256 .owner = THIS_MODULE,
3257 .of_match_table = rpm_match_table,
3258 },
3259};
3260
3261static const struct of_device_id mailbox_match_table[] = {
3262 { .compatible = "qcom,glink-mailbox-xprt" },
3263 {},
3264};
3265
3266static struct platform_driver glink_mailbox_driver = {
3267 .probe = glink_mailbox_probe,
3268 .driver = {
3269 .name = "msm_glink_mailbox_xprt",
3270 .owner = THIS_MODULE,
3271 .of_match_table = mailbox_match_table,
3272 },
3273};
3274
3275static int __init glink_smem_native_xprt_init(void)
3276{
3277 int rc;
3278
3279 rc = platform_driver_register(&glink_smem_native_driver);
3280 if (rc) {
3281 pr_err("%s: glink_smem_native_driver register failed %d\n",
3282 __func__, rc);
3283 return rc;
3284 }
3285
3286 rc = platform_driver_register(&glink_rpm_native_driver);
3287 if (rc) {
3288 pr_err("%s: glink_rpm_native_driver register failed %d\n",
3289 __func__, rc);
3290 return rc;
3291 }
3292
3293 rc = platform_driver_register(&glink_mailbox_driver);
3294 if (rc) {
3295 pr_err("%s: glink_mailbox_driver register failed %d\n",
3296 __func__, rc);
3297 return rc;
3298 }
3299
3300 return 0;
3301}
3302arch_initcall(glink_smem_native_xprt_init);
3303
3304MODULE_DESCRIPTION("MSM G-Link SMEM Native Transport");
3305MODULE_LICENSE("GPL v2");