/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <asm/arch_timer.h>
#include <linux/err.h>
#include <linux/ipc_logging.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/rwsem.h>
#include <linux/pm_qos.h>
#include <soc/qcom/glink.h>
#include <soc/qcom/tracer_pkt.h>
#include "glink_core_if.h"
#include "glink_private.h"
#include "glink_xprt_if.h"

/* Number of internal IPC Logging log pages */
#define NUM_LOG_PAGES 10
#define GLINK_PM_QOS_HOLDOFF_MS 10
#define GLINK_QOS_DEF_NUM_TOKENS 10
#define GLINK_QOS_DEF_NUM_PRIORITY 1
#define GLINK_QOS_DEF_MTU 2048

#define GLINK_CH_XPRT_NAME_SIZE ((3 * GLINK_NAME_SIZE) + 4)
#define GLINK_KTHREAD_PRIO 1

/**
 * struct glink_qos_priority_bin - Packet Scheduler's priority bucket
 * @max_rate_kBps:	Maximum rate supported by the priority bucket.
 * @power_state:	Transport power state for this priority bin.
 * @tx_ready:		List of channels ready for tx in the priority bucket.
 * @active_ch_cnt:	Active channels of this priority.
 */
struct glink_qos_priority_bin {
	unsigned long max_rate_kBps;
	uint32_t power_state;
	struct list_head tx_ready;
	uint32_t active_ch_cnt;
};

/**
 * struct glink_core_xprt_ctx - transport representation structure
 * @xprt_state_lhb0:		controls read/write access to transport state
 * @list_node:			used to chain this transport in a global
 *				transport list
 * @name:			name of this transport
 * @edge:			what this transport connects to
 * @id:				the id to use for channel migration
 * @versions:			array of transport versions this implementation
 *				supports
 * @versions_entries:		number of entries in @versions
 * @local_version_idx:		local version index into @versions this
 *				transport is currently running
 * @remote_version_idx:		remote version index into @versions this
 *				transport is currently running
 * @l_features:			Features negotiated by the local side
 * @capabilities:		Capabilities of underlying transport
 * @ops:			transport defined implementation of common
 *				operations
 * @local_state:		value from local_channel_state_e representing
 *				the local state of this transport
 * @remote_neg_completed:	is the version negotiation with the remote end
 *				completed
 * @xprt_ctx_lock_lhb1:		lock to protect @next_lcid and @channels
 * @next_lcid:			logical channel identifier to assign to the next
 *				created channel
 * @free_lcid_list:		list of recycled lcids available for reuse
 * @max_cid:			maximum number of channel identifiers supported
 * @max_iid:			maximum number of intent identifiers supported
 * @tx_kwork:			work item to process @tx_ready
 * @tx_wq:			kthread worker to run @tx_kwork
 * @tx_task:			handle to the running kthread
 * @channels:			list of all existing channels on this transport
 * @dummy_in_use:		True when channels are being migrated to dummy.
 * @notified:			list holds channels during dummy xprt cleanup.
 * @mtu:			MTU supported by this transport.
 * @token_count:		Number of tokens to be assigned per assignment.
 * @curr_qos_rate_kBps:		Aggregate of currently supported QoS requests.
 * @threshold_rate_kBps:	Maximum Rate allocated for QoS traffic.
 * @num_priority:		Number of priority buckets in the transport.
 * @tx_ready_lock_lhb3:		lock to protect @tx_ready
 * @active_high_prio:		Highest priority of active channels.
 * @prio_bin:			Pointer to priority buckets.
 * @pm_qos_req:			power management QoS request for TX path
 * @qos_req_active:		a vote is active with the PM QoS system
 * @tx_path_activity:		transmit activity has occurred
 * @pm_qos_work:		removes PM QoS vote due to inactivity
 * @edge_ctx:			edge context associated with this transport
 * @xprt_dbgfs_lock_lhb4:	debugfs channel structure lock
 * @log_ctx:			IPC logging context for this transport.
 */
struct glink_core_xprt_ctx {
	struct rwref_lock xprt_state_lhb0;
	struct list_head list_node;
	char name[GLINK_NAME_SIZE];
	char edge[GLINK_NAME_SIZE];
	uint16_t id;
	const struct glink_core_version *versions;
	size_t versions_entries;
	uint32_t local_version_idx;
	uint32_t remote_version_idx;
	uint32_t l_features;
	uint32_t capabilities;
	struct glink_transport_if *ops;
	enum transport_state_e local_state;
	bool remote_neg_completed;

	spinlock_t xprt_ctx_lock_lhb1;
	struct list_head channels;
	uint32_t next_lcid;
	struct list_head free_lcid_list;
	struct list_head notified;
	bool dummy_in_use;

	uint32_t max_cid;
	uint32_t max_iid;
	struct kthread_work tx_kwork;
	struct kthread_worker tx_wq;
	struct task_struct *tx_task;

	size_t mtu;
	uint32_t token_count;
	unsigned long curr_qos_rate_kBps;
	unsigned long threshold_rate_kBps;
	uint32_t num_priority;
	spinlock_t tx_ready_lock_lhb3;
	uint32_t active_high_prio;
	struct glink_qos_priority_bin *prio_bin;

	struct pm_qos_request pm_qos_req;
	bool qos_req_active;
	bool tx_path_activity;
	struct delayed_work pm_qos_work;
	struct glink_core_edge_ctx *edge_ctx;

	struct mutex xprt_dbgfs_lock_lhb4;
	void *log_ctx;
};

/**
 * struct glink_core_edge_ctx - edge context
 * @list_node:			edge list node used by edge list
 * @name:			name of the edge
 * @edge_migration_lock_lhd2:	mutex lock for migration over edge
 * @edge_ref_lock_lhd1:		lock for reference count
 */
struct glink_core_edge_ctx {
	struct list_head list_node;
	char name[GLINK_NAME_SIZE];
	struct mutex edge_migration_lock_lhd2;
	struct rwref_lock edge_ref_lock_lhd1;
};

static LIST_HEAD(edge_list);
static DEFINE_MUTEX(edge_list_lock_lhd0);
/**
 * struct channel_ctx - channel context
 * @ch_state_lhb2:	controls read/write access to channel state
 * @port_list_node:	channel list node used by transport "channels" list
 * @tx_ready_list_node:	channels that have data ready to transmit
 * @name:		name of the channel
 *
 * @user_priv:		user opaque data type passed into glink_open()
 * @notify_rx:		RX notification function
 * @notify_tx_done:	TX-done notification function (remote side is done)
 * @notify_state:	Channel state (connected / disconnected) notifications
 * @notify_rx_intent_req: Request from remote side for an intent
 * @notify_rxv:		RX notification function (for io buffer chain)
 * @notify_rx_sigs:	RX signal change notification
 * @notify_rx_abort:	Channel close RX Intent aborted
 * @notify_tx_abort:	Channel close TX aborted
 * @notify_rx_tracer_pkt: Receive notification for tracer packet
 * @notify_remote_rx_intent: Receive notification for remote-queued RX intent
 *
 * @transport_ptr:	Transport this channel uses
 * @lcid:		Local channel ID
 * @rcid:		Remote channel ID
 * @local_open_state:	Local channel state
 * @remote_opened:	Remote channel state (opened or closed)
 * @int_req_ack:	Remote side intent request ACK state
 * @int_req_ack_complete: Intent tracking completion - received remote ACK
 * @int_req_complete:	Intent tracking completion - received intent
 * @rx_intent_req_timeout_jiffies: Timeout for requesting an RX intent, in
 *			jiffies; if set to 0, timeout is infinite
 *
 * @local_rx_intent_lst_lock_lhc1:	RX intent list lock
 * @local_rx_intent_list:		Active RX Intents queued by client
 * @local_rx_intent_ntfy_list:		Client notified, waiting for rx_done()
 * @local_rx_intent_free_list:		Available intent container structure
 *
 * @rmt_rx_intent_lst_lock_lhc2:	Remote RX intent list lock
 * @rmt_rx_intent_list:			Remote RX intent list
 *
 * @max_used_liid:	Maximum Local Intent ID used
 * @dummy_riid:		Dummy remote intent ID
 *
 * @tx_lists_lock_lhc3:	TX list lock
 * @tx_active:		Ready to transmit
 *
 * @tx_pending_rmt_done_lock_lhc4:	Remote-done list lock
 * @tx_pending_remote_done:		Transmitted, waiting for remote done
 * @lsigs:		Local signals
 * @rsigs:		Remote signals
 * @pending_delete:	waiting for channel to be deleted
 * @no_migrate:		The local client does not want to migrate transports
 * @local_xprt_req:	The transport the local side requested
 * @local_xprt_resp:	The response to @local_xprt_req
 * @remote_xprt_req:	The transport the remote side requested
 * @remote_xprt_resp:	The response to @remote_xprt_req
 * @curr_priority:	Channel's current priority.
 * @initial_priority:	Channel's initial priority.
 * @token_count:	Tokens for consumption by packet.
 * @txd_len:		Transmitted data size in the current
 *			token assignment cycle.
 * @token_start_time:	Time at which tokens are assigned.
 * @req_rate_kBps:	Current QoS request by the channel.
 * @tx_intent_cnt:	Intent count to transmit soon in future.
 * @tx_cnt:		Packets to be picked by tx scheduler.
 * @rt_vote_on:		Number of times RT vote on is called.
 * @rt_vote_off:	Number of times RT vote off is called.
 */
struct channel_ctx {
	struct rwref_lock ch_state_lhb2;
	struct list_head port_list_node;
	struct list_head tx_ready_list_node;
	char name[GLINK_NAME_SIZE];

	/* user info */
	void *user_priv;
	void (*notify_rx)(void *handle, const void *priv, const void *pkt_priv,
			const void *ptr, size_t size);
	void (*notify_tx_done)(void *handle, const void *priv,
			const void *pkt_priv, const void *ptr);
	void (*notify_state)(void *handle, const void *priv,
			unsigned int event);
	bool (*notify_rx_intent_req)(void *handle, const void *priv,
			size_t req_size);
	void (*notify_rxv)(void *handle, const void *priv, const void *pkt_priv,
			void *iovec, size_t size,
			void * (*vbuf_provider)(void *iovec, size_t offset,
						size_t *size),
			void * (*pbuf_provider)(void *iovec, size_t offset,
						size_t *size));
	void (*notify_rx_sigs)(void *handle, const void *priv,
			uint32_t old_sigs, uint32_t new_sigs);
	void (*notify_rx_abort)(void *handle, const void *priv,
			const void *pkt_priv);
	void (*notify_tx_abort)(void *handle, const void *priv,
			const void *pkt_priv);
	void (*notify_rx_tracer_pkt)(void *handle, const void *priv,
			const void *pkt_priv, const void *ptr, size_t size);
	void (*notify_remote_rx_intent)(void *handle, const void *priv,
			size_t size);

	/* internal port state */
	struct glink_core_xprt_ctx *transport_ptr;
	uint32_t lcid;
	uint32_t rcid;
	enum local_channel_state_e local_open_state;
	bool remote_opened;
	bool int_req_ack;
	struct completion int_req_ack_complete;
	struct completion int_req_complete;
	unsigned long rx_intent_req_timeout_jiffies;

	spinlock_t local_rx_intent_lst_lock_lhc1;
	struct list_head local_rx_intent_list;
	struct list_head local_rx_intent_ntfy_list;
	struct list_head local_rx_intent_free_list;

	spinlock_t rmt_rx_intent_lst_lock_lhc2;
	struct list_head rmt_rx_intent_list;

	uint32_t max_used_liid;
	uint32_t dummy_riid;

	spinlock_t tx_lists_lock_lhc3;
	struct list_head tx_active;

	spinlock_t tx_pending_rmt_done_lock_lhc4;
	struct list_head tx_pending_remote_done;

	uint32_t lsigs;
	uint32_t rsigs;
	bool pending_delete;

	bool no_migrate;
	uint16_t local_xprt_req;
	uint16_t local_xprt_resp;
	uint16_t remote_xprt_req;
	uint16_t remote_xprt_resp;

	uint32_t curr_priority;
	uint32_t initial_priority;
	uint32_t token_count;
	size_t txd_len;
	unsigned long token_start_time;
	unsigned long req_rate_kBps;
	uint32_t tx_intent_cnt;
	uint32_t tx_cnt;

	uint32_t rt_vote_on;
	uint32_t rt_vote_off;
};

static struct glink_core_if core_impl;
static void *log_ctx;
static unsigned int glink_debug_mask = QCOM_GLINK_INFO;
module_param_named(debug_mask, glink_debug_mask,
		   uint, S_IRUGO | S_IWUSR | S_IWGRP);

static unsigned int glink_pm_qos;
module_param_named(pm_qos_enable, glink_pm_qos,
		   uint, S_IRUGO | S_IWUSR | S_IWGRP);


static LIST_HEAD(transport_list);

/*
 * Used while notifying clients about link state events. The clients need to
 * store the callback information temporarily, and all existing accesses to
 * the transport list are in non-IRQ context, so transport_list_lock is
 * defined as a mutex.
 */
static DEFINE_MUTEX(transport_list_lock_lha0);

struct link_state_notifier_info {
	struct list_head list;
	char transport[GLINK_NAME_SIZE];
	char edge[GLINK_NAME_SIZE];
	void (*glink_link_state_notif_cb)(
			struct glink_link_state_cb_info *cb_info, void *priv);
	void *priv;
};
static LIST_HEAD(link_state_notifier_list);
static DEFINE_MUTEX(link_state_notifier_lock_lha1);

static struct glink_core_xprt_ctx *find_open_transport(const char *edge,
						       const char *name,
						       bool initial_xprt,
						       uint16_t *best_id);

static bool xprt_is_fully_opened(struct glink_core_xprt_ctx *xprt);

static struct channel_ctx *xprt_lcid_to_ch_ctx_get(
					struct glink_core_xprt_ctx *xprt_ctx,
					uint32_t lcid);

static struct channel_ctx *xprt_rcid_to_ch_ctx_get(
					struct glink_core_xprt_ctx *xprt_ctx,
					uint32_t rcid);

static void xprt_schedule_tx(struct glink_core_xprt_ctx *xprt_ptr,
			     struct channel_ctx *ch_ptr,
			     struct glink_core_tx_pkt *tx_info);

static int xprt_single_threaded_tx(struct glink_core_xprt_ctx *xprt_ptr,
				   struct channel_ctx *ch_ptr,
				   struct glink_core_tx_pkt *tx_info);

static void tx_func(struct kthread_work *work);

static struct channel_ctx *ch_name_to_ch_ctx_create(
					struct glink_core_xprt_ctx *xprt_ctx,
					const char *name, bool local);

static void ch_push_remote_rx_intent(struct channel_ctx *ctx, size_t size,
				     uint32_t riid, void *cookie);

static int ch_pop_remote_rx_intent(struct channel_ctx *ctx, size_t size,
			uint32_t *riid_ptr, size_t *intent_size, void **cookie);

static struct glink_core_rx_intent *ch_push_local_rx_intent(
		struct channel_ctx *ctx, const void *pkt_priv, size_t size);

static void ch_remove_local_rx_intent(struct channel_ctx *ctx, uint32_t liid);

static struct glink_core_rx_intent *ch_get_local_rx_intent(
		struct channel_ctx *ctx, uint32_t liid);

static void ch_set_local_rx_intent_notified(struct channel_ctx *ctx,
				struct glink_core_rx_intent *intent_ptr);

static struct glink_core_rx_intent *ch_get_local_rx_intent_notified(
		struct channel_ctx *ctx, const void *ptr);

static void ch_remove_local_rx_intent_notified(struct channel_ctx *ctx,
			struct glink_core_rx_intent *liid_ptr, bool reuse);

static struct glink_core_rx_intent *ch_get_free_local_rx_intent(
		struct channel_ctx *ctx);

static void ch_purge_intent_lists(struct channel_ctx *ctx);

static void ch_add_rcid(struct glink_core_xprt_ctx *xprt_ctx,
			struct channel_ctx *ctx,
			uint32_t rcid);

static bool ch_is_fully_opened(struct channel_ctx *ctx);
static bool ch_is_fully_closed(struct channel_ctx *ctx);

struct glink_core_tx_pkt *ch_get_tx_pending_remote_done(struct channel_ctx *ctx,
							uint32_t riid);

static void ch_remove_tx_pending_remote_done(struct channel_ctx *ctx,
					     struct glink_core_tx_pkt *tx_pkt);

static void glink_core_rx_cmd_rx_intent_req_ack(struct glink_transport_if
					*if_ptr, uint32_t rcid, bool granted);

static bool glink_core_remote_close_common(struct channel_ctx *ctx, bool safe);

static void check_link_notifier_and_notify(struct glink_core_xprt_ctx *xprt_ptr,
					   enum glink_link_state link_state);

static void glink_core_channel_cleanup(struct glink_core_xprt_ctx *xprt_ptr);
static void glink_pm_qos_vote(struct glink_core_xprt_ctx *xprt_ptr);
static void glink_pm_qos_unvote(struct glink_core_xprt_ctx *xprt_ptr);
static void glink_pm_qos_cancel_worker(struct work_struct *work);
static bool ch_update_local_state(struct channel_ctx *ctx,
			enum local_channel_state_e lstate);
static bool ch_update_rmt_state(struct channel_ctx *ctx, bool rstate);
static void glink_core_deinit_xprt_qos_cfg(
			struct glink_core_xprt_ctx *xprt_ptr);

#define glink_prio_to_power_state(xprt_ctx, priority) \
		((xprt_ctx)->prio_bin[priority].power_state)

#define GLINK_GET_CH_TX_STATE(ctx) \
		((ctx)->tx_intent_cnt || (ctx)->tx_cnt)

static int glink_get_ch_ctx(struct channel_ctx *ctx)
{
	if (!ctx)
		return -EINVAL;
	rwref_get(&ctx->ch_state_lhb2);
	return 0;
}

static void glink_put_ch_ctx(struct channel_ctx *ctx)
{
	rwref_put(&ctx->ch_state_lhb2);
}


/**
 * glink_subsys_up() - Inform transport about remote subsystem up.
 * @subsystem:	The name of the subsystem
 *
 * Call into the transport using the subsys_up(if_ptr) function to allow it to
 * initialize any necessary structures.
 *
 * Return: Standard error codes.
 */
int glink_subsys_up(const char *subsystem)
{
	int ret = 0;
	bool transport_found = false;
	struct glink_core_xprt_ctx *xprt_ctx = NULL;

	mutex_lock(&transport_list_lock_lha0);
	list_for_each_entry(xprt_ctx, &transport_list, list_node) {
		if (!strcmp(subsystem, xprt_ctx->edge) &&
		    !xprt_is_fully_opened(xprt_ctx)) {
			GLINK_INFO_XPRT(xprt_ctx, "%s: %s Subsystem up\n",
					__func__, subsystem);
			if (xprt_ctx->ops->subsys_up)
				xprt_ctx->ops->subsys_up(xprt_ctx->ops);
			transport_found = true;
		}
	}
	mutex_unlock(&transport_list_lock_lha0);

	if (!transport_found)
		ret = -ENODEV;

	return ret;
}
EXPORT_SYMBOL(glink_subsys_up);

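/*
 * Illustrative usage sketch (not part of this driver): a subsystem restart
 * notifier might inform G-Link that its remote edge has come back up. The
 * edge name "lpass" below is a hypothetical example.
 *
 *	static int example_powerup_notify(void)
 *	{
 *		int ret = glink_subsys_up("lpass");
 *
 *		if (ret == -ENODEV)
 *			pr_err("no matching G-Link transport for lpass\n");
 *		return ret;
 *	}
 */
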
/**
 * glink_ssr() - Clean up locally for SSR by simulating remote close
 * @subsystem:	The name of the subsystem being restarted
 *
 * Call into the transport using the ssr(if_ptr) function to allow it to
 * clean up any necessary structures, then simulate a remote close from
 * subsystem for all channels on that edge.
 *
 * Return: Standard error codes.
 */
int glink_ssr(const char *subsystem)
{
	int ret = 0;
	bool transport_found = false;
	struct glink_core_xprt_ctx *xprt_ctx = NULL;
	struct channel_ctx *ch_ctx, *temp_ch_ctx;
	uint32_t i;
	unsigned long flags;

	mutex_lock(&transport_list_lock_lha0);
	list_for_each_entry(xprt_ctx, &transport_list, list_node) {
		if (!strcmp(subsystem, xprt_ctx->edge) &&
				xprt_is_fully_opened(xprt_ctx)) {
			GLINK_INFO_XPRT(xprt_ctx, "%s: SSR\n", __func__);
			spin_lock_irqsave(&xprt_ctx->tx_ready_lock_lhb3,
					  flags);
			for (i = 0; i < xprt_ctx->num_priority; i++)
				list_for_each_entry_safe(ch_ctx, temp_ch_ctx,
						&xprt_ctx->prio_bin[i].tx_ready,
						tx_ready_list_node)
					list_del_init(
						&ch_ctx->tx_ready_list_node);
			spin_unlock_irqrestore(&xprt_ctx->tx_ready_lock_lhb3,
					       flags);

			xprt_ctx->ops->ssr(xprt_ctx->ops);
			transport_found = true;
		}
	}
	mutex_unlock(&transport_list_lock_lha0);

	if (!transport_found)
		ret = -ENODEV;

	return ret;
}
EXPORT_SYMBOL(glink_ssr);

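/*
 * Illustrative usage sketch (not part of this driver): an SSR handler that
 * learned the remote processor went down. Note the symmetry with
 * glink_subsys_up(): glink_ssr() only matches transports that are fully
 * opened, while glink_subsys_up() only matches those that are not.
 * "lpass" is a hypothetical edge name.
 *
 *	static void example_shutdown_notify(void)
 *	{
 *		if (glink_ssr("lpass"))
 *			pr_err("G-Link SSR cleanup failed for lpass\n");
 *	}
 */
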
/**
 * glink_core_ch_close_ack_common() - handles the common operations during
 *                                    close ack.
 * @ctx:	Pointer to channel instance.
 * @safe:	Is function called while holding ctx lock
 *
 * Return: True if the channel is fully closed after the state change,
 *	false otherwise.
 */
static bool glink_core_ch_close_ack_common(struct channel_ctx *ctx, bool safe)
{
	bool is_fully_closed;

	if (ctx == NULL)
		return false;

	if (safe) {
		ctx->local_open_state = GLINK_CHANNEL_CLOSED;
		is_fully_closed = ch_is_fully_closed(ctx);
	} else {
		is_fully_closed = ch_update_local_state(ctx,
					GLINK_CHANNEL_CLOSED);
	}

	GLINK_INFO_PERF_CH(ctx,
		"%s: local:GLINK_CHANNEL_CLOSING->GLINK_CHANNEL_CLOSED\n",
		__func__);

	if (ctx->notify_state) {
		ctx->notify_state(ctx, ctx->user_priv,
				  GLINK_LOCAL_DISCONNECTED);
		ch_purge_intent_lists(ctx);
		GLINK_INFO_PERF_CH(ctx,
			"%s: notify state: GLINK_LOCAL_DISCONNECTED\n",
			__func__);
	}

	return is_fully_closed;
}

/**
 * glink_core_remote_close_common() - Handles the common operations during
 *                                    a remote close.
 * @ctx:	Pointer to channel instance.
 * @safe:	Is function called with ctx rwref lock already acquired.
 *
 * Return: True if the channel is fully closed after the state change,
 *	false otherwise.
 */
static bool glink_core_remote_close_common(struct channel_ctx *ctx, bool safe)
{
	bool is_fully_closed;

	if (ctx == NULL)
		return false;

	if (safe) {
		ctx->remote_opened = false;
		is_fully_closed = ch_is_fully_closed(ctx);
	} else {
		is_fully_closed = ch_update_rmt_state(ctx, false);
	}
	ctx->rcid = 0;

	ctx->int_req_ack = false;
	complete_all(&ctx->int_req_ack_complete);
	complete_all(&ctx->int_req_complete);
	if (ctx->local_open_state != GLINK_CHANNEL_CLOSED &&
		ctx->local_open_state != GLINK_CHANNEL_CLOSING) {
		if (ctx->notify_state)
			ctx->notify_state(ctx, ctx->user_priv,
					  GLINK_REMOTE_DISCONNECTED);
		GLINK_INFO_CH(ctx,
				"%s: %s: GLINK_REMOTE_DISCONNECTED\n",
				__func__, "notify state");
	}

	if (ctx->local_open_state == GLINK_CHANNEL_CLOSED)
		GLINK_INFO_CH(ctx,
			"%s: %s, %s\n", __func__,
			"Did not send GLINK_REMOTE_DISCONNECTED",
			"local state is already CLOSED");

	ch_purge_intent_lists(ctx);

	return is_fully_closed;
}

/**
 * glink_qos_calc_rate_kBps() - Calculate the transmit rate in kBps
 * @pkt_size:		Worst case packet size per transmission.
 * @interval_us:	Packet transmit interval in us.
 *
 * This function is used to calculate the transmission rate of a channel
 * in kBps.
 *
 * Return: Transmission rate in kBps.
 */
static unsigned long glink_qos_calc_rate_kBps(size_t pkt_size,
				       unsigned long interval_us)
{
	unsigned long rem;
	uint64_t rate_kBps;

	rate_kBps = pkt_size * USEC_PER_SEC;
	rem = do_div(rate_kBps, interval_us * 1024);
	return rate_kBps;
}

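/*
 * Worked example (illustrative only): a client that sends a worst-case
 * 2048-byte packet every 1000 us works out to
 *
 *	rate_kBps = (2048 * 1000000) / (1000 * 1024) = 2000 kBps
 *
 * i.e. the byte rate pkt_size * USEC_PER_SEC / interval_us, scaled from
 * bytes/s to kB/s by the factor of 1024 folded into the divisor.
 */
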
/**
 * glink_qos_check_feasibility() - Feasibility test on a QoS Request
 * @xprt_ctx:		Transport in which the QoS request is made.
 * @req_rate_kBps:	QoS Request.
 *
 * This function is used to perform the schedulability test on a QoS request
 * over a specific transport.
 *
 * Return: 0 on success, standard Linux error codes on failure.
 */
static int glink_qos_check_feasibility(struct glink_core_xprt_ctx *xprt_ctx,
				       unsigned long req_rate_kBps)
{
	unsigned long new_rate_kBps;

	if (xprt_ctx->num_priority == GLINK_QOS_DEF_NUM_PRIORITY)
		return -EOPNOTSUPP;

	new_rate_kBps = xprt_ctx->curr_qos_rate_kBps + req_rate_kBps;
	if (new_rate_kBps > xprt_ctx->threshold_rate_kBps) {
		GLINK_ERR_XPRT(xprt_ctx,
			"New_rate(%lu + %lu) > threshold_rate(%lu)\n",
			xprt_ctx->curr_qos_rate_kBps, req_rate_kBps,
			xprt_ctx->threshold_rate_kBps);
		return -EBUSY;
	}
	return 0;
}

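/*
 * Worked example (illustrative only, hypothetical numbers): with
 * threshold_rate_kBps = 10000 and curr_qos_rate_kBps = 9500, a request
 * for 600 kBps fails the admission test (9500 + 600 > 10000, -EBUSY),
 * while a request for 500 kBps is admitted. A transport configured with
 * only the default priority bucket rejects all QoS requests (-EOPNOTSUPP).
 */
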
/**
 * glink_qos_update_ch_prio() - Update the channel priority
 * @ctx:		Channel context whose priority is updated.
 * @new_priority:	New priority of the channel.
 *
 * This function is called to update the channel priority during QoS request,
 * QoS Cancel or Priority evaluation by packet scheduler. This function must
 * be called with transport's tx_ready_lock_lhb3 lock and channel's
 * tx_lists_lock_lhc3 locked.
 */
static void glink_qos_update_ch_prio(struct channel_ctx *ctx,
				     uint32_t new_priority)
{
	uint32_t old_priority;

	if (unlikely(!ctx))
		return;

	old_priority = ctx->curr_priority;
	if (!list_empty(&ctx->tx_ready_list_node)) {
		ctx->transport_ptr->prio_bin[old_priority].active_ch_cnt--;
		list_move(&ctx->tx_ready_list_node,
			&ctx->transport_ptr->prio_bin[new_priority].tx_ready);
		ctx->transport_ptr->prio_bin[new_priority].active_ch_cnt++;
	}
	ctx->curr_priority = new_priority;
}

/**
 * glink_qos_assign_priority() - Assign priority to a channel
 * @ctx:		Channel for which the priority has to be assigned.
 * @req_rate_kBps:	QoS request by the channel.
 *
 * This function is used to assign a priority to the channel depending on its
 * QoS Request.
 *
 * Return: 0 on success, standard Linux error codes on failure.
 */
static int glink_qos_assign_priority(struct channel_ctx *ctx,
				     unsigned long req_rate_kBps)
{
	int ret;
	uint32_t i;
	unsigned long flags;

	spin_lock_irqsave(&ctx->transport_ptr->tx_ready_lock_lhb3, flags);
	if (ctx->req_rate_kBps) {
		spin_unlock_irqrestore(&ctx->transport_ptr->tx_ready_lock_lhb3,
					flags);
		GLINK_ERR_CH(ctx, "%s: QoS Request already exists\n", __func__);
		return -EINVAL;
	}

	ret = glink_qos_check_feasibility(ctx->transport_ptr, req_rate_kBps);
	if (ret < 0) {
		spin_unlock_irqrestore(&ctx->transport_ptr->tx_ready_lock_lhb3,
					flags);
		return ret;
	}

	spin_lock(&ctx->tx_lists_lock_lhc3);
	i = ctx->transport_ptr->num_priority - 1;
	while (i > 0 &&
	       ctx->transport_ptr->prio_bin[i-1].max_rate_kBps >= req_rate_kBps)
		i--;

	ctx->initial_priority = i;
	glink_qos_update_ch_prio(ctx, i);
	ctx->req_rate_kBps = req_rate_kBps;
	if (i > 0) {
		ctx->transport_ptr->curr_qos_rate_kBps += req_rate_kBps;
		ctx->token_count = ctx->transport_ptr->token_count;
		ctx->txd_len = 0;
		ctx->token_start_time = arch_counter_get_cntvct();
	}
	spin_unlock(&ctx->tx_lists_lock_lhc3);
	spin_unlock_irqrestore(&ctx->transport_ptr->tx_ready_lock_lhb3, flags);
	return 0;
}

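/*
 * Worked example (illustrative only, hypothetical bucket configuration):
 * with num_priority = 4 and max_rate_kBps = {1000, 2000, 4000} for
 * buckets 0..2, the loop above walks down from bucket 3 and stops at the
 * lowest bucket whose next-lower neighbour cannot cover the request. A
 * request of 1500 kBps lands in bucket 1 (1000 < 1500 <= 2000); a request
 * of 500 kBps drains all the way down to bucket 0 and therefore does not
 * add to curr_qos_rate_kBps or receive tokens.
 */
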
/**
 * glink_qos_reset_priority() - Reset the channel priority
 * @ctx:	Channel for which the priority is reset.
 *
 * This function is used to reset the channel priority when the QoS request
 * is cancelled by the channel.
 *
 * Return: 0 on success, standard Linux error codes on failure.
 */
static int glink_qos_reset_priority(struct channel_ctx *ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->transport_ptr->tx_ready_lock_lhb3, flags);
	spin_lock(&ctx->tx_lists_lock_lhc3);
	if (ctx->initial_priority > 0) {
		ctx->initial_priority = 0;
		glink_qos_update_ch_prio(ctx, 0);
		ctx->transport_ptr->curr_qos_rate_kBps -= ctx->req_rate_kBps;
		ctx->txd_len = 0;
		ctx->req_rate_kBps = 0;
	}
	spin_unlock(&ctx->tx_lists_lock_lhc3);
	spin_unlock_irqrestore(&ctx->transport_ptr->tx_ready_lock_lhb3, flags);
	return 0;
}

/**
 * glink_qos_ch_vote_xprt() - Vote the transport that channel is active
 * @ctx:	Channel context which is active.
 *
 * This function is called to vote for the transport either when the channel
 * is transmitting or when it shows an intention to transmit sooner. This
 * function must be called with transport's tx_ready_lock_lhb3 lock and
 * channel's tx_lists_lock_lhc3 locked.
 *
 * Return: 0 on success, standard Linux error codes on failure.
 */
static int glink_qos_ch_vote_xprt(struct channel_ctx *ctx)
{
	uint32_t prio;

	if (unlikely(!ctx || !ctx->transport_ptr))
		return -EINVAL;

	prio = ctx->curr_priority;
	ctx->transport_ptr->prio_bin[prio].active_ch_cnt++;

	if (ctx->transport_ptr->prio_bin[prio].active_ch_cnt == 1 &&
	    ctx->transport_ptr->active_high_prio < prio) {
		/*
		 * One active channel in this priority and this is the
		 * highest active priority bucket
		 */
		ctx->transport_ptr->active_high_prio = prio;
		return ctx->transport_ptr->ops->power_vote(
				ctx->transport_ptr->ops,
				glink_prio_to_power_state(ctx->transport_ptr,
							prio));
	}
	return 0;
}

/**
 * glink_qos_ch_unvote_xprt() - Unvote the transport when channel is inactive
 * @ctx:	Channel context which is inactive.
 *
 * This function is called to unvote for the transport when all the packets
 * queued by the channel have been transmitted by the scheduler. This
 * function must be called with transport's tx_ready_lock_lhb3 lock and
 * channel's tx_lists_lock_lhc3 locked.
 *
 * Return: 0 on success, standard Linux error codes on failure.
 */
static int glink_qos_ch_unvote_xprt(struct channel_ctx *ctx)
{
	uint32_t prio;

	if (unlikely(!ctx || !ctx->transport_ptr))
		return -EINVAL;

	prio = ctx->curr_priority;
	ctx->transport_ptr->prio_bin[prio].active_ch_cnt--;

	if (ctx->transport_ptr->prio_bin[prio].active_ch_cnt ||
	    ctx->transport_ptr->active_high_prio > prio)
		return 0;

	/*
	 * No active channel in this priority and this is the
	 * highest active priority bucket
	 */
	while (prio > 0) {
		prio--;
		if (!ctx->transport_ptr->prio_bin[prio].active_ch_cnt)
			continue;

		ctx->transport_ptr->active_high_prio = prio;
		return ctx->transport_ptr->ops->power_vote(
				ctx->transport_ptr->ops,
				glink_prio_to_power_state(ctx->transport_ptr,
							prio));
	}
	return ctx->transport_ptr->ops->power_unvote(ctx->transport_ptr->ops);
}

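/*
 * Illustrative vote/unvote walk-through (hypothetical state): suppose
 * channels are active in buckets 2 and 0, so active_high_prio = 2. When
 * the last bucket-2 channel goes idle, the loop above walks down, finds
 * bucket 1 empty, finds an active channel in bucket 0, and re-votes the
 * transport at bucket 0's power state. If bucket 0 were also empty, the
 * transport's power vote would be dropped entirely via power_unvote().
 */
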
/**
 * glink_qos_add_ch_tx_intent() - Add the channel's intention to transmit soon
 * @ctx:	Channel context which is going to be active.
 *
 * This function is called to update the channel state when it is intending to
 * transmit sooner. This function must be called with transport's
 * tx_ready_lock_lhb3 lock and channel's tx_lists_lock_lhc3 locked.
 *
 * Return: 0 on success, standard Linux error codes on failure.
 */
static int glink_qos_add_ch_tx_intent(struct channel_ctx *ctx)
{
	bool active_tx;

	if (unlikely(!ctx))
		return -EINVAL;

	active_tx = GLINK_GET_CH_TX_STATE(ctx);
	ctx->tx_intent_cnt++;
	if (!active_tx)
		glink_qos_ch_vote_xprt(ctx);
	return 0;
}

/**
 * glink_qos_do_ch_tx() - Update the channel's state that it is transmitting
 * @ctx:	Channel context which is transmitting.
 *
 * This function is called to update the channel state when it is queueing a
 * packet to transmit. This function must be called with transport's
 * tx_ready_lock_lhb3 lock and channel's tx_lists_lock_lhc3 locked.
 *
 * Return: 0 on success, standard Linux error codes on failure.
 */
static int glink_qos_do_ch_tx(struct channel_ctx *ctx)
{
	bool active_tx;

	if (unlikely(!ctx))
		return -EINVAL;

	active_tx = GLINK_GET_CH_TX_STATE(ctx);
	ctx->tx_cnt++;
	if (ctx->tx_intent_cnt)
		ctx->tx_intent_cnt--;
	if (!active_tx)
		glink_qos_ch_vote_xprt(ctx);
	return 0;
}

/**
 * glink_qos_done_ch_tx() - Update the channel's state when transmission is done
 * @ctx:	Channel context for which all packets are transmitted.
 *
 * This function is called to update the channel state when all packets in its
 * transmit queue are successfully transmitted. This function must be called
 * with transport's tx_ready_lock_lhb3 lock and channel's tx_lists_lock_lhc3
 * locked.
 *
 * Return: 0 on success, standard Linux error codes on failure.
 */
static int glink_qos_done_ch_tx(struct channel_ctx *ctx)
{
	bool active_tx;

	if (unlikely(!ctx))
		return -EINVAL;

	WARN_ON(ctx->tx_cnt == 0);
	ctx->tx_cnt = 0;
	active_tx = GLINK_GET_CH_TX_STATE(ctx);
	if (!active_tx)
		glink_qos_ch_unvote_xprt(ctx);
	return 0;
}

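/*
 * Lifecycle sketch (illustrative only) of the three helpers above, as the
 * scheduler would drive them for one packet, with the required locks held:
 *
 *	glink_qos_add_ch_tx_intent(ctx);   // tx_intent_cnt 0->1, vote xprt
 *	glink_qos_do_ch_tx(ctx);           // tx_cnt 0->1, intent 1->0
 *	glink_qos_done_ch_tx(ctx);         // tx_cnt 1->0, unvote if idle
 *
 * The power vote is taken on the first transition into an active TX state
 * (GLINK_GET_CH_TX_STATE becomes true) and dropped on the last transition
 * out of it.
 */
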
/**
 * tx_linear_vbuf_provider() - Virtual Buffer Provider for linear buffers
 * @iovec:	Pointer to the beginning of the linear buffer.
 * @offset:	Offset into the buffer whose address is needed.
 * @size:	Pointer to hold the length of the contiguous buffer space.
 *
 * This function is used when a linear buffer is transmitted.
 *
 * Return: Address of the buffer which is at offset "offset" from the beginning
 *	of the buffer.
 */
static void *tx_linear_vbuf_provider(void *iovec, size_t offset, size_t *size)
{
	struct glink_core_tx_pkt *tx_info = (struct glink_core_tx_pkt *)iovec;

	if (unlikely(!iovec || !size))
		return NULL;

	if (offset >= tx_info->size)
		return NULL;

	if (unlikely(OVERFLOW_ADD_UNSIGNED(void *, tx_info->data, offset)))
		return NULL;

	*size = tx_info->size - offset;

	return (void *)tx_info->data + offset;
}

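/*
 * Illustrative call (hypothetical values): for a 100-byte linear packet,
 * tx_linear_vbuf_provider(pkt, 40, &len) returns pkt->data + 40 and sets
 * len = 60, i.e. the whole remainder is contiguous. A scatter-gather
 * provider would instead return only the run that is contiguous at that
 * offset, which is why callers loop until the returned sizes add up.
 */
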
/**
 * linearize_vector() - Linearize the vector buffer
 * @iovec:		Pointer to the vector buffer.
 * @size:		Size of data in the vector buffer.
 * @vbuf_provider:	Virtual address-space Buffer Provider for the vector.
 * @pbuf_provider:	Physical address-space Buffer Provider for the vector.
 *
 * This function is used to linearize the vector buffer provided by the
 * transport when the client has registered to receive only the linear
 * buffer.
 *
 * Return: address of the linear buffer on success, ERR_PTR(-ENOMEM) if the
 *	bounce buffer allocation fails, NULL on other failures.
 */
static void *linearize_vector(void *iovec, size_t size,
	void * (*vbuf_provider)(void *iovec, size_t offset, size_t *buf_size),
	void * (*pbuf_provider)(void *iovec, size_t offset, size_t *buf_size))
{
	void *bounce_buf;
	void *pdata;
	void *vdata;
	size_t data_size;
	size_t offset = 0;

	bounce_buf = kmalloc(size, GFP_KERNEL);
	if (!bounce_buf)
		return ERR_PTR(-ENOMEM);

	do {
		if (vbuf_provider) {
			vdata = vbuf_provider(iovec, offset, &data_size);
		} else {
			pdata = pbuf_provider(iovec, offset, &data_size);
			vdata = phys_to_virt((unsigned long)pdata);
		}

		if (!vdata)
			break;

		if (OVERFLOW_ADD_UNSIGNED(size_t, data_size, offset)) {
			GLINK_ERR("%s: overflow data_size %zu + offset %zu\n",
				  __func__, data_size, offset);
			goto err;
		}

		memcpy(bounce_buf + offset, vdata, data_size);
		offset += data_size;
	} while (offset < size);

	if (offset != size) {
		GLINK_ERR("%s: Error size_copied %zu != total_size %zu\n",
			  __func__, offset, size);
		goto err;
	}
	return bounce_buf;

err:
	kfree(bounce_buf);
	return NULL;
}

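/*
 * Usage sketch (illustrative only): the RX path can use this helper
 * together with the transport's providers to hand a contiguous buffer to
 * a client that did not register notify_rxv:
 *
 *	void *linear = linearize_vector(iovec, pkt_size,
 *					vbuf_provider, pbuf_provider);
 *	if (!IS_ERR_OR_NULL(linear))
 *		deliver(linear, pkt_size);	// hypothetical delivery step
 *
 * The copy loop walks the vector one contiguous run at a time, preferring
 * the virtual-address provider and falling back to phys_to_virt() on the
 * physical one.
 */
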
/**
 * glink_core_migration_edge_lock() - gains a reference count for the edge
 *					and takes the mutex lock
 * @xprt_ctx:	transport of the edge
 */
static void glink_core_migration_edge_lock(struct glink_core_xprt_ctx *xprt_ctx)
{
	struct glink_core_edge_ctx *edge_ctx = xprt_ctx->edge_ctx;

	rwref_get(&edge_ctx->edge_ref_lock_lhd1);
	mutex_lock(&edge_ctx->edge_migration_lock_lhd2);
}

/**
 * glink_core_migration_edge_unlock() - releases the reference count for the
 *					edge and releases the mutex lock.
 * @xprt_ctx:	transport of the edge
 */
static void glink_core_migration_edge_unlock(
					struct glink_core_xprt_ctx *xprt_ctx)
{
	struct glink_core_edge_ctx *edge_ctx = xprt_ctx->edge_ctx;

	mutex_unlock(&edge_ctx->edge_migration_lock_lhd2);
	rwref_put(&edge_ctx->edge_ref_lock_lhd1);
}

/**
 * glink_edge_ctx_release() - Free the edge context
 * @ch_st_lock:	handle to the rwref_lock associated with the edge
 *
 * This should only be called when the reference count associated with the
 * edge goes to zero.
 */
static void glink_edge_ctx_release(struct rwref_lock *ch_st_lock)
{
	struct glink_core_edge_ctx *ctx = container_of(ch_st_lock,
					struct glink_core_edge_ctx,
					edge_ref_lock_lhd1);

	mutex_lock(&edge_list_lock_lhd0);
	list_del(&ctx->list_node);
	mutex_unlock(&edge_list_lock_lhd0);
	kfree(ctx);
}


/**
 * edge_name_to_ctx_create() - look up an edge by name, creating the edge ctx
 *				if it is not found.
 * @xprt_ctx:	Transport to search for a matching edge.
 *
 * Return: The edge ctx corresponding to the edge of @xprt_ctx, or NULL if
 *	memory allocation fails.
 */
static struct glink_core_edge_ctx *edge_name_to_ctx_create(
				struct glink_core_xprt_ctx *xprt_ctx)
{
	struct glink_core_edge_ctx *edge_ctx;

	mutex_lock(&edge_list_lock_lhd0);
	list_for_each_entry(edge_ctx, &edge_list, list_node) {
		if (!strcmp(edge_ctx->name, xprt_ctx->edge)) {
			rwref_get(&edge_ctx->edge_ref_lock_lhd1);
			mutex_unlock(&edge_list_lock_lhd0);
			return edge_ctx;
		}
	}
	edge_ctx = kzalloc(sizeof(struct glink_core_edge_ctx), GFP_KERNEL);
	if (!edge_ctx) {
		mutex_unlock(&edge_list_lock_lhd0);
		return NULL;
	}
	strlcpy(edge_ctx->name, xprt_ctx->edge, GLINK_NAME_SIZE);
	rwref_lock_init(&edge_ctx->edge_ref_lock_lhd1, glink_edge_ctx_release);
	mutex_init(&edge_ctx->edge_migration_lock_lhd2);
	INIT_LIST_HEAD(&edge_ctx->list_node);
	list_add_tail(&edge_ctx->list_node, &edge_list);
	mutex_unlock(&edge_list_lock_lhd0);
	return edge_ctx;
}

/**
 * xprt_lcid_to_ch_ctx_get() - lookup a channel by local id
 * @xprt_ctx:	Transport to search for a matching channel.
 * @lcid:	Local channel identifier corresponding to the desired channel.
 *
 * If the channel is found, the reference count is incremented to ensure the
 * lifetime of the channel context. The caller must call rwref_put() when done.
 *
 * Return: The channel corresponding to @lcid or NULL if a matching channel
 *	is not found.
 */
static struct channel_ctx *xprt_lcid_to_ch_ctx_get(
					struct glink_core_xprt_ctx *xprt_ctx,
					uint32_t lcid)
{
	struct channel_ctx *entry;
	unsigned long flags;

	spin_lock_irqsave(&xprt_ctx->xprt_ctx_lock_lhb1, flags);
	list_for_each_entry(entry, &xprt_ctx->channels, port_list_node)
		if (entry->lcid == lcid) {
			rwref_get(&entry->ch_state_lhb2);
			spin_unlock_irqrestore(&xprt_ctx->xprt_ctx_lock_lhb1,
						flags);
			return entry;
		}
	spin_unlock_irqrestore(&xprt_ctx->xprt_ctx_lock_lhb1, flags);

	return NULL;
}

/**
 * xprt_rcid_to_ch_ctx_get() - lookup a channel by remote id
 * @xprt_ctx:	Transport to search for a matching channel.
 * @rcid:	Remote channel identifier corresponding to the desired channel.
 *
 * If the channel is found, the reference count is incremented to ensure the
 * lifetime of the channel context. The caller must call rwref_put() when done.
 *
 * Return: The channel corresponding to @rcid or NULL if a matching channel
 *	is not found.
 */
static struct channel_ctx *xprt_rcid_to_ch_ctx_get(
					struct glink_core_xprt_ctx *xprt_ctx,
					uint32_t rcid)
{
	struct channel_ctx *entry;
	unsigned long flags;

	spin_lock_irqsave(&xprt_ctx->xprt_ctx_lock_lhb1, flags);
	list_for_each_entry(entry, &xprt_ctx->channels, port_list_node)
		if (entry->rcid == rcid) {
			rwref_get(&entry->ch_state_lhb2);
			spin_unlock_irqrestore(&xprt_ctx->xprt_ctx_lock_lhb1,
						flags);
			return entry;
		}
	spin_unlock_irqrestore(&xprt_ctx->xprt_ctx_lock_lhb1, flags);

	return NULL;
}

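/*
 * Reference-counting sketch (illustrative only): every successful lookup
 * must be paired with an rwref_put() once the caller is done with the
 * channel, e.g. in a command handler:
 *
 *	struct channel_ctx *ctx = xprt_rcid_to_ch_ctx_get(xprt_ctx, rcid);
 *
 *	if (!ctx)
 *		return;
 *	handle_command(ctx);		// hypothetical command handler
 *	rwref_put(&ctx->ch_state_lhb2);
 */
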
/**
 * ch_check_duplicate_riid() - Checks for duplicate riid
 * @ctx:	Local channel context
 * @riid:	Remote intent ID
 *
 * This function checks whether the riid is already present in the remote RX
 * intent list.
 */
bool ch_check_duplicate_riid(struct channel_ctx *ctx, int riid)
{
	struct glink_core_rx_intent *intent;
	unsigned long flags;

	spin_lock_irqsave(&ctx->rmt_rx_intent_lst_lock_lhc2, flags);
	list_for_each_entry(intent, &ctx->rmt_rx_intent_list, list) {
		if (riid == intent->id) {
			spin_unlock_irqrestore(
				&ctx->rmt_rx_intent_lst_lock_lhc2, flags);
			return true;
		}
	}
	spin_unlock_irqrestore(&ctx->rmt_rx_intent_lst_lock_lhc2, flags);
	return false;
}

/**
 * ch_pop_remote_rx_intent() - Finds a matching RX intent
 * @ctx:		Local channel context
 * @size:		Size of Intent
 * @riid_ptr:		Pointer to return value of remote intent ID
 * @intent_size:	Pointer to return the size of the matched intent
 * @cookie:		Transport-specific cookie to return
 *
 * This function searches for an RX intent that is >= the requested size.
 */
int ch_pop_remote_rx_intent(struct channel_ctx *ctx, size_t size,
	uint32_t *riid_ptr, size_t *intent_size, void **cookie)
{
	struct glink_core_rx_intent *intent;
	struct glink_core_rx_intent *intent_tmp;
	struct glink_core_rx_intent *best_intent = NULL;
	unsigned long flags;

	if (size >= GLINK_MAX_PKT_SIZE) {
		GLINK_ERR_CH(ctx, "%s: R[]:%zu Invalid size.\n", __func__,
				size);
		return -EINVAL;
	}

	if (riid_ptr == NULL)
		return -EINVAL;

	*riid_ptr = 0;
	spin_lock_irqsave(&ctx->rmt_rx_intent_lst_lock_lhc2, flags);
	if (ctx->transport_ptr->capabilities & GCAP_INTENTLESS) {
		*riid_ptr = ++ctx->dummy_riid;
		spin_unlock_irqrestore(&ctx->rmt_rx_intent_lst_lock_lhc2,
					flags);
		return 0;
	}
	list_for_each_entry_safe(intent, intent_tmp, &ctx->rmt_rx_intent_list,
			list) {
		if (intent->intent_size >= size) {
			if (!best_intent)
				best_intent = intent;
			else if (best_intent->intent_size > intent->intent_size)
				best_intent = intent;
			if (best_intent->intent_size == size)
				break;
		}
	}
	if (best_intent) {
		list_del(&best_intent->list);
		GLINK_DBG_CH(ctx,
				"%s: R[%u]:%zu Removed remote intent\n",
				__func__,
				best_intent->id,
				best_intent->intent_size);
		*riid_ptr = best_intent->id;
		*intent_size = best_intent->intent_size;
		*cookie = best_intent->cookie;
		kfree(best_intent);
		spin_unlock_irqrestore(
			&ctx->rmt_rx_intent_lst_lock_lhc2, flags);
		return 0;
	}
	spin_unlock_irqrestore(&ctx->rmt_rx_intent_lst_lock_lhc2, flags);
	return -EAGAIN;
}

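/*
 * Best-fit example (illustrative only, hypothetical intents): with queued
 * remote intents of sizes {8192, 2048, 4096} and a 1500-byte packet, the
 * scan keeps the smallest intent that still fits, so the 2048-byte intent
 * is popped; an exact size match would end the scan early. With no intent
 * >= 1500 bytes queued, the caller sees -EAGAIN and may request one from
 * the remote side. Intentless transports skip the search and hand out a
 * dummy riid instead.
 */
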
/**
 * ch_push_remote_rx_intent() - Registers a remote RX intent
 * @ctx:	Local channel context
 * @size:	Size of Intent
 * @riid:	Remote intent ID
 * @cookie:	Transport-specific cookie to cache
 *
 * This function adds a remote RX intent to the remote RX intent list.
 */
void ch_push_remote_rx_intent(struct channel_ctx *ctx, size_t size,
		uint32_t riid, void *cookie)
{
	struct glink_core_rx_intent *intent;
	unsigned long flags;
	gfp_t gfp_flag;

	if (size >= GLINK_MAX_PKT_SIZE) {
		GLINK_ERR_CH(ctx, "%s: R[%u]:%zu Invalid size.\n", __func__,
				riid, size);
		return;
	}

	if (ch_check_duplicate_riid(ctx, riid)) {
		GLINK_ERR_CH(ctx, "%s: R[%d]:%zu Duplicate RIID found\n",
				__func__, riid, size);
		return;
	}

	gfp_flag = (ctx->transport_ptr->capabilities & GCAP_AUTO_QUEUE_RX_INT) ?
							GFP_ATOMIC : GFP_KERNEL;
	intent = kzalloc(sizeof(struct glink_core_rx_intent), gfp_flag);
	if (!intent) {
		GLINK_ERR_CH(ctx,
			"%s: R[%u]:%zu Memory allocation for intent failed\n",
			__func__, riid, size);
		return;
	}
	intent->id = riid;
	intent->intent_size = size;
	intent->cookie = cookie;

	spin_lock_irqsave(&ctx->rmt_rx_intent_lst_lock_lhc2, flags);
	list_add_tail(&intent->list, &ctx->rmt_rx_intent_list);

	complete_all(&ctx->int_req_complete);
	if (ctx->notify_remote_rx_intent)
		ctx->notify_remote_rx_intent(ctx, ctx->user_priv, size);
	spin_unlock_irqrestore(&ctx->rmt_rx_intent_lst_lock_lhc2, flags);

	GLINK_DBG_CH(ctx, "%s: R[%u]:%zu Pushed remote intent\n", __func__,
			riid, size);
}

/**
 * ch_push_local_rx_intent() - Create an rx_intent
 * @ctx:	Local channel context
 * @pkt_priv:	Opaque private pointer provided by client to be returned later
 * @size:	Size of intent
 *
 * This function creates a local intent and adds it to the local
 * intent list.
 */
struct glink_core_rx_intent *ch_push_local_rx_intent(struct channel_ctx *ctx,
		const void *pkt_priv, size_t size)
{
	struct glink_core_rx_intent *intent;
	unsigned long flags;
	int ret;

	if (size >= GLINK_MAX_PKT_SIZE) {
		GLINK_ERR_CH(ctx,
			"%s: L[]:%zu Invalid size\n", __func__, size);
		return NULL;
	}

	intent = ch_get_free_local_rx_intent(ctx);
	if (!intent) {
		if (ctx->max_used_liid >= ctx->transport_ptr->max_iid) {
			GLINK_ERR_CH(ctx,
				"%s: All intents are in USE max_iid[%d]",
				__func__, ctx->transport_ptr->max_iid);
			return NULL;
		}

		intent = kzalloc(sizeof(struct glink_core_rx_intent),
				GFP_KERNEL);
		if (!intent) {
			GLINK_ERR_CH(ctx,
			"%s: Memory Allocation for local rx_intent failed",
				__func__);
			return NULL;
		}
		intent->id = ++ctx->max_used_liid;
	}

	/* transport is responsible for allocating/reserving for the intent */
	ret = ctx->transport_ptr->ops->allocate_rx_intent(
					ctx->transport_ptr->ops, size, intent);
	if (ret < 0) {
		/* intent data allocation failure */
		GLINK_ERR_CH(ctx, "%s: unable to allocate intent sz[%zu] %d",
			__func__, size, ret);
		spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
		list_add_tail(&intent->list,
				&ctx->local_rx_intent_free_list);
		spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1,
				flags);
		return NULL;
	}

	intent->pkt_priv = pkt_priv;
	intent->intent_size = size;
	intent->write_offset = 0;
	intent->pkt_size = 0;
	intent->bounce_buf = NULL;

	spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
	list_add_tail(&intent->list, &ctx->local_rx_intent_list);
	spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
	GLINK_DBG_CH(ctx, "%s: L[%u]:%zu Pushed intent\n", __func__,
			intent->id,
			intent->intent_size);
	return intent;
}

/**
 * ch_remove_local_rx_intent() - Find and remove RX Intent from list
 * @ctx:	Local channel context
 * @liid:	Local channel Intent ID
 *
 * This function parses the local intent list for a specific channel
 * and checks for the intent using the intent ID. If found, the intent
 * is deleted from the list.
 */
void ch_remove_local_rx_intent(struct channel_ctx *ctx, uint32_t liid)
{
	struct glink_core_rx_intent *intent, *tmp_intent;
	unsigned long flags;

	if (ctx->transport_ptr->max_iid < liid) {
		GLINK_ERR_CH(ctx, "%s: L[%u] Invalid ID.\n", __func__,
				liid);
		return;
	}

	spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
	list_for_each_entry_safe(intent, tmp_intent, &ctx->local_rx_intent_list,
								list) {
		if (liid == intent->id) {
			list_del(&intent->list);
			list_add_tail(&intent->list,
					&ctx->local_rx_intent_free_list);
			spin_unlock_irqrestore(
					&ctx->local_rx_intent_lst_lock_lhc1,
					flags);
			GLINK_DBG_CH(ctx,
			"%s: L[%u]:%zu moved intent to Free/unused list\n",
					__func__,
					intent->id,
					intent->intent_size);
			return;
		}
	}
	spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
	GLINK_ERR_CH(ctx, "%s: L[%u] Intent not found.\n", __func__,
			liid);
}

/**
 * ch_get_dummy_rx_intent() - Get a dummy rx_intent
 * @ctx:	Local channel context
 * @liid:	Local channel Intent ID
 *
 * This function parses the local intent list for a specific channel and
 * returns either a matching intent or allocates a dummy one if no matching
 * intents can be found.
 *
 * Return: Pointer to the intent if intent is found else NULL
 */
struct glink_core_rx_intent *ch_get_dummy_rx_intent(struct channel_ctx *ctx,
		uint32_t liid)
{
	struct glink_core_rx_intent *intent;
	unsigned long flags;

	spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
	if (!list_empty(&ctx->local_rx_intent_list)) {
		intent = list_first_entry(&ctx->local_rx_intent_list,
					  struct glink_core_rx_intent, list);
		spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1,
					flags);
		return intent;
	}
	spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);

	intent = ch_get_free_local_rx_intent(ctx);
	if (!intent) {
		intent = kzalloc(sizeof(struct glink_core_rx_intent),
				GFP_ATOMIC);
		if (!intent) {
			GLINK_ERR_CH(ctx,
			"%s: Memory Allocation for local rx_intent failed",
				__func__);
			return NULL;
		}
		intent->id = ++ctx->max_used_liid;
	}
	intent->intent_size = 0;
	intent->write_offset = 0;
	intent->pkt_size = 0;
	intent->bounce_buf = NULL;
	intent->pkt_priv = NULL;

	spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
	list_add_tail(&intent->list, &ctx->local_rx_intent_list);
	spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
	GLINK_DBG_CH(ctx, "%s: L[%u]:%zu Pushed intent\n", __func__,
			intent->id,
			intent->intent_size);
	return intent;
}

/**
 * ch_get_local_rx_intent() - Search for an rx_intent
 * @ctx:	Local channel context
 * @liid:	Local channel Intent ID
 *
 * This function parses the local intent list for a specific channel
 * and checks for the intent using the intent ID. If found, a pointer to
 * the intent is returned.
 *
 * Return: Pointer to the intent if intent is found else NULL
 */
struct glink_core_rx_intent *ch_get_local_rx_intent(struct channel_ctx *ctx,
		uint32_t liid)
{
	struct glink_core_rx_intent *intent;
	unsigned long flags;

	if (ctx->transport_ptr->max_iid < liid) {
		GLINK_ERR_CH(ctx, "%s: L[%u] Invalid ID.\n", __func__,
				liid);
		return NULL;
	}

	if (ctx->transport_ptr->capabilities & GCAP_INTENTLESS)
		return ch_get_dummy_rx_intent(ctx, liid);

	spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
	list_for_each_entry(intent, &ctx->local_rx_intent_list, list) {
		if (liid == intent->id) {
			spin_unlock_irqrestore(
				&ctx->local_rx_intent_lst_lock_lhc1, flags);
			return intent;
		}
	}
	spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
	GLINK_ERR_CH(ctx, "%s: L[%u] Intent not found.\n", __func__,
			liid);
	return NULL;
}

/**
 * ch_set_local_rx_intent_notified() - Add an rx intent to the local intent
 *				notified list
 * @ctx:	Local channel context
 * @intent_ptr:	Pointer to the local intent
 *
 * This function parses the local intent list for a specific channel
 * and checks for the intent. If found, the function deletes the intent
 * from the local_rx_intent list and adds it to the local_rx_intent_notified
 * list.
 */
void ch_set_local_rx_intent_notified(struct channel_ctx *ctx,
		struct glink_core_rx_intent *intent_ptr)
{
	struct glink_core_rx_intent *tmp_intent, *intent;
	unsigned long flags;

	spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
	list_for_each_entry_safe(intent, tmp_intent, &ctx->local_rx_intent_list,
								list) {
		if (intent == intent_ptr) {
			list_del(&intent->list);
			list_add_tail(&intent->list,
					&ctx->local_rx_intent_ntfy_list);
			GLINK_DBG_CH(ctx,
					"%s: L[%u]:%zu Moved intent %s",
					__func__,
					intent_ptr->id,
					intent_ptr->intent_size,
					"from local to notify list\n");
			spin_unlock_irqrestore(
					&ctx->local_rx_intent_lst_lock_lhc1,
					flags);
			return;
		}
	}
	spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
	GLINK_ERR_CH(ctx, "%s: L[%u] Intent not found.\n", __func__,
			intent_ptr->id);
}

/**
 * ch_get_local_rx_intent_notified() - Find rx intent in local notified list
 * @ctx:	Local channel context
 * @ptr:	Pointer to the rx intent
 *
 * This function parses the local intent notify list for a specific channel
 * and checks for the intent.
 *
 * Return: Pointer to the intent if intent is found else NULL.
 */
struct glink_core_rx_intent *ch_get_local_rx_intent_notified(
		struct channel_ctx *ctx, const void *ptr)
{
	struct glink_core_rx_intent *ptr_intent;
	unsigned long flags;

	spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
	list_for_each_entry(ptr_intent, &ctx->local_rx_intent_ntfy_list,
								list) {
		if (ptr_intent->data == ptr || ptr_intent->iovec == ptr ||
		    ptr_intent->bounce_buf == ptr) {
			spin_unlock_irqrestore(
					&ctx->local_rx_intent_lst_lock_lhc1,
					flags);
			return ptr_intent;
		}
	}
	spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
	GLINK_ERR_CH(ctx, "%s: Local intent not found\n", __func__);
	return NULL;
}

/**
 * ch_remove_local_rx_intent_notified() - Remove an rx intent from the local
 *				intent notified list
 * @ctx:	Local channel context
 * @liid_ptr:	Pointer to the rx intent
 * @reuse:	Reuse the rx intent
 *
 * This function parses the local intent notify list for a specific channel
 * and checks for the intent. If found, the function deletes the intent
 * from the local_rx_intent_notified list and adds it to the
 * local_rx_intent_free list.
 */
void ch_remove_local_rx_intent_notified(struct channel_ctx *ctx,
	struct glink_core_rx_intent *liid_ptr, bool reuse)
{
	struct glink_core_rx_intent *ptr_intent, *tmp_intent;
	unsigned long flags;

	spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
	list_for_each_entry_safe(ptr_intent, tmp_intent,
				&ctx->local_rx_intent_ntfy_list, list) {
		if (ptr_intent == liid_ptr) {
			list_del(&ptr_intent->list);
			GLINK_DBG_CH(ctx,
				"%s: L[%u]:%zu Removed intent from notify list\n",
				__func__,
				ptr_intent->id,
				ptr_intent->intent_size);
			kfree(ptr_intent->bounce_buf);
			ptr_intent->bounce_buf = NULL;
			ptr_intent->write_offset = 0;
			ptr_intent->pkt_size = 0;
			if (reuse)
				list_add_tail(&ptr_intent->list,
					&ctx->local_rx_intent_list);
			else
				list_add_tail(&ptr_intent->list,
					&ctx->local_rx_intent_free_list);
			spin_unlock_irqrestore(
					&ctx->local_rx_intent_lst_lock_lhc1,
					flags);
			return;
		}
	}
	spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
	GLINK_ERR_CH(ctx, "%s: L[%u] Intent not found.\n", __func__,
			liid_ptr->id);
}

1646/**
1647 * ch_get_free_local_rx_intent() - Return a rx intent in local intent
1648 * free list
1649 * @ctx: Local channel context
1650 *
1651 * This function parses the local_rx_intent_free list for a specific channel
1652 * and checks for a free, unused intent. If one is found, the function
1653 * returns a pointer to it, else NULL.
1654 */
1655struct glink_core_rx_intent *ch_get_free_local_rx_intent(
1656 struct channel_ctx *ctx)
1657{
1658 struct glink_core_rx_intent *ptr_intent = NULL;
1659 unsigned long flags;
1660
1661 spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
1662 if (!list_empty(&ctx->local_rx_intent_free_list)) {
1663 ptr_intent = list_first_entry(&ctx->local_rx_intent_free_list,
1664 struct glink_core_rx_intent,
1665 list);
1666 list_del(&ptr_intent->list);
1667 }
1668 spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
1669 return ptr_intent;
1670}
1671
1672/**
1673 * ch_purge_intent_lists() - Remove all intents for a channel
1674 *
1675 * @ctx: Local channel context
1676 *
1677 * This function parses the local intent lists for a specific channel and
1678 * removes and frees all intents.
1679 */
1680void ch_purge_intent_lists(struct channel_ctx *ctx)
1681{
1682 struct glink_core_rx_intent *ptr_intent, *tmp_intent;
1683 struct glink_core_tx_pkt *tx_info, *tx_info_temp;
1684 unsigned long flags;
1685
1686 spin_lock_irqsave(&ctx->tx_lists_lock_lhc3, flags);
1687 list_for_each_entry_safe(tx_info, tx_info_temp, &ctx->tx_active,
1688 list_node) {
1689 ctx->notify_tx_abort(ctx, ctx->user_priv,
1690 tx_info->pkt_priv);
1691 rwref_put(&tx_info->pkt_ref);
1692 }
1693 spin_unlock_irqrestore(&ctx->tx_lists_lock_lhc3, flags);
1694
Chris Lew0fa91ef2016-11-14 18:11:50 -08001695 spin_lock_irqsave(&ctx->tx_pending_rmt_done_lock_lhc4, flags);
1696 list_for_each_entry_safe(tx_info, tx_info_temp,
1697 &ctx->tx_pending_remote_done, list_done) {
1698 ctx->notify_tx_abort(ctx, ctx->user_priv, tx_info->pkt_priv);
1699 rwref_put(&tx_info->pkt_ref);
1700 }
1701 spin_unlock_irqrestore(&ctx->tx_pending_rmt_done_lock_lhc4, flags);
1702
Chris Lewfa6135e2016-08-01 13:29:46 -07001703 spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
1704 list_for_each_entry_safe(ptr_intent, tmp_intent,
1705 &ctx->local_rx_intent_list, list) {
1706 ctx->notify_rx_abort(ctx, ctx->user_priv,
1707 ptr_intent->pkt_priv);
Dhoat Harpal41ef6642017-09-23 01:41:24 +05301708 ctx->transport_ptr->ops->deallocate_rx_intent(
1709 ctx->transport_ptr->ops, ptr_intent);
Chris Lewfa6135e2016-08-01 13:29:46 -07001710 list_del(&ptr_intent->list);
1711 kfree(ptr_intent);
1712 }
1713
1714 if (!list_empty(&ctx->local_rx_intent_ntfy_list))
1715 /*
1716 * The client is still processing an rx_notify() call and has
1717 * not yet called glink_rx_done() to return the pointer to us.
1718 * glink_rx_done() will do the appropriate cleanup when this
1719 * call occurs, but log a message here just for internal state
1720 * tracking.
1721 */
1722 GLINK_INFO_CH(ctx, "%s: waiting on glink_rx_done()\n",
1723 __func__);
1724
1725 list_for_each_entry_safe(ptr_intent, tmp_intent,
1726 &ctx->local_rx_intent_free_list, list) {
1727 list_del(&ptr_intent->list);
1728 kfree(ptr_intent);
1729 }
1730 ctx->max_used_liid = 0;
1731 spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
1732
1733 spin_lock_irqsave(&ctx->rmt_rx_intent_lst_lock_lhc2, flags);
1734 list_for_each_entry_safe(ptr_intent, tmp_intent,
1735 &ctx->rmt_rx_intent_list, list) {
1736 list_del(&ptr_intent->list);
1737 kfree(ptr_intent);
1738 }
1739 spin_unlock_irqrestore(&ctx->rmt_rx_intent_lst_lock_lhc2, flags);
1740}
1741
1742/**
1743 * ch_get_tx_pending_remote_done() - Look up a packet that is waiting for
1744 * the remote-done notification.
1745 * @ctx: Pointer to the channel context
1746 * @riid: riid of transmit packet
1747 *
1748 * This function looks up a packet in the tx_pending_remote_done list by @riid.
1749 *
1750 * The tx_lists_lock_lhc3 lock needs to be held while calling this function.
1751 *
1752 * Return: Pointer to the tx packet
1753 */
1754struct glink_core_tx_pkt *ch_get_tx_pending_remote_done(
1755 struct channel_ctx *ctx, uint32_t riid)
1756{
1757 struct glink_core_tx_pkt *tx_pkt;
1758 unsigned long flags;
1759
1760 if (!ctx) {
1761		GLINK_ERR("%s: Invalid context pointer\n", __func__);
1762 return NULL;
1763 }
1764
1765 spin_lock_irqsave(&ctx->tx_pending_rmt_done_lock_lhc4, flags);
1766 list_for_each_entry(tx_pkt, &ctx->tx_pending_remote_done, list_done) {
1767 if (tx_pkt->riid == riid) {
1768 if (tx_pkt->size_remaining) {
1769				GLINK_ERR_CH(ctx, "%s: R[%u] TX not complete\n",
1770 __func__, riid);
1771 tx_pkt = NULL;
1772 }
1773 spin_unlock_irqrestore(
1774 &ctx->tx_pending_rmt_done_lock_lhc4, flags);
1775 return tx_pkt;
1776 }
1777 }
1778 spin_unlock_irqrestore(&ctx->tx_pending_rmt_done_lock_lhc4, flags);
1779
1780 GLINK_ERR_CH(ctx, "%s: R[%u] Tx packet for intent not found.\n",
1781 __func__, riid);
1782 return NULL;
1783}
1784
1785/**
1786 * ch_remove_tx_pending_remote_done() - Removes a packet transmit context for a
1787 * packet that is waiting for the remote-done notification
1788 * @ctx: Pointer to the channel context
1789 * @tx_pkt: Pointer to the transmit packet
1790 *
1791 * This function parses through tx_pending_remote_done and removes a
1792 * packet that matches @tx_pkt.
1793 */
1794void ch_remove_tx_pending_remote_done(struct channel_ctx *ctx,
1795 struct glink_core_tx_pkt *tx_pkt)
1796{
1797 struct glink_core_tx_pkt *local_tx_pkt, *tmp_tx_pkt;
1798 unsigned long flags;
1799
1800 if (!ctx || !tx_pkt) {
1801		GLINK_ERR("%s: Invalid input\n", __func__);
1802 return;
1803 }
1804
1805 spin_lock_irqsave(&ctx->tx_pending_rmt_done_lock_lhc4, flags);
1806 list_for_each_entry_safe(local_tx_pkt, tmp_tx_pkt,
1807 &ctx->tx_pending_remote_done, list_done) {
1808 if (tx_pkt == local_tx_pkt) {
1809 list_del_init(&tx_pkt->list_done);
1810 GLINK_DBG_CH(ctx,
1811 "%s: R[%u] Removed Tx packet for intent\n",
1812 __func__,
1813 tx_pkt->riid);
1814 rwref_put(&tx_pkt->pkt_ref);
1815 spin_unlock_irqrestore(
1816 &ctx->tx_pending_rmt_done_lock_lhc4, flags);
1817 return;
1818 }
1819 }
1820 spin_unlock_irqrestore(&ctx->tx_pending_rmt_done_lock_lhc4, flags);
1821
1822	GLINK_ERR_CH(ctx, "%s: R[%u] Tx packet for intent not found\n", __func__,
1823 tx_pkt->riid);
1824}
1825
1826/**
1827 * glink_add_free_lcid_list() - add the lcid of a channel that is about to be
1828 *			deleted to the free lcid list
1829 * @ctx: Pointer to channel context.
1830 */
1831static void glink_add_free_lcid_list(struct channel_ctx *ctx)
1832{
1833 struct channel_lcid *free_lcid;
1834 unsigned long flags;
1835
1836 free_lcid = kzalloc(sizeof(*free_lcid), GFP_KERNEL);
1837 if (!free_lcid) {
1838 GLINK_ERR(
1839 "%s: allocation failed on xprt:edge [%s:%s] for lcid [%d]\n",
1840 __func__, ctx->transport_ptr->name,
1841 ctx->transport_ptr->edge, ctx->lcid);
1842 return;
1843 }
1844 free_lcid->lcid = ctx->lcid;
1845 spin_lock_irqsave(&ctx->transport_ptr->xprt_ctx_lock_lhb1, flags);
1846 list_add_tail(&free_lcid->list_node,
1847 &ctx->transport_ptr->free_lcid_list);
1848 spin_unlock_irqrestore(&ctx->transport_ptr->xprt_ctx_lock_lhb1,
1849 flags);
1850}
1851
1852/**
1853 * glink_ch_ctx_release - Free the channel context
1854 * @ch_st_lock:	handle to the rwref_lock associated with the channel
1855 *
1856 * This should only be called when the reference count associated with the
1857 * channel goes to zero.
1858 */
1859static void glink_ch_ctx_release(struct rwref_lock *ch_st_lock)
1860{
1861 struct channel_ctx *ctx = container_of(ch_st_lock, struct channel_ctx,
1862 ch_state_lhb2);
1863 ctx->transport_ptr = NULL;
1864 kfree(ctx);
1865 GLINK_INFO("%s: freed the channel ctx in pid [%d]\n", __func__,
1866 current->pid);
1867 ctx = NULL;
1868}
1869
1870/**
1871 * ch_name_to_ch_ctx_create() - look up a channel by name, create the channel
Dhoat Harpal390dd202017-04-11 12:32:33 +05301872 *	if it is not found, and take a reference on the context.
Chris Lewfa6135e2016-08-01 13:29:46 -07001873 * @xprt_ctx: Transport to search for a matching channel.
1874 * @name: Name of the desired channel.
Dhoat Harpalae706e12018-01-18 00:29:20 +05301875 * @local:	True if called from a local open, false otherwise
Chris Lewfa6135e2016-08-01 13:29:46 -07001876 *
1877 * Return: The channel corresponding to @name, NULL if a matching channel was
1878 * not found AND a new channel could not be created.
1879 */
1880static struct channel_ctx *ch_name_to_ch_ctx_create(
1881 struct glink_core_xprt_ctx *xprt_ctx,
Dhoat Harpalae706e12018-01-18 00:29:20 +05301882 const char *name, bool local)
Chris Lewfa6135e2016-08-01 13:29:46 -07001883{
1884 struct channel_ctx *entry;
1885 struct channel_ctx *ctx;
1886 struct channel_ctx *temp;
1887 unsigned long flags;
1888 struct channel_lcid *flcid;
1889
1890 ctx = kzalloc(sizeof(struct channel_ctx), GFP_KERNEL);
1891 if (!ctx) {
1892		GLINK_ERR_XPRT(xprt_ctx, "%s: Failed to allocate ctx, %s",
1893			       __func__,
1894			       "checking if there is one existing\n");
1895 goto check_ctx;
1896 }
1897
1898 ctx->local_open_state = GLINK_CHANNEL_CLOSED;
1899 strlcpy(ctx->name, name, GLINK_NAME_SIZE);
1900 rwref_lock_init(&ctx->ch_state_lhb2, glink_ch_ctx_release);
1901 INIT_LIST_HEAD(&ctx->tx_ready_list_node);
1902 init_completion(&ctx->int_req_ack_complete);
1903 init_completion(&ctx->int_req_complete);
1904 INIT_LIST_HEAD(&ctx->local_rx_intent_list);
1905 INIT_LIST_HEAD(&ctx->local_rx_intent_ntfy_list);
1906 INIT_LIST_HEAD(&ctx->local_rx_intent_free_list);
1907 spin_lock_init(&ctx->local_rx_intent_lst_lock_lhc1);
1908 INIT_LIST_HEAD(&ctx->rmt_rx_intent_list);
1909 spin_lock_init(&ctx->rmt_rx_intent_lst_lock_lhc2);
1910 INIT_LIST_HEAD(&ctx->tx_active);
1911 spin_lock_init(&ctx->tx_pending_rmt_done_lock_lhc4);
1912 INIT_LIST_HEAD(&ctx->tx_pending_remote_done);
1913 spin_lock_init(&ctx->tx_lists_lock_lhc3);
1914
1915check_ctx:
1916 rwref_write_get(&xprt_ctx->xprt_state_lhb0);
1917 if (xprt_ctx->local_state != GLINK_XPRT_OPENED) {
1918 kfree(ctx);
1919 rwref_write_put(&xprt_ctx->xprt_state_lhb0);
1920 return NULL;
1921 }
1922 spin_lock_irqsave(&xprt_ctx->xprt_ctx_lock_lhb1, flags);
1923 list_for_each_entry_safe(entry, temp, &xprt_ctx->channels,
1924 port_list_node)
1925 if (!strcmp(entry->name, name) && !entry->pending_delete) {
Dhoat Harpalae706e12018-01-18 00:29:20 +05301926 rwref_get(&entry->ch_state_lhb2);
1927 /* port already exists */
1928 if (entry->local_open_state != GLINK_CHANNEL_CLOSED
1929 && local) {
1930 /* not ready to be re-opened */
1931 GLINK_INFO_CH_XPRT(entry, xprt_ctx,
1932 "%s: Ch not ready. State: %u\n",
1933 __func__, entry->local_open_state);
1934 rwref_put(&entry->ch_state_lhb2);
1935 entry = NULL;
1936 } else if (local) {
1937 entry->local_open_state =
1938 GLINK_CHANNEL_OPENING;
1939 }
Chris Lewfa6135e2016-08-01 13:29:46 -07001940 spin_unlock_irqrestore(&xprt_ctx->xprt_ctx_lock_lhb1,
1941 flags);
1942 kfree(ctx);
1943 rwref_write_put(&xprt_ctx->xprt_state_lhb0);
1944 return entry;
1945 }
1946
1947 if (ctx) {
1948 if (list_empty(&xprt_ctx->free_lcid_list)) {
1949 if (xprt_ctx->next_lcid > xprt_ctx->max_cid) {
1950 /* no more channels available */
1951 GLINK_ERR_XPRT(xprt_ctx,
1952 "%s: unable to exceed %u channels\n",
1953 __func__, xprt_ctx->max_cid);
1954 spin_unlock_irqrestore(
1955 &xprt_ctx->xprt_ctx_lock_lhb1,
1956 flags);
1957 kfree(ctx);
1958 rwref_write_put(&xprt_ctx->xprt_state_lhb0);
1959 return NULL;
1960 }
1961 ctx->lcid = xprt_ctx->next_lcid++;
1962 } else {
1963 flcid = list_first_entry(&xprt_ctx->free_lcid_list,
1964 struct channel_lcid, list_node);
1965 ctx->lcid = flcid->lcid;
1966 list_del(&flcid->list_node);
1967 kfree(flcid);
1968 }
1969
Dhoat Harpalf59ff972017-05-12 20:33:21 +05301970 ctx->transport_ptr = xprt_ctx;
Dhoat Harpal76d9ba52017-06-20 21:12:42 +05301971 rwref_get(&ctx->ch_state_lhb2);
Dhoat Harpalae706e12018-01-18 00:29:20 +05301972 if (local)
1973 ctx->local_open_state = GLINK_CHANNEL_OPENING;
Chris Lewfa6135e2016-08-01 13:29:46 -07001974 list_add_tail(&ctx->port_list_node, &xprt_ctx->channels);
1975
1976 GLINK_INFO_PERF_CH_XPRT(ctx, xprt_ctx,
1977 "%s: local:GLINK_CHANNEL_CLOSED\n",
1978 __func__);
1979 }
1980 spin_unlock_irqrestore(&xprt_ctx->xprt_ctx_lock_lhb1, flags);
1981 rwref_write_put(&xprt_ctx->xprt_state_lhb0);
1982 mutex_lock(&xprt_ctx->xprt_dbgfs_lock_lhb4);
1983 if (ctx != NULL)
1984 glink_debugfs_add_channel(ctx, xprt_ctx);
1985 mutex_unlock(&xprt_ctx->xprt_dbgfs_lock_lhb4);
1986 return ctx;
1987}
1988
1989/**
1990 * ch_add_rcid() - add a remote channel identifier to an existing channel
1991 * @xprt_ctx: Transport the channel resides on.
1992 * @ctx: Channel receiving the identifier.
1993 * @rcid: The remote channel identifier.
1994 */
1995static void ch_add_rcid(struct glink_core_xprt_ctx *xprt_ctx,
1996 struct channel_ctx *ctx,
1997 uint32_t rcid)
1998{
1999 ctx->rcid = rcid;
2000}
2001
2002/**
2003 * ch_update_local_state() - Update the local channel state
2004 * @ctx: Pointer to channel context.
2005 * @lstate: Local channel state.
2006 *
2007 * Return: True if the channel is fully closed as a result of this update,
2008 * false otherwise.
2009 */
2010static bool ch_update_local_state(struct channel_ctx *ctx,
2011 enum local_channel_state_e lstate)
2012{
2013 bool is_fully_closed;
2014
2015 rwref_write_get(&ctx->ch_state_lhb2);
2016 ctx->local_open_state = lstate;
2017 is_fully_closed = ch_is_fully_closed(ctx);
2018 rwref_write_put(&ctx->ch_state_lhb2);
2019
2020 return is_fully_closed;
2021}
2022
2023/**
2024 * ch_update_rmt_state() - Update the remote channel state
2025 * @ctx:	Pointer to channel context.
2026 * @rstate:	Remote channel state.
2027 *
2028 * Return: True if the channel is fully closed as result of this update,
2029 * false otherwise.
2030 */
2031static bool ch_update_rmt_state(struct channel_ctx *ctx, bool rstate)
2032{
2033 bool is_fully_closed;
2034
2035 rwref_write_get(&ctx->ch_state_lhb2);
2036 ctx->remote_opened = rstate;
2037 is_fully_closed = ch_is_fully_closed(ctx);
2038 rwref_write_put(&ctx->ch_state_lhb2);
2039
2040 return is_fully_closed;
2041}
2042
2043/*
2044 * ch_is_fully_opened() - Verify if a channel is open
2045 * ctx: Pointer to channel context
2046 *
2047 * Return: True if open, else flase
2048 */
2049static bool ch_is_fully_opened(struct channel_ctx *ctx)
2050{
2051 if (ctx->remote_opened && ctx->local_open_state == GLINK_CHANNEL_OPENED)
2052 return true;
2053
2054 return false;
2055}
2056
2057/*
2058 * ch_is_fully_closed() - Verify if a channel is closed on both sides
2059 * @ctx: Pointer to channel context
2060 * Return: True if closed on both sides, else false
2061 */
2062static bool ch_is_fully_closed(struct channel_ctx *ctx)
2063{
2064 if (!ctx->remote_opened &&
2065 ctx->local_open_state == GLINK_CHANNEL_CLOSED)
2066 return true;
2067
2068 return false;
2069}
2070
2071/**
2072 * find_open_transport() - find a specific open transport
2073 * @edge: Edge the transport is on.
2074 * @name: Name of the transport (or NULL if no preference)
2075 * @initial_xprt: The specified transport is the start for migration
2076 * @best_id: The best transport found for this connection
2077 *
2078 * Find an open transport corresponding to the specified @name and @edge. @edge
2079 * is expected to be valid. @name is expected to be NULL (unspecified) or
2080 * valid. If @name is not specified, then the best transport found on the
2081 * specified edge will be returned.
2082 *
2083 * Return: Transport with the specified name on the specified edge, if open.
2084 * NULL if the transport exists, but is not fully open. ENODEV if no such
2085 * transport exists.
2086 */
2087static struct glink_core_xprt_ctx *find_open_transport(const char *edge,
2088 const char *name,
2089 bool initial_xprt,
2090 uint16_t *best_id)
2091{
2092 struct glink_core_xprt_ctx *xprt = NULL;
2093 struct glink_core_xprt_ctx *best_xprt = NULL;
2094 struct glink_core_xprt_ctx *ret = NULL;
2095 bool first = true;
2096
2097 ret = (struct glink_core_xprt_ctx *)ERR_PTR(-ENODEV);
2098 *best_id = USHRT_MAX;
2099
2100 mutex_lock(&transport_list_lock_lha0);
2101 list_for_each_entry(xprt, &transport_list, list_node) {
2102 if (strcmp(edge, xprt->edge))
2103 continue;
2104 if (first) {
2105 first = false;
2106 ret = NULL;
2107 }
2108 if (!xprt_is_fully_opened(xprt))
2109 continue;
2110
2111 if (xprt->id < *best_id) {
2112 *best_id = xprt->id;
2113 best_xprt = xprt;
2114 }
2115
2116 /*
2117 * Braces are required in this instance because the else will
2118 * attach to the wrong if otherwise.
2119 */
2120 if (name) {
2121 if (!strcmp(name, xprt->name))
2122 ret = xprt;
2123 } else {
2124 ret = best_xprt;
2125 }
2126 }
2127
2128 mutex_unlock(&transport_list_lock_lha0);
2129
2130 if (IS_ERR_OR_NULL(ret))
2131 return ret;
2132 if (!initial_xprt)
2133 *best_id = ret->id;
2134
2135 return ret;
2136}
2137
2138/**
2139 * xprt_is_fully_opened() - check the open status of a transport
2140 * @xprt: Transport being checked.
2141 *
2142 * Return: True if the transport is fully opened, false otherwise.
2143 */
2144static bool xprt_is_fully_opened(struct glink_core_xprt_ctx *xprt)
2145{
2146 if (xprt->remote_neg_completed &&
2147 xprt->local_state == GLINK_XPRT_OPENED)
2148 return true;
2149
2150 return false;
2151}
2152
2153/**
2154 * glink_dummy_notify_rx_intent_req() - Dummy RX Request
2155 *
2156 * @handle: Channel handle (ignored)
2157 * @priv: Private data pointer (ignored)
2158 * @req_size: Requested size (ignored)
2159 *
2160 * Dummy RX intent request if client does not implement the optional callback
2161 * function.
2162 *
2163 * Return: False
2164 */
2165static bool glink_dummy_notify_rx_intent_req(void *handle, const void *priv,
2166 size_t req_size)
2167{
2168 return false;
2169}
2170
2171/**
2172 * glink_dummy_notify_rx_sigs() - Dummy signal callback
2173 *
2174 * @handle: Channel handle (ignored)
2175 * @priv: Private data pointer (ignored)
2176 * @old_sigs:	Previous signal value (ignored)
2177 * @new_sigs:	New signal value (ignored)
2178 *
2179 * Dummy signal callback if client does not implement the optional callback
2180 * function.
2182 */
2183static void glink_dummy_notify_rx_sigs(void *handle, const void *priv,
2184 uint32_t old_sigs, uint32_t new_sigs)
2185{
2186 /* intentionally left blank */
2187}
2188
2189/**
2190 * glink_dummy_rx_abort() - Dummy rx abort callback
2191 *
2192 * handle: Channel handle (ignored)
2193 * priv: Private data pointer (ignored)
2194 * pkt_priv: Private intent data pointer (ignored)
2195 *
2196 * Dummy rx abort callback if client does not implement the optional callback
2197 * function.
2198 */
2199static void glink_dummy_notify_rx_abort(void *handle, const void *priv,
2200 const void *pkt_priv)
2201{
2202 /* intentionally left blank */
2203}
2204
2205/**
2206 * glink_dummy_notify_tx_abort() - Dummy tx abort callback
2207 *
2208 * @handle: Channel handle (ignored)
2209 * @priv: Private data pointer (ignored)
2210 * @pkt_priv: Private intent data pointer (ignored)
2211 *
2212 * Dummy tx abort callback if client does not implement the optional callback
2213 * function.
2214 */
2215static void glink_dummy_notify_tx_abort(void *handle, const void *priv,
2216 const void *pkt_priv)
2217{
2218 /* intentionally left blank */
2219}
2220
2221/**
2222 * dummy_poll() - a dummy poll() for transports that don't define one
2223 * @if_ptr: The transport interface handle for this transport.
2224 * @lcid: The channel to poll.
2225 *
2226 * Return: An error to indicate that this operation is unsupported.
2227 */
2228static int dummy_poll(struct glink_transport_if *if_ptr, uint32_t lcid)
2229{
2230 return -EOPNOTSUPP;
2231}
2232
2233/**
2234 * dummy_reuse_rx_intent() - a dummy reuse_rx_intent() for transports that
2235 * don't define one
2236 * @if_ptr: The transport interface handle for this transport.
2237 * @intent: The intent to reuse.
2238 *
2239 * Return: Success.
2240 */
2241static int dummy_reuse_rx_intent(struct glink_transport_if *if_ptr,
2242 struct glink_core_rx_intent *intent)
2243{
2244 return 0;
2245}
2246
2247/**
2248 * dummy_mask_rx_irq() - a dummy mask_rx_irq() for transports that don't define
2249 * one
2250 * @if_ptr: The transport interface handle for this transport.
2251 * @lcid: The local channel id for this channel.
2252 * @mask: True to mask the irq, false to unmask.
2253 * @pstruct: Platform defined structure with data necessary for masking.
2254 *
2255 * Return: An error to indicate that this operation is unsupported.
2256 */
2257static int dummy_mask_rx_irq(struct glink_transport_if *if_ptr, uint32_t lcid,
2258 bool mask, void *pstruct)
2259{
2260 return -EOPNOTSUPP;
2261}
2262
2263/**
2264 * dummy_wait_link_down() - a dummy wait_link_down() for transports that don't
2265 * define one
2266 * @if_ptr: The transport interface handle for this transport.
2267 *
2268 * Return: An error to indicate that this operation is unsupported.
2269 */
2270static int dummy_wait_link_down(struct glink_transport_if *if_ptr)
2271{
2272 return -EOPNOTSUPP;
2273}
2274
2275/**
2276 * dummy_allocate_rx_intent() - a dummy RX intent allocation function that does
2277 * not allocate anything
2278 * @if_ptr: The transport the intent is associated with.
2279 * @size: Size of intent.
2280 * @intent: Pointer to the intent structure.
2281 *
2282 * Return: Success.
2283 */
2284static int dummy_allocate_rx_intent(struct glink_transport_if *if_ptr,
2285 size_t size, struct glink_core_rx_intent *intent)
2286{
2287 return 0;
2288}
2289
2290/**
2291 * dummy_tx_cmd_tracer_pkt() - a dummy tracer packet tx cmd for transports
2292 * that don't define one
2293 * @if_ptr: The transport interface handle for this transport.
2294 * @lcid: The channel in which the tracer packet is transmitted.
2295 * @pctx: Context of the packet to be transmitted.
2296 *
2297 * Return: 0.
2298 */
2299static int dummy_tx_cmd_tracer_pkt(struct glink_transport_if *if_ptr,
2300 uint32_t lcid, struct glink_core_tx_pkt *pctx)
2301{
2302 pctx->size_remaining = 0;
2303 return 0;
2304}
2305
2306/**
2307 * dummy_deallocate_rx_intent() - a dummy rx intent deallocation function that
2308 * does not deallocate anything
2309 * @if_ptr: The transport the intent is associated with.
2310 * @intent: Pointer to the intent structure.
2311 *
2312 * Return: Success.
2313 */
2314static int dummy_deallocate_rx_intent(struct glink_transport_if *if_ptr,
2315 struct glink_core_rx_intent *intent)
2316{
2317 return 0;
2318}
2319
2320/**
2321 * dummy_tx_cmd_local_rx_intent() - dummy local rx intent request
2322 * @if_ptr: The transport to transmit on.
2323 * @lcid: The local channel id to encode.
2324 * @size: The intent size to encode.
2325 * @liid: The local intent id to encode.
2326 *
2327 * Return: Success.
2328 */
2329static int dummy_tx_cmd_local_rx_intent(struct glink_transport_if *if_ptr,
2330 uint32_t lcid, size_t size, uint32_t liid)
2331{
2332 return 0;
2333}
2334
2335/**
2336 * dummy_tx_cmd_local_rx_done() - dummy rx done command
2337 * @if_ptr: The transport to transmit on.
2338 * @lcid: The local channel id to encode.
2339 * @liid: The local intent id to encode.
2340 * @reuse: Reuse the consumed intent.
2341 */
2342static void dummy_tx_cmd_local_rx_done(struct glink_transport_if *if_ptr,
2343 uint32_t lcid, uint32_t liid, bool reuse)
2344{
2345 /* intentionally left blank */
2346}
2347
2348/**
2349 * dummy_tx() - dummy tx() that does not send anything
2350 * @if_ptr: The transport to transmit on.
2351 * @lcid: The local channel id to encode.
2352 * @pctx: The data to encode.
2353 *
2354 * Return: Number of bytes written i.e. zero.
2355 */
2356static int dummy_tx(struct glink_transport_if *if_ptr, uint32_t lcid,
2357 struct glink_core_tx_pkt *pctx)
2358{
2359 return 0;
2360}
2361
2362/**
2363 * dummy_tx_cmd_rx_intent_req() - dummy rx intent request function
2364 * @if_ptr: The transport to transmit on.
2365 * @lcid: The local channel id to encode.
2366 * @size: The requested intent size to encode.
2367 *
2368 * Return: Success.
2369 */
2370static int dummy_tx_cmd_rx_intent_req(struct glink_transport_if *if_ptr,
2371 uint32_t lcid, size_t size)
2372{
2373 return 0;
2374}
2375
2376/**
2377 * dummy_tx_cmd_rx_intent_req_ack() - dummy rx intent request ack
2378 * @if_ptr: The transport to transmit on.
2379 * @lcid: The local channel id to encode.
2380 * @granted: The request response to encode.
2381 *
2382 * Return: Success.
2383 */
2384static int dummy_tx_cmd_remote_rx_intent_req_ack(
2385 struct glink_transport_if *if_ptr,
2386 uint32_t lcid, bool granted)
2387{
2388 return 0;
2389}
2390
2391/**
2392 * dummy_tx_cmd_set_sigs() - dummy signals ack transmit function
2393 * @if_ptr: The transport to transmit on.
2394 * @lcid: The local channel id to encode.
2395 * @sigs: The signals to encode.
2396 *
2397 * Return: Success.
2398 */
2399static int dummy_tx_cmd_set_sigs(struct glink_transport_if *if_ptr,
2400 uint32_t lcid, uint32_t sigs)
2401{
2402 return 0;
2403}
2404
2405/**
2406 * dummy_tx_cmd_ch_close() - dummy channel close transmit function
2407 * @if_ptr: The transport to transmit on.
2408 * @lcid: The local channel id to encode.
2409 *
2410 * Return: Success.
2411 */
2412static int dummy_tx_cmd_ch_close(struct glink_transport_if *if_ptr,
2413 uint32_t lcid)
2414{
2415 return 0;
2416}
2417
2418/**
2419 * dummy_tx_cmd_ch_remote_close_ack() - dummy channel close ack sending function
2420 * @if_ptr: The transport to transmit on.
2421 * @rcid: The remote channel id to encode.
2422 */
2423static void dummy_tx_cmd_ch_remote_close_ack(struct glink_transport_if *if_ptr,
2424 uint32_t rcid)
2425{
2426 /* intentionally left blank */
2427}
2428
2429/**
Dhoat Harpal8e06fcc2017-08-18 16:06:48 +05302430 * dummy_tx_cmd_ch_open() - dummy channel open cmd sending function
2431 * @if_ptr: The transport to transmit on.
2432 * @lcid: The local channel id to encode.
2433 * @name: The channel name to encode.
2434 * @req_xprt: The transport the core would like to migrate this channel to.
2435 *
2436 * Return: 0 on success or standard Linux error code.
2437 */
2438static int dummy_tx_cmd_ch_open(struct glink_transport_if *if_ptr,
2439 uint32_t lcid, const char *name,
2440 uint16_t req_xprt)
2441{
2442 return -EOPNOTSUPP;
2443}
2444
2445/**
2446 * dummy_tx_cmd_ch_remote_open_ack() - convert a channel open ack cmd to wire
2447 * format and transmit
2448 * @if_ptr: The transport to transmit on.
2449 * @rcid: The remote channel id to encode.
2450 * @xprt_resp: The response to a transport migration request.
2451 */
2452static void dummy_tx_cmd_ch_remote_open_ack(struct glink_transport_if *if_ptr,
2453 uint32_t rcid, uint16_t xprt_resp)
2454{
2455 /* intentionally left blank */
2456}
2457
2458/**
Chris Lewfa6135e2016-08-01 13:29:46 -07002459 * dummy_get_power_vote_ramp_time() - Dummy Power vote ramp time
2460 * @if_ptr: The transport to transmit on.
2461 * @state: The power state being requested from the transport.
2462 */
2463static unsigned long dummy_get_power_vote_ramp_time(
2464 struct glink_transport_if *if_ptr, uint32_t state)
2465{
2466 return (unsigned long)-EOPNOTSUPP;
2467}
2468
2469/**
2470 * dummy_power_vote() - Dummy Power vote operation
2471 * @if_ptr: The transport to transmit on.
2472 * @state: The power state being requested from the transport.
2473 */
2474static int dummy_power_vote(struct glink_transport_if *if_ptr,
2475 uint32_t state)
2476{
2477 return -EOPNOTSUPP;
2478}
2479
2480/**
2481 * dummy_power_unvote() - Dummy Power unvote operation
2482 * @if_ptr: The transport to transmit on.
2483 */
2484static int dummy_power_unvote(struct glink_transport_if *if_ptr)
2485{
2486 return -EOPNOTSUPP;
2487}
2488
2489/**
Chris Lewa9a78ae2017-05-11 16:47:37 -07002490 * dummy_rx_rt_vote() - Dummy RX Realtime thread vote
2491 * @if_ptr: The transport to transmit on.
2492 *
2493 */
2494static int dummy_rx_rt_vote(struct glink_transport_if *if_ptr)
2495{
2496 return -EOPNOTSUPP;
2497}
2498
2499/**
2500 * dummy_rx_rt_unvote() - Dummy RX Realtime thread unvote
2501 * @if_ptr: The transport to transmit on.
2502 */
2503static int dummy_rx_rt_unvote(struct glink_transport_if *if_ptr)
2504{
2505 return -EOPNOTSUPP;
2506}
2507
2508/**
Chris Lewfa6135e2016-08-01 13:29:46 -07002509 * notif_if_up_all_xprts() - Check and notify existing transport state if up
2510 * @notif_info: Data structure containing transport information to be notified.
2511 *
2512 * This function is called when the client registers a notifier to know about
2513 * the state of a transport. This function matches the existing transports with
2514 * the transport in the "notif_info" parameter. When a matching transport is
2515 * found, the callback function in the "notif_info" parameter is called with
2516 * the state of the matching transport.
2517 *
2518 * If an edge or transport is not defined, then all edges and/or transports
2519 * will be matched and will receive up notifications.
2520 */
2521static void notif_if_up_all_xprts(
2522 struct link_state_notifier_info *notif_info)
2523{
2524 struct glink_core_xprt_ctx *xprt_ptr;
2525 struct glink_link_state_cb_info cb_info;
2526
2527 cb_info.link_state = GLINK_LINK_STATE_UP;
2528 mutex_lock(&transport_list_lock_lha0);
2529 list_for_each_entry(xprt_ptr, &transport_list, list_node) {
2530 if (strlen(notif_info->edge) &&
2531 strcmp(notif_info->edge, xprt_ptr->edge))
2532 continue;
2533
2534 if (strlen(notif_info->transport) &&
2535 strcmp(notif_info->transport, xprt_ptr->name))
2536 continue;
2537
2538 if (!xprt_is_fully_opened(xprt_ptr))
2539 continue;
2540
2541 cb_info.transport = xprt_ptr->name;
2542 cb_info.edge = xprt_ptr->edge;
2543 notif_info->glink_link_state_notif_cb(&cb_info,
2544 notif_info->priv);
2545 }
2546 mutex_unlock(&transport_list_lock_lha0);
2547}
2548
2549/**
2550 * check_link_notifier_and_notify() - Check and notify clients about link state
2551 * @xprt_ptr: Transport whose state to be notified.
2552 * @link_state: State of the transport to be notified.
2553 *
2554 * This function is called when the state of the transport changes. This
2555 * function matches the transport with the clients that have registered to
2556 * be notified about the state changes. When a matching client notifier is
2557 * found, the callback function in the client notifier is called with the
2558 * new state of the transport.
2559 */
2560static void check_link_notifier_and_notify(struct glink_core_xprt_ctx *xprt_ptr,
2561 enum glink_link_state link_state)
2562{
2563 struct link_state_notifier_info *notif_info;
2564 struct glink_link_state_cb_info cb_info;
2565
2566 cb_info.link_state = link_state;
2567 mutex_lock(&link_state_notifier_lock_lha1);
2568 list_for_each_entry(notif_info, &link_state_notifier_list, list) {
2569 if (strlen(notif_info->edge) &&
2570 strcmp(notif_info->edge, xprt_ptr->edge))
2571 continue;
2572
2573 if (strlen(notif_info->transport) &&
2574 strcmp(notif_info->transport, xprt_ptr->name))
2575 continue;
2576
2577 cb_info.transport = xprt_ptr->name;
2578 cb_info.edge = xprt_ptr->edge;
2579 notif_info->glink_link_state_notif_cb(&cb_info,
2580 notif_info->priv);
2581 }
2582 mutex_unlock(&link_state_notifier_lock_lha1);
2583}
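
/*
 * Example (illustrative sketch, not part of this driver): registering for
 * the notifications delivered by notif_if_up_all_xprts() and
 * check_link_notifier_and_notify() above. The registration helper
 * glink_register_link_state_cb() and the glink_link_info layout are assumed
 * from soc/qcom/glink.h; the "mpss" edge name is hypothetical.
 *
 *	static void client_link_state_cb(struct glink_link_state_cb_info *cb,
 *					 void *priv)
 *	{
 *		if (cb->link_state == GLINK_LINK_STATE_UP)
 *			pr_info("link up on [%s:%s]\n", cb->transport,
 *				cb->edge);
 *	}
 *
 *	static struct glink_link_info link_info = {
 *		.edge = "mpss",			(hypothetical edge)
 *		.transport = NULL,		(match any transport)
 *		.glink_link_state_notif_cb = client_link_state_cb,
 *	};
 *	void *notif_handle = glink_register_link_state_cb(&link_info, NULL);
 */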
2584
2585/**
2586 * glink_open() - Open a GLINK channel.
2587 *
2588 * @cfg:	Open configuration structure (the structure is copied before
2589 * glink_open returns). All unused fields should be zero-filled.
2590 *
2591 * This should not be called from link state callback context by clients.
2592 * Clients should invoke this function from their own thread.
2594 *
2595 * Return: Pointer to channel on success, PTR_ERR() with standard Linux
2596 * error code on failure.
2597 */
2598void *glink_open(const struct glink_open_config *cfg)
2599{
2600 struct channel_ctx *ctx = NULL;
2601 struct glink_core_xprt_ctx *transport_ptr;
2602 size_t len;
2603 int ret;
2604 uint16_t best_id;
2605
2606 if (!cfg->edge || !cfg->name) {
2607 GLINK_ERR("%s: !cfg->edge || !cfg->name\n", __func__);
2608 return ERR_PTR(-EINVAL);
2609 }
2610
2611 len = strlen(cfg->edge);
2612 if (len == 0 || len >= GLINK_NAME_SIZE) {
2613 GLINK_ERR("%s: [EDGE] len == 0 || len >= GLINK_NAME_SIZE\n",
2614 __func__);
2615 return ERR_PTR(-EINVAL);
2616 }
2617
2618 len = strlen(cfg->name);
2619 if (len == 0 || len >= GLINK_NAME_SIZE) {
2620 GLINK_ERR("%s: [NAME] len == 0 || len >= GLINK_NAME_SIZE\n",
2621 __func__);
2622 return ERR_PTR(-EINVAL);
2623 }
2624
2625 if (cfg->transport) {
2626 len = strlen(cfg->transport);
2627 if (len == 0 || len >= GLINK_NAME_SIZE) {
2628 GLINK_ERR("%s: [TRANSPORT] len == 0 || %s\n",
2629 __func__,
2630 "len >= GLINK_NAME_SIZE");
2631 return ERR_PTR(-EINVAL);
2632 }
2633 }
2634
2635 /* confirm required notification parameters */
2636 if (!(cfg->notify_rx || cfg->notify_rxv) || !cfg->notify_tx_done
2637 || !cfg->notify_state
2638 || ((cfg->options & GLINK_OPT_RX_INTENT_NOTIF)
2639 && !cfg->notify_remote_rx_intent)) {
2640 GLINK_ERR("%s: Incorrect notification parameters\n", __func__);
2641 return ERR_PTR(-EINVAL);
2642 }
2643
2644 /* find transport */
2645 transport_ptr = find_open_transport(cfg->edge, cfg->transport,
2646 cfg->options & GLINK_OPT_INITIAL_XPORT,
2647 &best_id);
2648 if (IS_ERR_OR_NULL(transport_ptr)) {
2649 GLINK_ERR("%s:%s %s: Error %d - unable to find transport\n",
2650 cfg->transport, cfg->edge, __func__,
2651 (unsigned int)PTR_ERR(transport_ptr));
2652 return ERR_PTR(-ENODEV);
2653 }
2654
2655 /*
2656 * look for an existing port structure which can occur in
2657 * reopen and remote-open-first cases
2658 */
Dhoat Harpalae706e12018-01-18 00:29:20 +05302659 ctx = ch_name_to_ch_ctx_create(transport_ptr, cfg->name, true);
Chris Lewfa6135e2016-08-01 13:29:46 -07002660 if (ctx == NULL) {
2661 GLINK_ERR("%s:%s %s: Error - unable to allocate new channel\n",
2662 cfg->transport, cfg->edge, __func__);
2663 return ERR_PTR(-ENOMEM);
2664 }
2665
Chris Lewfa6135e2016-08-01 13:29:46 -07002666 /* initialize port structure */
2667 ctx->user_priv = cfg->priv;
2668 ctx->rx_intent_req_timeout_jiffies =
2669 msecs_to_jiffies(cfg->rx_intent_req_timeout_ms);
2670 ctx->notify_rx = cfg->notify_rx;
2671 ctx->notify_tx_done = cfg->notify_tx_done;
2672 ctx->notify_state = cfg->notify_state;
2673 ctx->notify_rx_intent_req = cfg->notify_rx_intent_req;
2674 ctx->notify_rxv = cfg->notify_rxv;
2675 ctx->notify_rx_sigs = cfg->notify_rx_sigs;
2676 ctx->notify_rx_abort = cfg->notify_rx_abort;
2677 ctx->notify_tx_abort = cfg->notify_tx_abort;
2678 ctx->notify_rx_tracer_pkt = cfg->notify_rx_tracer_pkt;
2679 ctx->notify_remote_rx_intent = cfg->notify_remote_rx_intent;
2680
2681 if (!ctx->notify_rx_intent_req)
2682 ctx->notify_rx_intent_req = glink_dummy_notify_rx_intent_req;
2683 if (!ctx->notify_rx_sigs)
2684 ctx->notify_rx_sigs = glink_dummy_notify_rx_sigs;
2685 if (!ctx->notify_rx_abort)
2686 ctx->notify_rx_abort = glink_dummy_notify_rx_abort;
2687 if (!ctx->notify_tx_abort)
2688 ctx->notify_tx_abort = glink_dummy_notify_tx_abort;
2689
2690 if (!ctx->rx_intent_req_timeout_jiffies)
2691 ctx->rx_intent_req_timeout_jiffies = MAX_SCHEDULE_TIMEOUT;
2692
2693 ctx->local_xprt_req = best_id;
2694 ctx->no_migrate = cfg->transport &&
2695 !(cfg->options & GLINK_OPT_INITIAL_XPORT);
Chris Lewfa6135e2016-08-01 13:29:46 -07002696 GLINK_INFO_PERF_CH(ctx,
2697 "%s: local:GLINK_CHANNEL_CLOSED->GLINK_CHANNEL_OPENING\n",
2698 __func__);
2699
2700 /* start local-open sequence */
2701 ret = ctx->transport_ptr->ops->tx_cmd_ch_open(ctx->transport_ptr->ops,
2702 ctx->lcid, cfg->name, best_id);
2703 if (ret) {
2704 /* failure to send open command (transport failure) */
2705 ctx->local_open_state = GLINK_CHANNEL_CLOSED;
2706 GLINK_ERR_CH(ctx, "%s: Unable to send open command %d\n",
2707 __func__, ret);
Dhoat Harpal390dd202017-04-11 12:32:33 +05302708 rwref_put(&ctx->ch_state_lhb2);
Chris Lewfa6135e2016-08-01 13:29:46 -07002709 return ERR_PTR(ret);
2710 }
2711
2712 GLINK_INFO_CH(ctx, "%s: Created channel, sent OPEN command. ctx %p\n",
2713 __func__, ctx);
Dhoat Harpal390dd202017-04-11 12:32:33 +05302714 rwref_put(&ctx->ch_state_lhb2);
Chris Lewfa6135e2016-08-01 13:29:46 -07002715 return ctx;
2716}
2717EXPORT_SYMBOL(glink_open);
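
/*
 * Example (illustrative sketch, not part of this driver): a minimal client
 * open sequence. The edge/channel names are hypothetical and the callback
 * signatures are assumed from the glink_open_config declaration in
 * soc/qcom/glink.h; notify_rx (or notify_rxv), notify_tx_done and
 * notify_state are the mandatory callbacks checked above.
 *
 *	struct glink_open_config cfg = {
 *		.edge = "mpss",			(hypothetical edge)
 *		.name = "loopback_client",	(hypothetical channel)
 *		.notify_rx = client_notify_rx,
 *		.notify_tx_done = client_notify_tx_done,
 *		.notify_state = client_notify_state,
 *		.priv = client_priv,
 *	};
 *	void *handle = glink_open(&cfg);
 *
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *
 * The handle is usable for transmit only after notify_state() reports
 * GLINK_CONNECTED; a later glink_close() ends in GLINK_LOCAL_DISCONNECTED.
 */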
2718
2719/**
2720 * glink_get_channel_id_for_handle() - Get logical channel ID
2721 *
2722 * @handle: handle of channel
2723 *
2724 * Used internally by G-Link debugfs.
2725 *
2726 * Return: Logical Channel ID or standard Linux error code
2727 */
2728int glink_get_channel_id_for_handle(void *handle)
2729{
2730 struct channel_ctx *ctx = (struct channel_ctx *)handle;
2731
2732 if (ctx == NULL)
2733 return -EINVAL;
2734
2735 return ctx->lcid;
2736}
2737EXPORT_SYMBOL(glink_get_channel_id_for_handle);
2738
2739/**
2740 * glink_get_channel_name_for_handle() - return channel name
2741 *
2742 * @handle: handle of channel
2743 *
2744 * Used internally by G-Link debugfs.
2745 *
2746 * Return: Channel name or NULL
2747 */
2748char *glink_get_channel_name_for_handle(void *handle)
2749{
2750 struct channel_ctx *ctx = (struct channel_ctx *)handle;
2751
2752 if (ctx == NULL)
2753 return NULL;
2754
2755 return ctx->name;
2756}
2757EXPORT_SYMBOL(glink_get_channel_name_for_handle);
2758
2759/**
2760 * glink_delete_ch_from_list() - delete the channel from the list
2761 * @ctx: Pointer to channel context.
2762 * @add_flcid: Boolean value to decide whether the lcid should be added or not.
2763 *
2764 * This function deletes the channel from the list along with the debugfs
2765 * information associated with it. It also adds the channel lcid to the free
2766 * lcid list, except when the channel is deleted in the SSR/unregister case.
2767 * It can only be called when the channel is fully closed.
2768 *
2769 * Return: true when transport_ptr->channels is empty.
2770 */
2771static bool glink_delete_ch_from_list(struct channel_ctx *ctx, bool add_flcid)
2772{
2773 unsigned long flags;
2774 bool ret = false;
2775
2776 spin_lock_irqsave(&ctx->transport_ptr->xprt_ctx_lock_lhb1,
2777 flags);
2778 if (!list_empty(&ctx->port_list_node))
2779 list_del_init(&ctx->port_list_node);
2780 if (list_empty(&ctx->transport_ptr->channels) &&
2781 list_empty(&ctx->transport_ptr->notified))
2782 ret = true;
2783 spin_unlock_irqrestore(
2784 &ctx->transport_ptr->xprt_ctx_lock_lhb1,
2785 flags);
2786 if (add_flcid)
2787 glink_add_free_lcid_list(ctx);
2788 mutex_lock(&ctx->transport_ptr->xprt_dbgfs_lock_lhb4);
2789 glink_debugfs_remove_channel(ctx, ctx->transport_ptr);
2790 mutex_unlock(&ctx->transport_ptr->xprt_dbgfs_lock_lhb4);
2791 rwref_put(&ctx->ch_state_lhb2);
2792 return ret;
2793}
2794
2795/**
2796 * glink_close() - Close a previously opened channel.
2797 *
2798 * @handle: handle to close
2799 *
2800 * Once the closing process has been completed, the GLINK_LOCAL_DISCONNECTED
2801 * state event will be sent and the channel can be reopened.
2802 *
2803 * Return: 0 on success; -EINVAL for invalid handle, -EBUSY if close is
2804 * already in progress, standard Linux error code otherwise.
2805 */
2806int glink_close(void *handle)
2807{
2808 struct glink_core_xprt_ctx *xprt_ctx = NULL;
2809 struct channel_ctx *ctx = (struct channel_ctx *)handle;
2810 int ret = 0;
2811 unsigned long flags;
2812 bool is_empty = false;
2813
Dhoat Harpal9b9a85f2016-08-10 19:38:17 +05302814 ret = glink_get_ch_ctx(ctx);
2815 if (ret)
2816 return ret;
Chris Lewfa6135e2016-08-01 13:29:46 -07002817
2818 GLINK_INFO_CH(ctx, "%s: Closing channel, ctx: %p\n", __func__, ctx);
Dhoat Harpal9b9a85f2016-08-10 19:38:17 +05302819 if (ctx->local_open_state == GLINK_CHANNEL_CLOSED) {
Dhoat Harpal8c838482017-06-21 21:33:45 +05302820 glink_put_ch_ctx(ctx);
Chris Lewfa6135e2016-08-01 13:29:46 -07002821 return 0;
Dhoat Harpal9b9a85f2016-08-10 19:38:17 +05302822 }
Chris Lewfa6135e2016-08-01 13:29:46 -07002823
2824 if (ctx->local_open_state == GLINK_CHANNEL_CLOSING) {
2825 /* close already pending */
Dhoat Harpal8c838482017-06-21 21:33:45 +05302826 glink_put_ch_ctx(ctx);
Chris Lewfa6135e2016-08-01 13:29:46 -07002827 return -EBUSY;
2828 }
2829
2830 rwref_get(&ctx->ch_state_lhb2);
2831relock: xprt_ctx = ctx->transport_ptr;
2832 rwref_read_get(&xprt_ctx->xprt_state_lhb0);
2833 rwref_write_get(&ctx->ch_state_lhb2);
2834 if (xprt_ctx != ctx->transport_ptr) {
2835 rwref_write_put(&ctx->ch_state_lhb2);
2836 rwref_read_put(&xprt_ctx->xprt_state_lhb0);
2837 goto relock;
2838 }
2839
2840 /* Set the channel state before removing it from xprt's list(s) */
2841 GLINK_INFO_PERF_CH(ctx,
2842 "%s: local:%u->GLINK_CHANNEL_CLOSING\n",
2843 __func__, ctx->local_open_state);
2844 ctx->local_open_state = GLINK_CHANNEL_CLOSING;
2845
2846 ctx->pending_delete = true;
2847 ctx->int_req_ack = false;
2848
2849 spin_lock_irqsave(&xprt_ctx->tx_ready_lock_lhb3, flags);
2850 if (!list_empty(&ctx->tx_ready_list_node))
2851 list_del_init(&ctx->tx_ready_list_node);
2852 spin_unlock_irqrestore(&xprt_ctx->tx_ready_lock_lhb3, flags);
2853
2854 if (xprt_ctx->local_state != GLINK_XPRT_DOWN) {
2855 glink_qos_reset_priority(ctx);
2856 ret = xprt_ctx->ops->tx_cmd_ch_close(xprt_ctx->ops, ctx->lcid);
2857 rwref_write_put(&ctx->ch_state_lhb2);
2858 } else if (!strcmp(xprt_ctx->name, "dummy")) {
2859 /*
2860 * This check will avoid any race condition when clients call
2861 * glink_close before the dummy xprt swapping happens in link
2862 * down scenario.
2863 */
2864 ret = 0;
2865 rwref_write_put(&ctx->ch_state_lhb2);
2866 glink_core_ch_close_ack_common(ctx, false);
2867 if (ch_is_fully_closed(ctx)) {
2868 is_empty = glink_delete_ch_from_list(ctx, false);
2869 rwref_put(&xprt_ctx->xprt_state_lhb0);
2870 if (is_empty && !xprt_ctx->dummy_in_use)
2871 /* For the xprt reference */
2872 rwref_put(&xprt_ctx->xprt_state_lhb0);
2873 } else {
2874 GLINK_ERR_CH(ctx,
2875				"Channel not closed yet: local state [%d], remote state [%d]\n",
2876 ctx->local_open_state, ctx->remote_opened);
2877 }
2878 } else {
2879 /*
2880 * This case handles the scenario where glink_core_link_down
2881 * changes the local_state to GLINK_XPRT_DOWN but glink_close
2882 * gets the channel write lock before glink_core_channel_cleanup
2883 */
2884 rwref_write_put(&ctx->ch_state_lhb2);
2885 }
2886 complete_all(&ctx->int_req_ack_complete);
2887 complete_all(&ctx->int_req_complete);
2888
2889 rwref_put(&ctx->ch_state_lhb2);
2890 rwref_read_put(&xprt_ctx->xprt_state_lhb0);
Dhoat Harpal8c838482017-06-21 21:33:45 +05302891 glink_put_ch_ctx(ctx);
Chris Lewfa6135e2016-08-01 13:29:46 -07002892 return ret;
2893}
2894EXPORT_SYMBOL(glink_close);
2895
2896/**
2897 * glink_tx_pkt_release() - Release a packet's transmit information
2898 * @tx_pkt_ref: Packet information which needs to be released.
2899 *
2900 * This function is called when all the references to a packet information
2901 * is dropped.
2902 */
2903static void glink_tx_pkt_release(struct rwref_lock *tx_pkt_ref)
2904{
2905 struct glink_core_tx_pkt *tx_info = container_of(tx_pkt_ref,
2906 struct glink_core_tx_pkt,
2907 pkt_ref);
2908 if (!list_empty(&tx_info->list_done))
2909 list_del_init(&tx_info->list_done);
2910 if (!list_empty(&tx_info->list_node))
2911 list_del_init(&tx_info->list_node);
2912 kfree(tx_info);
2913}
2914
2915/**
2916 * glink_tx_common() - Common TX implementation
2917 *
2918 * @handle: handle returned by glink_open()
2919 * @pkt_priv: opaque data value that will be returned to client with
2920 * notify_tx_done notification
2921 * @data: pointer to the data
2922 * @size: size of data
2923 * @vbuf_provider: Virtual Address-space Buffer Provider for the tx buffer.
2924 * @pbuf_provider:	Physical Address-space Buffer Provider for the tx buffer.
2925 * @tx_flags: Flags to indicate transmit options
2926 *
2927 * Return: -EINVAL for invalid handle; -EBUSY if channel isn't ready for
2928 * transmit operation (not fully opened); -EAGAIN if remote side
2929 * has not provided a receive intent that is big enough.
2930 */
2931static int glink_tx_common(void *handle, void *pkt_priv,
2932 void *data, void *iovec, size_t size,
2933 void * (*vbuf_provider)(void *iovec, size_t offset, size_t *size),
2934 void * (*pbuf_provider)(void *iovec, size_t offset, size_t *size),
2935 uint32_t tx_flags)
2936{
2937 struct channel_ctx *ctx = (struct channel_ctx *)handle;
2938 uint32_t riid;
2939 int ret = 0;
Dhoat Harpal960b3b82017-05-12 21:31:47 +05302940 struct glink_core_tx_pkt *tx_info = NULL;
Chris Lewfa6135e2016-08-01 13:29:46 -07002941 size_t intent_size;
2942 bool is_atomic =
2943 tx_flags & (GLINK_TX_SINGLE_THREADED | GLINK_TX_ATOMIC);
Dhoat Harpal021061e2018-02-19 20:07:12 +05302944 char glink_name[GLINK_CH_XPRT_NAME_SIZE];
Chris Lewfa6135e2016-08-01 13:29:46 -07002945 unsigned long flags;
2946 void *cookie = NULL;
2947
2948 if (!size)
2949 return -EINVAL;
2950
Dhoat Harpal9b9a85f2016-08-10 19:38:17 +05302951 ret = glink_get_ch_ctx(ctx);
2952 if (ret)
2953 return ret;
Chris Lewfa6135e2016-08-01 13:29:46 -07002954
2955 rwref_read_get_atomic(&ctx->ch_state_lhb2, is_atomic);
Dhoat Harpal960b3b82017-05-12 21:31:47 +05302956 tx_info = kzalloc(sizeof(struct glink_core_tx_pkt),
2957 is_atomic ? GFP_ATOMIC : GFP_KERNEL);
2958 if (!tx_info) {
2959 GLINK_ERR_CH(ctx, "%s: No memory for allocation\n", __func__);
2960 ret = -ENOMEM;
2961 goto glink_tx_common_err;
2962 }
Chris Lewfa6135e2016-08-01 13:29:46 -07002963 if (!(vbuf_provider || pbuf_provider)) {
Dhoat Harpal9b9a85f2016-08-10 19:38:17 +05302964 ret = -EINVAL;
2965 goto glink_tx_common_err;
Chris Lewfa6135e2016-08-01 13:29:46 -07002966 }
2967
2968 if (!ch_is_fully_opened(ctx)) {
Dhoat Harpal9b9a85f2016-08-10 19:38:17 +05302969 ret = -EBUSY;
2970 goto glink_tx_common_err;
Chris Lewfa6135e2016-08-01 13:29:46 -07002971 }
2972
2973 if (size > GLINK_MAX_PKT_SIZE) {
Dhoat Harpal9b9a85f2016-08-10 19:38:17 +05302974 ret = -EINVAL;
2975 goto glink_tx_common_err;
Chris Lewfa6135e2016-08-01 13:29:46 -07002976 }
2977
2978 if (unlikely(tx_flags & GLINK_TX_TRACER_PKT)) {
2979 if (!(ctx->transport_ptr->capabilities & GCAP_TRACER_PKT)) {
Dhoat Harpal9b9a85f2016-08-10 19:38:17 +05302980 ret = -EOPNOTSUPP;
2981 goto glink_tx_common_err;
Chris Lewfa6135e2016-08-01 13:29:46 -07002982 }
2983 tracer_pkt_log_event(data, GLINK_CORE_TX);
2984 }
2985
Dhoat Harpal021061e2018-02-19 20:07:12 +05302986 scnprintf(glink_name, GLINK_CH_XPRT_NAME_SIZE, "%s_%s_%s", ctx->name,
2987 ctx->transport_ptr->edge, ctx->transport_ptr->name);
Chris Lewfa6135e2016-08-01 13:29:46 -07002988 /* find matching rx intent (first-fit algorithm for now) */
2989 if (ch_pop_remote_rx_intent(ctx, size, &riid, &intent_size, &cookie)) {
2990 if (!(tx_flags & GLINK_TX_REQ_INTENT)) {
2991 /* no rx intent available */
Dhoat Harpal021061e2018-02-19 20:07:12 +05302992 GLINK_ERR(
2993 "%s: %s: R[%u]:%zu Intent not present\n",
2994 glink_name, __func__, riid, size);
Dhoat Harpal9b9a85f2016-08-10 19:38:17 +05302995 ret = -EAGAIN;
2996 goto glink_tx_common_err;
Chris Lewfa6135e2016-08-01 13:29:46 -07002997 }
2998 if (is_atomic && !(ctx->transport_ptr->capabilities &
2999 GCAP_AUTO_QUEUE_RX_INT)) {
Dhoat Harpal021061e2018-02-19 20:07:12 +05303000 GLINK_ERR("%s: %s: %s\n", glink_name, __func__,
3001 "Cannot request intent in atomic context");
Dhoat Harpal9b9a85f2016-08-10 19:38:17 +05303002 ret = -EINVAL;
3003 goto glink_tx_common_err;
Chris Lewfa6135e2016-08-01 13:29:46 -07003004 }
3005
3006 /* request intent of correct size */
3007 reinit_completion(&ctx->int_req_ack_complete);
3008 ret = ctx->transport_ptr->ops->tx_cmd_rx_intent_req(
3009 ctx->transport_ptr->ops, ctx->lcid, size);
3010 if (ret) {
Dhoat Harpal021061e2018-02-19 20:07:12 +05303011 GLINK_ERR("%s: %s: Request intent failed %d\n",
3012 glink_name, __func__, ret);
Dhoat Harpal9b9a85f2016-08-10 19:38:17 +05303013 goto glink_tx_common_err;
Chris Lewfa6135e2016-08-01 13:29:46 -07003014 }
3015
3016 while (ch_pop_remote_rx_intent(ctx, size, &riid,
3017 &intent_size, &cookie)) {
Chris Lewfa6135e2016-08-01 13:29:46 -07003018 rwref_read_put(&ctx->ch_state_lhb2);
3019 if (is_atomic) {
Dhoat Harpal021061e2018-02-19 20:07:12 +05303020 GLINK_ERR("%s: %s: Intent of size %zu %s\n",
3021 glink_name, __func__, size,
3022 "not ready");
Dhoat Harpal9b9a85f2016-08-10 19:38:17 +05303023 ret = -EAGAIN;
3024 goto glink_tx_common_err_2;
Chris Lewfa6135e2016-08-01 13:29:46 -07003025 }
3026
3027 if (ctx->transport_ptr->local_state == GLINK_XPRT_DOWN
3028 || !ch_is_fully_opened(ctx)) {
Dhoat Harpal021061e2018-02-19 20:07:12 +05303029 GLINK_ERR("%s: %s: %s %s\n", glink_name,
3030 __func__, "Channel closed while",
3031 "waiting for intent");
Dhoat Harpal9b9a85f2016-08-10 19:38:17 +05303032 ret = -EBUSY;
3033 goto glink_tx_common_err_2;
Chris Lewfa6135e2016-08-01 13:29:46 -07003034 }
3035
3036 /* wait for the remote intent req ack */
3037 if (!wait_for_completion_timeout(
3038 &ctx->int_req_ack_complete,
3039 ctx->rx_intent_req_timeout_jiffies)) {
Dhoat Harpal907e0cf2017-12-28 16:03:16 +05303040 GLINK_ERR(
Dhoat Harpal021061e2018-02-19 20:07:12 +05303041 "%s: %s: %s %zu not granted for lcid\n",
3042 glink_name, __func__,
3043 "Intent request ack with size:", size);
Dhoat Harpal9b9a85f2016-08-10 19:38:17 +05303044 ret = -ETIMEDOUT;
3045 goto glink_tx_common_err_2;
Chris Lewfa6135e2016-08-01 13:29:46 -07003046 }
3047
3048 if (!ctx->int_req_ack) {
Dhoat Harpal021061e2018-02-19 20:07:12 +05303049 GLINK_ERR("%s: %s: %s %zu %s\n", glink_name,
3050 __func__, "Intent Request with size:",
3051 size, "not granted for lcid");
Dhoat Harpal9b9a85f2016-08-10 19:38:17 +05303052 ret = -EAGAIN;
3053 goto glink_tx_common_err_2;
Chris Lewfa6135e2016-08-01 13:29:46 -07003054 }
3055
3056 /* wait for the rx_intent from remote side */
3057 if (!wait_for_completion_timeout(
3058 &ctx->int_req_complete,
3059 ctx->rx_intent_req_timeout_jiffies)) {
Dhoat Harpal021061e2018-02-19 20:07:12 +05303060 GLINK_ERR("%s: %s: %s %zu %s\n", glink_name,
3061 __func__, "Intent request with size: ",
3062 size, "not granted for lcid");
Dhoat Harpal9b9a85f2016-08-10 19:38:17 +05303063 ret = -ETIMEDOUT;
3064 goto glink_tx_common_err_2;
Chris Lewfa6135e2016-08-01 13:29:46 -07003065 }
3066
3067 reinit_completion(&ctx->int_req_complete);
3068 rwref_read_get(&ctx->ch_state_lhb2);
Chris Lewfa6135e2016-08-01 13:29:46 -07003069 }
3070 }
3071
3072 if (!is_atomic) {
3073 spin_lock_irqsave(&ctx->transport_ptr->tx_ready_lock_lhb3,
3074 flags);
3075 glink_pm_qos_vote(ctx->transport_ptr);
3076 spin_unlock_irqrestore(&ctx->transport_ptr->tx_ready_lock_lhb3,
3077 flags);
3078 }
3079
3080 GLINK_INFO_PERF_CH(ctx, "%s: R[%u]:%zu data[%p], size[%zu]. TID %u\n",
3081 __func__, riid, intent_size,
3082 data ? data : iovec, size, current->pid);
Dhoat Harpal960b3b82017-05-12 21:31:47 +05303083
Chris Lewfa6135e2016-08-01 13:29:46 -07003084 rwref_lock_init(&tx_info->pkt_ref, glink_tx_pkt_release);
3085 INIT_LIST_HEAD(&tx_info->list_done);
3086 INIT_LIST_HEAD(&tx_info->list_node);
3087 tx_info->pkt_priv = pkt_priv;
3088 tx_info->data = data;
3089 tx_info->riid = riid;
3090 tx_info->rcid = ctx->rcid;
3091 tx_info->size = size;
3092 tx_info->size_remaining = size;
3093 tx_info->tracer_pkt = tx_flags & GLINK_TX_TRACER_PKT ? true : false;
3094 tx_info->iovec = iovec ? iovec : (void *)tx_info;
3095 tx_info->vprovider = vbuf_provider;
3096 tx_info->pprovider = pbuf_provider;
3097 tx_info->intent_size = intent_size;
3098 tx_info->cookie = cookie;
3099
3100 /* schedule packet for transmit */
3101 if ((tx_flags & GLINK_TX_SINGLE_THREADED) &&
3102 (ctx->transport_ptr->capabilities & GCAP_INTENTLESS))
3103 ret = xprt_single_threaded_tx(ctx->transport_ptr,
3104 ctx, tx_info);
3105 else
3106 xprt_schedule_tx(ctx->transport_ptr, ctx, tx_info);
3107
Dhoat Harpal960b3b82017-05-12 21:31:47 +05303108 rwref_read_put(&ctx->ch_state_lhb2);
Dhoat Harpal8c838482017-06-21 21:33:45 +05303109 glink_put_ch_ctx(ctx);
Dhoat Harpal960b3b82017-05-12 21:31:47 +05303110 return ret;
3111
Dhoat Harpal9b9a85f2016-08-10 19:38:17 +05303112glink_tx_common_err:
Chris Lewfa6135e2016-08-01 13:29:46 -07003113 rwref_read_put(&ctx->ch_state_lhb2);
Dhoat Harpal9b9a85f2016-08-10 19:38:17 +05303114glink_tx_common_err_2:
Dhoat Harpal8c838482017-06-21 21:33:45 +05303115 glink_put_ch_ctx(ctx);
Dhoat Harpal960b3b82017-05-12 21:31:47 +05303116 kfree(tx_info);
Chris Lewfa6135e2016-08-01 13:29:46 -07003117 return ret;
3118}
3119
3120/**
3121 * glink_tx() - Transmit packet.
3122 *
3123 * @handle: handle returned by glink_open()
3124 * @pkt_priv: opaque data value that will be returned to client with
3125 * notify_tx_done notification
3126 * @data: pointer to the data
3127 * @size: size of data
3128 * @tx_flags: Flags to specify transmit specific options
3129 *
3130 * Return: -EINVAL for invalid handle; -EBUSY if channel isn't ready for
3131 * transmit operation (not fully opened); -EAGAIN if remote side
3132 * has not provided a receive intent that is big enough.
3133 */
3134int glink_tx(void *handle, void *pkt_priv, void *data, size_t size,
3135 uint32_t tx_flags)
3136{
3137 return glink_tx_common(handle, pkt_priv, data, NULL, size,
3138 tx_linear_vbuf_provider, NULL, tx_flags);
3139}
3140EXPORT_SYMBOL(glink_tx);
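
/*
 * Example (illustrative sketch): a blocking transmit once the channel is
 * fully opened. With GLINK_TX_REQ_INTENT set, glink_tx_common() above may
 * sleep waiting for the remote side to queue a large-enough rx intent, so
 * this form must not be used from atomic context.
 *
 *	ret = glink_tx(handle, pkt_priv, buf, len, GLINK_TX_REQ_INTENT);
 *	if (ret < 0)
 *		pr_err("glink_tx failed: %d\n", ret);
 *
 * Without GLINK_TX_REQ_INTENT the call fails fast with -EAGAIN when no
 * matching intent exists. @buf must stay valid until notify_tx_done().
 */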
3141
3142/**
3143 * glink_queue_rx_intent() - Register an intent to receive data.
3144 *
3145 * @handle: handle returned by glink_open()
3146 * @pkt_priv: opaque data type that is returned when a packet is received
3147 * @size:	maximum size of data to receive
3148 *
3149 * Return: 0 for success; standard Linux error code for failure case
3150 */
3151int glink_queue_rx_intent(void *handle, const void *pkt_priv, size_t size)
3152{
3153 struct channel_ctx *ctx = (struct channel_ctx *)handle;
3154 struct glink_core_rx_intent *intent_ptr;
3155 int ret = 0;
3156
Dhoat Harpal9b9a85f2016-08-10 19:38:17 +05303157 ret = glink_get_ch_ctx(ctx);
3158 if (ret)
3159 return ret;
Chris Lewfa6135e2016-08-01 13:29:46 -07003160
3161 if (!ch_is_fully_opened(ctx)) {
3162 /* Can only queue rx intents if channel is fully opened */
3163 GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
3164 __func__);
Dhoat Harpal8c838482017-06-21 21:33:45 +05303165 glink_put_ch_ctx(ctx);
Chris Lewfa6135e2016-08-01 13:29:46 -07003166 return -EBUSY;
3167 }
3168
3169 intent_ptr = ch_push_local_rx_intent(ctx, pkt_priv, size);
3170 if (!intent_ptr) {
3171 GLINK_ERR_CH(ctx,
3172 "%s: Intent pointer allocation failed size[%zu]\n",
3173 __func__, size);
Dhoat Harpal8c838482017-06-21 21:33:45 +05303174 glink_put_ch_ctx(ctx);
Chris Lewfa6135e2016-08-01 13:29:46 -07003175 return -ENOMEM;
3176 }
3177 GLINK_DBG_CH(ctx, "%s: L[%u]:%zu\n", __func__, intent_ptr->id,
3178 intent_ptr->intent_size);
3179
Dhoat Harpal9b9a85f2016-08-10 19:38:17 +05303180 if (ctx->transport_ptr->capabilities & GCAP_INTENTLESS) {
Dhoat Harpal8c838482017-06-21 21:33:45 +05303181 glink_put_ch_ctx(ctx);
Chris Lewfa6135e2016-08-01 13:29:46 -07003182 return ret;
Dhoat Harpal9b9a85f2016-08-10 19:38:17 +05303183 }
Chris Lewfa6135e2016-08-01 13:29:46 -07003184
3185 /* notify remote side of rx intent */
3186 ret = ctx->transport_ptr->ops->tx_cmd_local_rx_intent(
3187 ctx->transport_ptr->ops, ctx->lcid, size, intent_ptr->id);
3188 if (ret)
3189 /* unable to transmit, dequeue intent */
3190 ch_remove_local_rx_intent(ctx, intent_ptr->id);
Dhoat Harpal8c838482017-06-21 21:33:45 +05303191 glink_put_ch_ctx(ctx);
Chris Lewfa6135e2016-08-01 13:29:46 -07003192 return ret;
3193}
3194EXPORT_SYMBOL(glink_queue_rx_intent);
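
/*
 * Example (illustrative sketch): clients usually queue a handful of rx
 * intents right after notify_state() reports GLINK_CONNECTED, so the remote
 * side always has a receive buffer available. RX_INTENT_CNT and RX_BUF_SIZE
 * are client-chosen values, not part of this API.
 *
 *	int i, ret;
 *
 *	for (i = 0; i < RX_INTENT_CNT; i++) {
 *		ret = glink_queue_rx_intent(handle, client_priv, RX_BUF_SIZE);
 *		if (ret) {
 *			pr_err("queue_rx_intent failed: %d\n", ret);
 *			break;
 *		}
 *	}
 */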
3195
3196/**
3197 * glink_rx_intent_exists() - Check if an intent exists.
3198 *
3199 * @handle: handle returned by glink_open()
3200 * @size: size of an intent to check or 0 for any intent
3201 *
3202 * Return: True if an intent exists with a size greater than or equal to
3203 * @size, else false
3204 */
3205bool glink_rx_intent_exists(void *handle, size_t size)
3206{
3207 struct channel_ctx *ctx = (struct channel_ctx *)handle;
3208 struct glink_core_rx_intent *intent;
3209 unsigned long flags;
Dhoat Harpal9b9a85f2016-08-10 19:38:17 +05303210 int ret;
Chris Lewfa6135e2016-08-01 13:29:46 -07003211
3212 if (!ctx || !ch_is_fully_opened(ctx))
3213 return false;
3214
Dhoat Harpal9b9a85f2016-08-10 19:38:17 +05303215 ret = glink_get_ch_ctx(ctx);
3216 if (ret)
3217 return false;
Chris Lewfa6135e2016-08-01 13:29:46 -07003218 spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
3219 list_for_each_entry(intent, &ctx->local_rx_intent_list, list) {
3220 if (size <= intent->intent_size) {
3221 spin_unlock_irqrestore(
3222 &ctx->local_rx_intent_lst_lock_lhc1, flags);
Dhoat Harpal8c838482017-06-21 21:33:45 +05303223 glink_put_ch_ctx(ctx);
Chris Lewfa6135e2016-08-01 13:29:46 -07003224 return true;
3225 }
3226 }
3227 spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
Dhoat Harpal8c838482017-06-21 21:33:45 +05303228 glink_put_ch_ctx(ctx);
Chris Lewfa6135e2016-08-01 13:29:46 -07003229 return false;
3230}
3231EXPORT_SYMBOL(glink_rx_intent_exists);
3232
3233/**
3234 * glink_rx_done() - Return receive buffer to remote side.
3235 *
3236 * @handle: handle returned by glink_open()
3237 * @ptr: data pointer provided in the notify_rx() call
3238 * @reuse: if true, receive intent is re-used
3239 *
3240 * Return: 0 for success; standard Linux error code for failure case
3241 */
3242int glink_rx_done(void *handle, const void *ptr, bool reuse)
3243{
3244 struct channel_ctx *ctx = (struct channel_ctx *)handle;
3245 struct glink_core_rx_intent *liid_ptr;
3246 uint32_t id;
3247 int ret = 0;
3248
Dhoat Harpal9b9a85f2016-08-10 19:38:17 +05303249 ret = glink_get_ch_ctx(ctx);
3250 if (ret)
3251 return ret;
Chris Lewfa6135e2016-08-01 13:29:46 -07003252 liid_ptr = ch_get_local_rx_intent_notified(ctx, ptr);
3253
3254 if (IS_ERR_OR_NULL(liid_ptr)) {
3255 /* invalid pointer */
3256 GLINK_ERR_CH(ctx, "%s: Invalid pointer %p\n", __func__, ptr);
Dhoat Harpal8c838482017-06-21 21:33:45 +05303257 glink_put_ch_ctx(ctx);
Chris Lewfa6135e2016-08-01 13:29:46 -07003258 return -EINVAL;
3259 }
3260
3261 GLINK_INFO_PERF_CH(ctx, "%s: L[%u]: data[%p]. TID %u\n",
3262 __func__, liid_ptr->id, ptr, current->pid);
3263 id = liid_ptr->id;
3264 if (reuse) {
3265 ret = ctx->transport_ptr->ops->reuse_rx_intent(
3266 ctx->transport_ptr->ops, liid_ptr);
3267 if (ret) {
3268 GLINK_ERR_CH(ctx, "%s: Intent reuse err %d for %p\n",
3269 __func__, ret, ptr);
3270 ret = -ENOBUFS;
3271 reuse = false;
3272 ctx->transport_ptr->ops->deallocate_rx_intent(
3273 ctx->transport_ptr->ops, liid_ptr);
3274 }
3275 } else {
3276 ctx->transport_ptr->ops->deallocate_rx_intent(
3277 ctx->transport_ptr->ops, liid_ptr);
3278 }
3279 ch_remove_local_rx_intent_notified(ctx, liid_ptr, reuse);
3280 /* send rx done */
3281 ctx->transport_ptr->ops->tx_cmd_local_rx_done(ctx->transport_ptr->ops,
3282 ctx->lcid, id, reuse);
Dhoat Harpal8c838482017-06-21 21:33:45 +05303283 glink_put_ch_ctx(ctx);
Chris Lewfa6135e2016-08-01 13:29:46 -07003284 return ret;
3285}
3286EXPORT_SYMBOL(glink_rx_done);
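/*
 * Example (illustrative sketch, not part of the original file):
 * glink_rx_done() is normally called from, or some time after, the
 * notify_rx() callback registered through glink_open(). The callback
 * signature below follows the notify_rx prototype in soc/qcom/glink.h;
 * the data handling itself is elided.
 */
static void example_notify_rx(void *handle, const void *priv,
			      const void *pkt_priv, const void *ptr,
			      size_t size)
{
	/* ... consume size bytes at ptr ... */

	/* Hand the buffer back; reuse=true recycles the rx intent */
	glink_rx_done(handle, ptr, true);
}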
3287
3288/**
3289 * glink_txv() - Transmit a packet in vector form.
3290 *
3291 * @handle: handle returned by glink_open()
3292 * @pkt_priv: opaque data value that will be returned to client with
3293 * notify_tx_done notification
3294 * @iovec: pointer to the vector (must remain valid until notify_tx_done
3295 * notification)
3296 * @size: size of data/vector
 3297 * @vbuf_provider: Client provided helper function to iterate the vector
 3298 *		in virtual address space
 3299 * @pbuf_provider: Client provided helper function to iterate the vector
 3300 *		in physical address space
3301 * @tx_flags: Flags to specify transmit specific options
3302 *
3303 * Return: -EINVAL for invalid handle; -EBUSY if channel isn't ready for
3304 * transmit operation (not fully opened); -EAGAIN if remote side has
3305 * not provided a receive intent that is big enough.
3306 */
3307int glink_txv(void *handle, void *pkt_priv,
3308 void *iovec, size_t size,
3309 void * (*vbuf_provider)(void *iovec, size_t offset, size_t *size),
3310 void * (*pbuf_provider)(void *iovec, size_t offset, size_t *size),
3311 uint32_t tx_flags)
3312{
3313 return glink_tx_common(handle, pkt_priv, NULL, iovec, size,
3314 vbuf_provider, pbuf_provider, tx_flags);
3315}
3316EXPORT_SYMBOL(glink_txv);
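/*
 * Example (illustrative sketch, not part of the original file): the
 * simplest "vector" for glink_txv() is one contiguous buffer. The
 * provider below returns the remaining virtual-address span at @offset
 * and is passed as @vbuf_provider; @pbuf_provider may be left NULL when
 * the transport only needs virtual addresses. All example_* names are
 * hypothetical.
 */
struct example_single_iovec {
	void *buf;
	size_t len;
};

static void *example_vbuf_provider(void *iovec, size_t offset, size_t *size)
{
	struct example_single_iovec *v = iovec;

	if (offset >= v->len)
		return NULL;
	*size = v->len - offset;
	return (char *)v->buf + offset;
}

/*
 * Usage:
 *	glink_txv(handle, pkt_priv, &vec, vec.len,
 *		  example_vbuf_provider, NULL, 0);
 */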
3317
3318/**
3319 * glink_sigs_set() - Set the local signals for the GLINK channel
3320 *
3321 * @handle: handle returned by glink_open()
3322 * @sigs: modified signal value
3323 *
3324 * Return: 0 for success; standard Linux error code for failure case
3325 */
3326int glink_sigs_set(void *handle, uint32_t sigs)
3327{
3328 struct channel_ctx *ctx = (struct channel_ctx *)handle;
3329 int ret;
3330
Dhoat Harpal9b9a85f2016-08-10 19:38:17 +05303331 ret = glink_get_ch_ctx(ctx);
3332 if (ret)
3333 return ret;
Chris Lewfa6135e2016-08-01 13:29:46 -07003334 if (!ch_is_fully_opened(ctx)) {
3335 GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
3336 __func__);
Dhoat Harpal8c838482017-06-21 21:33:45 +05303337 glink_put_ch_ctx(ctx);
Chris Lewfa6135e2016-08-01 13:29:46 -07003338 return -EBUSY;
3339 }
3340
3341 ctx->lsigs = sigs;
3342
3343 ret = ctx->transport_ptr->ops->tx_cmd_set_sigs(ctx->transport_ptr->ops,
3344 ctx->lcid, ctx->lsigs);
3345 GLINK_INFO_CH(ctx, "%s: Sent SIGNAL SET command\n", __func__);
3346
Dhoat Harpal8c838482017-06-21 21:33:45 +05303347 glink_put_ch_ctx(ctx);
Chris Lewfa6135e2016-08-01 13:29:46 -07003348 return ret;
3349}
3350EXPORT_SYMBOL(glink_sigs_set);
3351
3352/**
3353 * glink_sigs_local_get() - Get the local signals for the GLINK channel
3354 *
 3355 * @handle: handle returned by glink_open()
 3356 * @sigs: Pointer to hold the signals
3357 *
3358 * Return: 0 for success; standard Linux error code for failure case
3359 */
3360int glink_sigs_local_get(void *handle, uint32_t *sigs)
3361{
3362 struct channel_ctx *ctx = (struct channel_ctx *)handle;
Dhoat Harpal9b9a85f2016-08-10 19:38:17 +05303363 int ret;
Chris Lewfa6135e2016-08-01 13:29:46 -07003364
Dhoat Harpal9b9a85f2016-08-10 19:38:17 +05303365 if (!sigs)
Chris Lewfa6135e2016-08-01 13:29:46 -07003366 return -EINVAL;
Dhoat Harpal9b9a85f2016-08-10 19:38:17 +05303367 ret = glink_get_ch_ctx(ctx);
3368 if (ret)
3369 return ret;
Chris Lewfa6135e2016-08-01 13:29:46 -07003370 if (!ch_is_fully_opened(ctx)) {
3371 GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
3372 __func__);
Dhoat Harpal8c838482017-06-21 21:33:45 +05303373 glink_put_ch_ctx(ctx);
Chris Lewfa6135e2016-08-01 13:29:46 -07003374 return -EBUSY;
3375 }
3376
3377 *sigs = ctx->lsigs;
Dhoat Harpal8c838482017-06-21 21:33:45 +05303378 glink_put_ch_ctx(ctx);
Chris Lewfa6135e2016-08-01 13:29:46 -07003379 return 0;
3380}
3381EXPORT_SYMBOL(glink_sigs_local_get);
3382
3383/**
3384 * glink_sigs_remote_get() - Get the Remote signals for the GLINK channel
3385 *
 3386 * @handle: handle returned by glink_open()
 3387 * @sigs: Pointer to hold the signals
3388 *
3389 * Return: 0 for success; standard Linux error code for failure case
3390 */
3391int glink_sigs_remote_get(void *handle, uint32_t *sigs)
3392{
3393 struct channel_ctx *ctx = (struct channel_ctx *)handle;
Dhoat Harpal9b9a85f2016-08-10 19:38:17 +05303394 int ret;
Chris Lewfa6135e2016-08-01 13:29:46 -07003395
Dhoat Harpal9b9a85f2016-08-10 19:38:17 +05303396 if (!sigs)
Chris Lewfa6135e2016-08-01 13:29:46 -07003397 return -EINVAL;
3398
Dhoat Harpal9b9a85f2016-08-10 19:38:17 +05303399 ret = glink_get_ch_ctx(ctx);
3400 if (ret)
3401 return ret;
Chris Lewfa6135e2016-08-01 13:29:46 -07003402 if (!ch_is_fully_opened(ctx)) {
3403 GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
3404 __func__);
Dhoat Harpal8c838482017-06-21 21:33:45 +05303405 glink_put_ch_ctx(ctx);
Chris Lewfa6135e2016-08-01 13:29:46 -07003406 return -EBUSY;
3407 }
3408
3409 *sigs = ctx->rsigs;
Dhoat Harpal8c838482017-06-21 21:33:45 +05303410 glink_put_ch_ctx(ctx);
Chris Lewfa6135e2016-08-01 13:29:46 -07003411 return 0;
3412}
3413EXPORT_SYMBOL(glink_sigs_remote_get);
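/*
 * Example (illustrative sketch, not part of the original file): the
 * signal calls model UART-style control bits whose layout is agreed
 * between the two clients; SIG_EXAMPLE_READY is a hypothetical bit.
 */
#define SIG_EXAMPLE_READY	BIT(0)

static int example_exchange_sigs(void *handle)
{
	uint32_t remote_sigs;
	int ret;

	/* Assert our local "ready" bit toward the remote side */
	ret = glink_sigs_set(handle, SIG_EXAMPLE_READY);
	if (ret)
		return ret;

	/* Read back whatever the remote side last asserted */
	return glink_sigs_remote_get(handle, &remote_sigs);
}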
3414
3415/**
3416 * glink_register_link_state_cb() - Register for link state notification
3417 * @link_info: Data structure containing the link identification and callback.
3418 * @priv: Private information to be passed with the callback.
3419 *
3420 * This function is used to register a notifier to receive the updates about a
 3421 * link's/transport's state. This notifier must be registered before
 3422 * attempting to open a channel.
3423 *
3424 * Return: a reference to the notifier handle.
3425 */
3426void *glink_register_link_state_cb(struct glink_link_info *link_info,
3427 void *priv)
3428{
3429 struct link_state_notifier_info *notif_info;
3430
3431 if (!link_info || !link_info->glink_link_state_notif_cb)
3432 return ERR_PTR(-EINVAL);
3433
3434 notif_info = kzalloc(sizeof(*notif_info), GFP_KERNEL);
3435 if (!notif_info) {
3436 GLINK_ERR("%s: Error allocating link state notifier info\n",
3437 __func__);
3438 return ERR_PTR(-ENOMEM);
3439 }
3440 if (link_info->transport)
3441 strlcpy(notif_info->transport, link_info->transport,
3442 GLINK_NAME_SIZE);
3443
3444 if (link_info->edge)
3445 strlcpy(notif_info->edge, link_info->edge, GLINK_NAME_SIZE);
3446 notif_info->priv = priv;
3447 notif_info->glink_link_state_notif_cb =
3448 link_info->glink_link_state_notif_cb;
3449
3450 mutex_lock(&link_state_notifier_lock_lha1);
3451 list_add_tail(&notif_info->list, &link_state_notifier_list);
3452 mutex_unlock(&link_state_notifier_lock_lha1);
3453
3454 notif_if_up_all_xprts(notif_info);
3455 return notif_info;
3456}
3457EXPORT_SYMBOL(glink_register_link_state_cb);
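/*
 * Example (illustrative sketch, not part of the original file): register
 * the notifier before opening the channel, then open from the callback
 * once GLINK_LINK_STATE_UP arrives. The glink_link_state_cb_info layout
 * is assumed from soc/qcom/glink.h; "smem"/"mpss" are example values.
 */
static void example_link_state_cb(struct glink_link_state_cb_info *cb_info,
				  void *priv)
{
	if (cb_info->link_state == GLINK_LINK_STATE_UP)
		pr_info("glink example: link up, safe to call glink_open()\n");
}

static void *example_register_link_cb(void)
{
	static struct glink_link_info link_info = {
		.transport = "smem",
		.edge = "mpss",
		.glink_link_state_notif_cb = example_link_state_cb,
	};

	/* Returns a handle for glink_unregister_link_state_cb() */
	return glink_register_link_state_cb(&link_info, NULL);
}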
3458
3459/**
3460 * glink_unregister_link_state_cb() - Unregister the link state notification
 3461 * @notif_handle: Handle to be unregistered.
3462 *
3463 * This function is used to unregister a notifier to stop receiving the updates
 3464 * about a link's/transport's state.
3465 */
3466void glink_unregister_link_state_cb(void *notif_handle)
3467{
3468 struct link_state_notifier_info *notif_info, *tmp_notif_info;
3469
3470 if (IS_ERR_OR_NULL(notif_handle))
3471 return;
3472
3473 mutex_lock(&link_state_notifier_lock_lha1);
3474 list_for_each_entry_safe(notif_info, tmp_notif_info,
3475 &link_state_notifier_list, list) {
3476 if (notif_info == notif_handle) {
3477 list_del(&notif_info->list);
3478 mutex_unlock(&link_state_notifier_lock_lha1);
3479 kfree(notif_info);
3480 return;
3481 }
3482 }
3483 mutex_unlock(&link_state_notifier_lock_lha1);
3484}
3485EXPORT_SYMBOL(glink_unregister_link_state_cb);
3486
3487/**
3488 * glink_qos_latency() - Register the latency QoS requirement
3489 * @handle: Channel handle in which the latency is required.
3490 * @latency_us: Latency requirement in units of micro-seconds.
3491 * @pkt_size: Worst case packet size for which the latency is required.
3492 *
3493 * This function is used to register the latency requirement for a channel
3494 * and ensures that the latency requirement for this channel is met without
3495 * impacting the existing latency requirements of other channels.
3496 *
3497 * Return: 0 if QoS request is achievable, standard Linux error codes on error
3498 */
3499int glink_qos_latency(void *handle, unsigned long latency_us, size_t pkt_size)
3500{
3501 struct channel_ctx *ctx = (struct channel_ctx *)handle;
3502 int ret;
3503 unsigned long req_rate_kBps;
3504
Dhoat Harpal9b9a85f2016-08-10 19:38:17 +05303505 if (!latency_us || !pkt_size)
Chris Lewfa6135e2016-08-01 13:29:46 -07003506 return -EINVAL;
3507
Dhoat Harpal9b9a85f2016-08-10 19:38:17 +05303508 ret = glink_get_ch_ctx(ctx);
3509 if (ret)
3510 return ret;
Chris Lewfa6135e2016-08-01 13:29:46 -07003511 if (!ch_is_fully_opened(ctx)) {
3512 GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
3513 __func__);
Dhoat Harpal8c838482017-06-21 21:33:45 +05303514 glink_put_ch_ctx(ctx);
Chris Lewfa6135e2016-08-01 13:29:46 -07003515 return -EBUSY;
3516 }
3517
3518 req_rate_kBps = glink_qos_calc_rate_kBps(pkt_size, latency_us);
3519
3520 ret = glink_qos_assign_priority(ctx, req_rate_kBps);
3521 if (ret < 0)
3522 GLINK_ERR_CH(ctx, "%s: QoS %lu:%zu cannot be met\n",
3523 __func__, latency_us, pkt_size);
Dhoat Harpal8c838482017-06-21 21:33:45 +05303524 glink_put_ch_ctx(ctx);
Chris Lewfa6135e2016-08-01 13:29:46 -07003525 return ret;
3526}
3527EXPORT_SYMBOL(glink_qos_latency);
3528
3529/**
3530 * glink_qos_cancel() - Cancel or unregister the QoS request
3531 * @handle: Channel handle for which the QoS request is cancelled.
3532 *
3533 * This function is used to cancel/unregister the QoS requests for a channel.
3534 *
3535 * Return: 0 on success, standard Linux error codes on failure
3536 */
3537int glink_qos_cancel(void *handle)
3538{
3539 struct channel_ctx *ctx = (struct channel_ctx *)handle;
3540 int ret;
3541
Dhoat Harpal9b9a85f2016-08-10 19:38:17 +05303542 ret = glink_get_ch_ctx(ctx);
3543 if (ret)
3544 return ret;
Chris Lewfa6135e2016-08-01 13:29:46 -07003545 if (!ch_is_fully_opened(ctx)) {
3546 GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
3547 __func__);
Dhoat Harpal8c838482017-06-21 21:33:45 +05303548 glink_put_ch_ctx(ctx);
Chris Lewfa6135e2016-08-01 13:29:46 -07003549 return -EBUSY;
3550 }
3551
3552 ret = glink_qos_reset_priority(ctx);
Dhoat Harpal8c838482017-06-21 21:33:45 +05303553 glink_put_ch_ctx(ctx);
Chris Lewfa6135e2016-08-01 13:29:46 -07003554 return ret;
3555}
3556EXPORT_SYMBOL(glink_qos_cancel);
3557
3558/**
3559 * glink_qos_start() - Start of the transmission requiring QoS
3560 * @handle: Channel handle in which the transmit activity is performed.
3561 *
 3562 * This function is called by clients to notify G-Link of the start of a
 3563 * transmission that requires a certain QoS. Clients must account for the
 3564 * QoS ramp time to ensure the QoS is met.
3565 *
3566 * Return: 0 on success, standard Linux error codes on failure
3567 */
3568int glink_qos_start(void *handle)
3569{
3570 struct channel_ctx *ctx = (struct channel_ctx *)handle;
3571 int ret;
3572 unsigned long flags;
3573
Dhoat Harpal9b9a85f2016-08-10 19:38:17 +05303574 ret = glink_get_ch_ctx(ctx);
3575 if (ret)
3576 return ret;
Chris Lewfa6135e2016-08-01 13:29:46 -07003577 if (!ch_is_fully_opened(ctx)) {
3578 GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
3579 __func__);
Dhoat Harpal8c838482017-06-21 21:33:45 +05303580 glink_put_ch_ctx(ctx);
Chris Lewfa6135e2016-08-01 13:29:46 -07003581 return -EBUSY;
3582 }
3583
3584 spin_lock_irqsave(&ctx->transport_ptr->tx_ready_lock_lhb3, flags);
3585 spin_lock(&ctx->tx_lists_lock_lhc3);
3586 ret = glink_qos_add_ch_tx_intent(ctx);
3587 spin_unlock(&ctx->tx_lists_lock_lhc3);
3588 spin_unlock_irqrestore(&ctx->transport_ptr->tx_ready_lock_lhb3, flags);
Dhoat Harpal8c838482017-06-21 21:33:45 +05303589 glink_put_ch_ctx(ctx);
Chris Lewfa6135e2016-08-01 13:29:46 -07003590 return ret;
3591}
3592EXPORT_SYMBOL(glink_qos_start);
3593
3594/**
3595 * glink_qos_get_ramp_time() - Get the QoS ramp time
3596 * @handle: Channel handle for which the QoS ramp time is required.
3597 * @pkt_size: Worst case packet size.
3598 *
3599 * This function is called by the clients to obtain the ramp time required
3600 * to meet the QoS requirements.
3601 *
3602 * Return: QoS ramp time is returned in units of micro-seconds on success,
3603 * standard Linux error codes cast to unsigned long on error.
3604 */
3605unsigned long glink_qos_get_ramp_time(void *handle, size_t pkt_size)
3606{
3607 struct channel_ctx *ctx = (struct channel_ctx *)handle;
Dhoat Harpal9b9a85f2016-08-10 19:38:17 +05303608	int ret;
	unsigned long ramp_time;
Chris Lewfa6135e2016-08-01 13:29:46 -07003609
Dhoat Harpal9b9a85f2016-08-10 19:38:17 +05303610 ret = glink_get_ch_ctx(ctx);
3611 if (ret)
3612 return (unsigned long)ret;
Chris Lewfa6135e2016-08-01 13:29:46 -07003613
3614 if (!ch_is_fully_opened(ctx)) {
3615 GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
3616 __func__);
Dhoat Harpal8c838482017-06-21 21:33:45 +05303617 glink_put_ch_ctx(ctx);
Chris Lewfa6135e2016-08-01 13:29:46 -07003618 return (unsigned long)-EBUSY;
3619 }
3620
	/* Sample the ramp time before dropping the channel reference */
	ramp_time = ctx->transport_ptr->ops->get_power_vote_ramp_time(
			ctx->transport_ptr->ops,
			glink_prio_to_power_state(ctx->transport_ptr,
					ctx->initial_priority));
	glink_put_ch_ctx(ctx);
	return ramp_time;
3626}
3627EXPORT_SYMBOL(glink_qos_get_ramp_time);
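/*
 * Example (illustrative sketch, not part of the original file): a typical
 * QoS sequence registers the latency requirement, waits out the ramp
 * time, brackets the burst with glink_qos_start(), and cancels the
 * request when done. The 500 us figure is arbitrary, and usleep_range()
 * (linux/delay.h) stands in for however the client actually waits.
 */
static int example_qos_burst(void *handle, size_t pkt_size)
{
	unsigned long ramp_us;
	int ret;

	ret = glink_qos_latency(handle, 500, pkt_size);
	if (ret)
		return ret;

	/* Errors come back cast to unsigned long, hence IS_ERR_VALUE() */
	ramp_us = glink_qos_get_ramp_time(handle, pkt_size);
	if (!IS_ERR_VALUE(ramp_us))
		usleep_range(ramp_us, ramp_us + 100);

	ret = glink_qos_start(handle);

	/* ... transmit the latency-sensitive packets here ... */

	glink_qos_cancel(handle);
	return ret;
}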
3628
Chris Lewa9a78ae2017-05-11 16:47:37 -07003629
3630/**
3631 * glink_start_rx_rt() - Vote for RT thread priority on RX.
 3632 * @handle: Channel handle for which transactions are occurring.
3633 *
3634 * Return: 0 on success, standard Linux error codes on failure
3635 */
3636int glink_start_rx_rt(void *handle)
3637{
3638 struct channel_ctx *ctx = (struct channel_ctx *)handle;
3639 int ret;
3640
3641 ret = glink_get_ch_ctx(ctx);
3642 if (ret)
3643 return ret;
3644 if (!ch_is_fully_opened(ctx)) {
3645 GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
3646 __func__);
Dhoat Harpal8c838482017-06-21 21:33:45 +05303647 glink_put_ch_ctx(ctx);
Chris Lewa9a78ae2017-05-11 16:47:37 -07003648 return -EBUSY;
3649 }
3650 ret = ctx->transport_ptr->ops->rx_rt_vote(ctx->transport_ptr->ops);
3651 ctx->rt_vote_on++;
3652 GLINK_INFO_CH(ctx, "%s: Voting RX Realtime Thread %d", __func__, ret);
Dhoat Harpal8c838482017-06-21 21:33:45 +05303653 glink_put_ch_ctx(ctx);
Chris Lewa9a78ae2017-05-11 16:47:37 -07003654 return ret;
3655}
Rohit kumar85d9e712017-12-14 19:06:40 +05303656EXPORT_SYMBOL(glink_start_rx_rt);
Chris Lewa9a78ae2017-05-11 16:47:37 -07003657
3658/**
 3659 * glink_end_rx_rt() - Remove the RT thread priority vote on RX.
 3660 * @handle: Channel handle for which transactions are occurring.
3661 *
3662 * Return: 0 on success, standard Linux error codes on failure
3663 */
3664int glink_end_rx_rt(void *handle)
3665{
3666 struct channel_ctx *ctx = (struct channel_ctx *)handle;
3667 int ret;
3668
3669 ret = glink_get_ch_ctx(ctx);
3670 if (ret)
3671 return ret;
3672 if (!ch_is_fully_opened(ctx)) {
3673 GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
3674 __func__);
Dhoat Harpal8c838482017-06-21 21:33:45 +05303675 glink_put_ch_ctx(ctx);
Chris Lewa9a78ae2017-05-11 16:47:37 -07003676 return -EBUSY;
3677 }
3678 ret = ctx->transport_ptr->ops->rx_rt_unvote(ctx->transport_ptr->ops);
3679 ctx->rt_vote_off++;
3680 GLINK_INFO_CH(ctx, "%s: Unvoting RX Realtime Thread %d", __func__, ret);
Dhoat Harpal8c838482017-06-21 21:33:45 +05303681 glink_put_ch_ctx(ctx);
Chris Lewa9a78ae2017-05-11 16:47:37 -07003682 return ret;
3683}
Rohit kumar85d9e712017-12-14 19:06:40 +05303684EXPORT_SYMBOL(glink_end_rx_rt);
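/*
 * Example (illustrative sketch, not part of the original file): the RT
 * vote calls are meant to bracket a latency-sensitive receive window so
 * the transport's RX thread runs at real-time priority only while needed.
 */
static void example_rt_rx_window(void *handle)
{
	if (glink_start_rx_rt(handle))
		return;

	/* ... receive the latency-sensitive traffic ... */

	glink_end_rx_rt(handle);
}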
Chris Lewa9a78ae2017-05-11 16:47:37 -07003685
Chris Lewfa6135e2016-08-01 13:29:46 -07003686/**
3687 * glink_rpm_rx_poll() - Poll and receive any available events
3688 * @handle: Channel handle in which this operation is performed.
3689 *
3690 * This function is used to poll and receive events and packets while the
3691 * receive interrupt from RPM is disabled.
3692 *
 3693 * Note that even when a value > 0 is returned, indicating that some events
 3694 * were processed, clients should only use the notification functions passed
3695 * into glink_open() to determine if an entire packet has been received since
3696 * some events may be internal details that are not visible to clients.
3697 *
3698 * Return: 0 for no packets available; > 0 for events available; standard
3699 * Linux error codes on failure.
3700 */
3701int glink_rpm_rx_poll(void *handle)
3702{
3703 struct channel_ctx *ctx = (struct channel_ctx *)handle;
3704
3705 if (!ctx)
3706 return -EINVAL;
3707
3708 if (!ch_is_fully_opened(ctx))
3709 return -EBUSY;
3710
3711 if (!ctx->transport_ptr ||
3712 !(ctx->transport_ptr->capabilities & GCAP_INTENTLESS))
3713 return -EOPNOTSUPP;
3714
3715 return ctx->transport_ptr->ops->poll(ctx->transport_ptr->ops,
3716 ctx->lcid);
3717}
3718EXPORT_SYMBOL(glink_rpm_rx_poll);
3719
3720/**
3721 * glink_rpm_mask_rx_interrupt() - Mask or unmask the RPM receive interrupt
3722 * @handle: Channel handle in which this operation is performed.
3723 * @mask: Flag to mask or unmask the interrupt.
3724 * @pstruct: Pointer to any platform specific data.
3725 *
3726 * This function is used to mask or unmask the receive interrupt from RPM.
3727 * "mask" set to true indicates masking the interrupt and when set to false
3728 * indicates unmasking the interrupt.
3729 *
3730 * Return: 0 on success, standard Linux error codes on failure.
3731 */
3732int glink_rpm_mask_rx_interrupt(void *handle, bool mask, void *pstruct)
3733{
3734 struct channel_ctx *ctx = (struct channel_ctx *)handle;
3735
3736 if (!ctx)
3737 return -EINVAL;
3738
3739 if (!ch_is_fully_opened(ctx))
3740 return -EBUSY;
3741
3742 if (!ctx->transport_ptr ||
3743 !(ctx->transport_ptr->capabilities & GCAP_INTENTLESS))
3744 return -EOPNOTSUPP;
3745
3746 return ctx->transport_ptr->ops->mask_rx_irq(ctx->transport_ptr->ops,
3747 ctx->lcid, mask, pstruct);
3748
3749}
3750EXPORT_SYMBOL(glink_rpm_mask_rx_interrupt);
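/*
 * Example (illustrative sketch, not part of the original file): with the
 * RPM interrupt masked, a client drains pending events by polling, e.g.
 * late in a suspend path when interrupts are unavailable. pstruct is
 * transport specific and NULL here for simplicity.
 */
static void example_rpm_poll_once(void *handle)
{
	if (glink_rpm_mask_rx_interrupt(handle, true, NULL))
		return;

	/* Packet completion is still reported through notify_rx() */
	while (glink_rpm_rx_poll(handle) > 0)
		;

	glink_rpm_mask_rx_interrupt(handle, false, NULL);
}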
3751
3752/**
3753 * glink_wait_link_down() - Get status of link
3754 * @handle: Channel handle in which this operation is performed
3755 *
 3756 * This function queries the transport for its status, allowing clients to
 3757 * proceed with cleanup operations.
3758 */
3759int glink_wait_link_down(void *handle)
3760{
3761 struct channel_ctx *ctx = (struct channel_ctx *)handle;
Dhoat Harpal9b9a85f2016-08-10 19:38:17 +05303762 int ret;
Chris Lewfa6135e2016-08-01 13:29:46 -07003763
Dhoat Harpal9b9a85f2016-08-10 19:38:17 +05303764 ret = glink_get_ch_ctx(ctx);
3765 if (ret)
3766 return ret;
3767 if (!ctx->transport_ptr) {
Dhoat Harpal8c838482017-06-21 21:33:45 +05303768 glink_put_ch_ctx(ctx);
Chris Lewfa6135e2016-08-01 13:29:46 -07003769 return -EOPNOTSUPP;
Dhoat Harpal9b9a85f2016-08-10 19:38:17 +05303770 }
	/* Query the transport before dropping the channel reference */
	ret = ctx->transport_ptr->ops->wait_link_down(ctx->transport_ptr->ops);
	glink_put_ch_ctx(ctx);
	return ret;
3773}
3774EXPORT_SYMBOL(glink_wait_link_down);
3775
3776/**
3777 * glink_xprt_ctx_release - Free the transport context
 3778 * @xprt_st_lock: handle to the rwref_lock associated with the transport
3779 *
3780 * This should only be called when the reference count associated with the
3781 * transport goes to zero.
3782 */
3783void glink_xprt_ctx_release(struct rwref_lock *xprt_st_lock)
3784{
3785 struct glink_dbgfs xprt_rm_dbgfs;
3786 struct glink_core_xprt_ctx *xprt_ctx = container_of(xprt_st_lock,
3787 struct glink_core_xprt_ctx, xprt_state_lhb0);
3788 GLINK_INFO("%s: freeing transport [%s->%s]context\n", __func__,
3789 xprt_ctx->name,
3790 xprt_ctx->edge);
3791 xprt_rm_dbgfs.curr_name = xprt_ctx->name;
3792 xprt_rm_dbgfs.par_name = "xprt";
3793 glink_debugfs_remove_recur(&xprt_rm_dbgfs);
3794 GLINK_INFO("%s: xprt debugfs removec\n", __func__);
3795 rwref_put(&xprt_ctx->edge_ctx->edge_ref_lock_lhd1);
3796 kthread_stop(xprt_ctx->tx_task);
3797 xprt_ctx->tx_task = NULL;
3798 glink_core_deinit_xprt_qos_cfg(xprt_ctx);
3799 kfree(xprt_ctx);
3800 xprt_ctx = NULL;
3801}
3802
3803/**
3804 * glink_dummy_xprt_ctx_release - free the dummy transport context
3805 * @xprt_st_lock: Handle to the rwref_lock associated with the transport.
3806 *
3807 * The release function is called when all the channels on this dummy
3808 * transport are closed and the reference count goes to zero.
3809 */
3810static void glink_dummy_xprt_ctx_release(struct rwref_lock *xprt_st_lock)
3811{
3812 struct glink_core_xprt_ctx *xprt_ctx = container_of(xprt_st_lock,
3813 struct glink_core_xprt_ctx, xprt_state_lhb0);
3814 GLINK_INFO("%s: freeing transport [%s->%s]context\n", __func__,
3815 xprt_ctx->name,
3816 xprt_ctx->edge);
Dhoat Harpalced1f062017-09-23 01:46:02 +05303817 kfree(xprt_ctx->ops);
3818 xprt_ctx->ops = NULL;
Chris Lewfa6135e2016-08-01 13:29:46 -07003819 kfree(xprt_ctx);
3820}
3821
3822/**
3823 * glink_xprt_name_to_id() - convert transport name to id
3824 * @name: Name of the transport.
3825 * @id: Assigned id.
3826 *
3827 * Return: 0 on success or standard Linux error code.
3828 */
3829int glink_xprt_name_to_id(const char *name, uint16_t *id)
3830{
Ramesh Yadav Javadi43ff8772018-04-06 13:13:53 +05303831 if (!strcmp(name, "bgcom")) {
3832 *id = SPIV2_XPRT_ID;
3833 return 0;
3834 }
Chris Lewfa6135e2016-08-01 13:29:46 -07003835 if (!strcmp(name, "smem")) {
3836 *id = SMEM_XPRT_ID;
3837 return 0;
3838 }
3839 if (!strcmp(name, "mailbox")) {
3840 *id = SMEM_XPRT_ID;
3841 return 0;
3842 }
3843 if (!strcmp(name, "spi")) {
3844 *id = SPIV2_XPRT_ID;
3845 return 0;
3846 }
3847 if (!strcmp(name, "smd_trans")) {
3848 *id = SMD_TRANS_XPRT_ID;
3849 return 0;
3850 }
3851 if (!strcmp(name, "lloop")) {
3852 *id = LLOOP_XPRT_ID;
3853 return 0;
3854 }
3855 if (!strcmp(name, "mock")) {
3856 *id = MOCK_XPRT_ID;
3857 return 0;
3858 }
3859 if (!strcmp(name, "mock_low")) {
3860 *id = MOCK_XPRT_LOW_ID;
3861 return 0;
3862 }
3863 if (!strcmp(name, "mock_high")) {
3864 *id = MOCK_XPRT_HIGH_ID;
3865 return 0;
3866 }
3867 return -ENODEV;
3868}
3869EXPORT_SYMBOL(glink_xprt_name_to_id);
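/*
 * Example (illustrative sketch, not part of the original file): this
 * mapping is how glink_core_register_transport() derives a transport's
 * migration id from cfg->name.
 */
static void example_lookup_xprt_id(void)
{
	uint16_t id;

	if (!glink_xprt_name_to_id("smem", &id))
		pr_info("glink example: smem maps to xprt id %u\n", id);
}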
3870
3871/**
3872 * of_get_glink_core_qos_cfg() - Parse the qos related dt entries
3873 * @phandle: The handle to the qos related node in DT.
3874 * @cfg: The transport configuration to be filled.
3875 *
3876 * Return: 0 on Success, standard Linux error otherwise.
3877 */
3878int of_get_glink_core_qos_cfg(struct device_node *phandle,
3879 struct glink_core_transport_cfg *cfg)
3880{
3881 int rc, i;
3882 char *key;
3883 uint32_t num_flows;
3884 uint32_t *arr32;
3885
3886 if (!phandle) {
3887 GLINK_ERR("%s: phandle is NULL\n", __func__);
3888 return -EINVAL;
3889 }
3890
3891 key = "qcom,mtu-size";
3892 rc = of_property_read_u32(phandle, key, (uint32_t *)&cfg->mtu);
3893 if (rc) {
3894 GLINK_ERR("%s: missing key %s\n", __func__, key);
3895 return -ENODEV;
3896 }
3897
3898 key = "qcom,tput-stats-cycle";
3899 rc = of_property_read_u32(phandle, key, &cfg->token_count);
3900 if (rc) {
3901 GLINK_ERR("%s: missing key %s\n", __func__, key);
3902 rc = -ENODEV;
3903 goto error;
3904 }
3905
3906 key = "qcom,flow-info";
3907 if (!of_find_property(phandle, key, &num_flows)) {
3908 GLINK_ERR("%s: missing key %s\n", __func__, key);
3909 rc = -ENODEV;
3910 goto error;
3911 }
3912
3913 num_flows /= sizeof(uint32_t);
3914 if (num_flows % 2) {
3915 GLINK_ERR("%s: Invalid flow info length\n", __func__);
3916 rc = -EINVAL;
3917 goto error;
3918 }
3919
3920 num_flows /= 2;
3921 cfg->num_flows = num_flows;
3922
3923 cfg->flow_info = kmalloc_array(num_flows, sizeof(*(cfg->flow_info)),
3924 GFP_KERNEL);
3925 if (!cfg->flow_info) {
3926 GLINK_ERR("%s: Memory allocation for flow info failed\n",
3927 __func__);
3928 rc = -ENOMEM;
3929 goto error;
3930 }
3931 arr32 = kmalloc_array(num_flows * 2, sizeof(uint32_t), GFP_KERNEL);
3932 if (!arr32) {
3933 GLINK_ERR("%s: Memory allocation for temporary array failed\n",
3934 __func__);
3935 rc = -ENOMEM;
3936 goto temp_mem_alloc_fail;
3937 }
3938
3939 of_property_read_u32_array(phandle, key, arr32, num_flows * 2);
3940
3941 for (i = 0; i < num_flows; i++) {
3942 cfg->flow_info[i].mtu_tx_time_us = arr32[2 * i];
3943 cfg->flow_info[i].power_state = arr32[2 * i + 1];
3944 }
3945
3946 kfree(arr32);
3947 of_node_put(phandle);
3948 return 0;
3949
3950temp_mem_alloc_fail:
3951 kfree(cfg->flow_info);
3952error:
3953 cfg->mtu = 0;
3954 cfg->token_count = 0;
3955 cfg->num_flows = 0;
3956 cfg->flow_info = NULL;
3957 return rc;
3958}
3959EXPORT_SYMBOL(of_get_glink_core_qos_cfg);
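/*
 * Example (illustrative sketch, not part of the original file): a device
 * tree fragment matching the parser above. qcom,flow-info carries
 * <mtu_tx_time_us power_state> pairs, one pair per flow/priority; the
 * node name and values below are examples only.
 *
 *	qcom,glink-qos-config {
 *		qcom,mtu-size = <0x800>;
 *		qcom,tput-stats-cycle = <10>;
 *		qcom,flow-info = <0x0 0x0>,
 *				 <0x70 0x1>;
 *	};
 */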
3960
3961/**
3962 * glink_core_init_xprt_qos_cfg() - Initialize a transport's QoS configuration
3963 * @xprt_ptr: Transport to be initialized with QoS configuration.
3964 * @cfg: Data structure containing QoS configuration.
3965 *
3966 * This function is used during the transport registration to initialize it
3967 * with QoS configuration.
3968 *
3969 * Return: 0 on success, standard Linux error codes on failure.
3970 */
3971static int glink_core_init_xprt_qos_cfg(struct glink_core_xprt_ctx *xprt_ptr,
3972 struct glink_core_transport_cfg *cfg)
3973{
3974 int i;
3975 struct sched_param param = { .sched_priority = GLINK_KTHREAD_PRIO };
3976
3977 xprt_ptr->mtu = cfg->mtu ? cfg->mtu : GLINK_QOS_DEF_MTU;
3978 xprt_ptr->num_priority = cfg->num_flows ? cfg->num_flows :
3979 GLINK_QOS_DEF_NUM_PRIORITY;
3980 xprt_ptr->token_count = cfg->token_count ? cfg->token_count :
3981 GLINK_QOS_DEF_NUM_TOKENS;
3982
 3983	xprt_ptr->prio_bin = kzalloc(xprt_ptr->num_priority *
 3984				sizeof(struct glink_qos_priority_bin),
 3985				GFP_KERNEL);
 3986	if (!xprt_ptr->prio_bin) {
 3987		GLINK_ERR("%s: unable to allocate priority bins\n", __func__);
 3988		return -ENOMEM;
 3989	}
 3990	if (xprt_ptr->num_priority > 1)
 3991		sched_setscheduler(xprt_ptr->tx_task, SCHED_FIFO, &param);
3992 for (i = 1; i < xprt_ptr->num_priority; i++) {
3993 xprt_ptr->prio_bin[i].max_rate_kBps =
3994 glink_qos_calc_rate_kBps(xprt_ptr->mtu,
3995 cfg->flow_info[i].mtu_tx_time_us);
3996 xprt_ptr->prio_bin[i].power_state =
3997 cfg->flow_info[i].power_state;
3998 INIT_LIST_HEAD(&xprt_ptr->prio_bin[i].tx_ready);
3999 }
4000 xprt_ptr->prio_bin[0].max_rate_kBps = 0;
4001 if (cfg->flow_info)
4002 xprt_ptr->prio_bin[0].power_state =
4003 cfg->flow_info[0].power_state;
4004 INIT_LIST_HEAD(&xprt_ptr->prio_bin[0].tx_ready);
4005 xprt_ptr->threshold_rate_kBps =
4006 xprt_ptr->prio_bin[xprt_ptr->num_priority - 1].max_rate_kBps;
4007
4008 return 0;
4009}
4010
4011/**
4012 * glink_core_deinit_xprt_qos_cfg() - Reset a transport's QoS configuration
4013 * @xprt_ptr: Transport to be deinitialized.
4014 *
4015 * This function is used during the time of transport unregistration to
4016 * de-initialize the QoS configuration from a transport.
4017 */
4018static void glink_core_deinit_xprt_qos_cfg(struct glink_core_xprt_ctx *xprt_ptr)
4019{
4020 kfree(xprt_ptr->prio_bin);
4021 xprt_ptr->prio_bin = NULL;
4022 xprt_ptr->mtu = 0;
4023 xprt_ptr->num_priority = 0;
4024 xprt_ptr->token_count = 0;
4025 xprt_ptr->threshold_rate_kBps = 0;
4026}
4027
4028/**
4029 * glink_core_register_transport() - register a new transport
4030 * @if_ptr: The interface to the transport.
4031 * @cfg: Description and configuration of the transport.
4032 *
 4033 * Return: 0 on success, -EINVAL for invalid input; other standard Linux
 4034 * error codes on failure.
4034 */
4035int glink_core_register_transport(struct glink_transport_if *if_ptr,
4036 struct glink_core_transport_cfg *cfg)
4037{
4038 struct glink_core_xprt_ctx *xprt_ptr;
4039 size_t len;
4040 uint16_t id;
4041 int ret;
4042 char log_name[GLINK_NAME_SIZE*2+2] = {0};
4043
4044 if (!if_ptr || !cfg || !cfg->name || !cfg->edge)
4045 return -EINVAL;
4046
4047 len = strlen(cfg->name);
4048 if (len == 0 || len >= GLINK_NAME_SIZE)
4049 return -EINVAL;
4050
4051 len = strlen(cfg->edge);
4052 if (len == 0 || len >= GLINK_NAME_SIZE)
4053 return -EINVAL;
4054
4055 if (cfg->versions_entries < 1)
4056 return -EINVAL;
4057
4058 ret = glink_xprt_name_to_id(cfg->name, &id);
4059 if (ret)
4060 return ret;
4061
4062 xprt_ptr = kzalloc(sizeof(struct glink_core_xprt_ctx), GFP_KERNEL);
4063 if (xprt_ptr == NULL)
4064 return -ENOMEM;
4065
4066 xprt_ptr->id = id;
4067 rwref_lock_init(&xprt_ptr->xprt_state_lhb0,
4068 glink_xprt_ctx_release);
4069 strlcpy(xprt_ptr->name, cfg->name, GLINK_NAME_SIZE);
4070 strlcpy(xprt_ptr->edge, cfg->edge, GLINK_NAME_SIZE);
4071 xprt_ptr->versions = cfg->versions;
4072 xprt_ptr->versions_entries = cfg->versions_entries;
4073 xprt_ptr->local_version_idx = cfg->versions_entries - 1;
4074 xprt_ptr->remote_version_idx = cfg->versions_entries - 1;
4075 xprt_ptr->edge_ctx = edge_name_to_ctx_create(xprt_ptr);
Dhoat Harpal9a8ed652016-11-08 15:00:20 +05304076 if (!xprt_ptr->edge_ctx) {
4077 kfree(xprt_ptr);
4078 return -ENOMEM;
4079 }
Chris Lewfa6135e2016-08-01 13:29:46 -07004080 xprt_ptr->l_features =
4081 cfg->versions[cfg->versions_entries - 1].features;
4082 if (!if_ptr->poll)
4083 if_ptr->poll = dummy_poll;
4084 if (!if_ptr->mask_rx_irq)
4085 if_ptr->mask_rx_irq = dummy_mask_rx_irq;
4086 if (!if_ptr->reuse_rx_intent)
4087 if_ptr->reuse_rx_intent = dummy_reuse_rx_intent;
4088 if (!if_ptr->wait_link_down)
4089 if_ptr->wait_link_down = dummy_wait_link_down;
4090 if (!if_ptr->tx_cmd_tracer_pkt)
4091 if_ptr->tx_cmd_tracer_pkt = dummy_tx_cmd_tracer_pkt;
4092 if (!if_ptr->get_power_vote_ramp_time)
4093 if_ptr->get_power_vote_ramp_time =
4094 dummy_get_power_vote_ramp_time;
4095 if (!if_ptr->power_vote)
4096 if_ptr->power_vote = dummy_power_vote;
4097 if (!if_ptr->power_unvote)
4098 if_ptr->power_unvote = dummy_power_unvote;
Chris Lewa9a78ae2017-05-11 16:47:37 -07004099 if (!if_ptr->rx_rt_vote)
4100 if_ptr->rx_rt_vote = dummy_rx_rt_vote;
4101 if (!if_ptr->rx_rt_unvote)
4102 if_ptr->rx_rt_unvote = dummy_rx_rt_unvote;
Chris Lewfa6135e2016-08-01 13:29:46 -07004103 xprt_ptr->capabilities = 0;
4104 xprt_ptr->ops = if_ptr;
4105 spin_lock_init(&xprt_ptr->xprt_ctx_lock_lhb1);
4106 xprt_ptr->next_lcid = 1; /* 0 reserved for default unconfigured */
4107 INIT_LIST_HEAD(&xprt_ptr->free_lcid_list);
4108 xprt_ptr->max_cid = cfg->max_cid;
4109 xprt_ptr->max_iid = cfg->max_iid;
4110 xprt_ptr->local_state = GLINK_XPRT_DOWN;
4111 xprt_ptr->remote_neg_completed = false;
4112 INIT_LIST_HEAD(&xprt_ptr->channels);
4113 INIT_LIST_HEAD(&xprt_ptr->notified);
4114 spin_lock_init(&xprt_ptr->tx_ready_lock_lhb3);
4115 mutex_init(&xprt_ptr->xprt_dbgfs_lock_lhb4);
Kyle Yan65be4a52016-10-31 15:05:00 -07004116 kthread_init_work(&xprt_ptr->tx_kwork, tx_func);
4117 kthread_init_worker(&xprt_ptr->tx_wq);
Chris Lewfa6135e2016-08-01 13:29:46 -07004118 xprt_ptr->tx_task = kthread_run(kthread_worker_fn,
4119 &xprt_ptr->tx_wq, "%s_%s_glink_tx",
4120 xprt_ptr->edge, xprt_ptr->name);
4121 if (IS_ERR_OR_NULL(xprt_ptr->tx_task)) {
4122 GLINK_ERR("%s: unable to run thread\n", __func__);
Chris Lewfa6135e2016-08-01 13:29:46 -07004123 kfree(xprt_ptr);
4124 return -ENOMEM;
4125 }
Chris Lew490a42a2017-10-02 15:20:54 -07004126 cfg->tx_task = xprt_ptr->tx_task;
Chris Lewfa6135e2016-08-01 13:29:46 -07004127 ret = glink_core_init_xprt_qos_cfg(xprt_ptr, cfg);
4128 if (ret < 0) {
4129 kfree(xprt_ptr);
4130 return ret;
4131 }
4132 INIT_DELAYED_WORK(&xprt_ptr->pm_qos_work, glink_pm_qos_cancel_worker);
4133 pm_qos_add_request(&xprt_ptr->pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
4134 PM_QOS_DEFAULT_VALUE);
4135
4136 if_ptr->glink_core_priv = xprt_ptr;
4137 if_ptr->glink_core_if_ptr = &core_impl;
4138
4139 mutex_lock(&transport_list_lock_lha0);
4140 list_add_tail(&xprt_ptr->list_node, &transport_list);
4141 mutex_unlock(&transport_list_lock_lha0);
4142 glink_debugfs_add_xprt(xprt_ptr);
4143 snprintf(log_name, sizeof(log_name), "%s_%s",
4144 xprt_ptr->edge, xprt_ptr->name);
4145 xprt_ptr->log_ctx = ipc_log_context_create(NUM_LOG_PAGES, log_name, 0);
4146 if (!xprt_ptr->log_ctx)
4147 GLINK_ERR("%s: unable to create log context for [%s:%s]\n",
4148 __func__, xprt_ptr->edge, xprt_ptr->name);
4149
4150 return 0;
4151}
4152EXPORT_SYMBOL(glink_core_register_transport);
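/*
 * Example (illustrative sketch, not part of the original file): the
 * minimum a transport driver fills in before calling
 * glink_core_register_transport(). The glink_transport_if operations
 * (tx, tx_cmd_*, ...) come from the transport implementation and are
 * assumed to be set up already; all example_* names are hypothetical.
 */
static uint32_t example_negotiate_features(struct glink_transport_if *if_ptr,
		const struct glink_core_version *version, uint32_t features)
{
	/* Accept only feature bits both sides understand */
	return version->features & features;
}

static struct glink_core_version example_versions[] = {
	{ .version = 1, .features = 0,
	  .negotiate_features = example_negotiate_features },
};

static int example_register_xprt(struct glink_transport_if *if_ptr)
{
	struct glink_core_transport_cfg cfg = {
		.name = "smem",
		.edge = "mpss",
		.versions = example_versions,
		.versions_entries = ARRAY_SIZE(example_versions),
		.max_cid = 0xffff,
		.max_iid = 0xffffffff,
	};

	return glink_core_register_transport(if_ptr, &cfg);
}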
4153
4154/**
4155 * glink_core_unregister_transport() - unregister a transport
4156 *
4157 * @if_ptr: The interface to the transport.
4158 */
4159void glink_core_unregister_transport(struct glink_transport_if *if_ptr)
4160{
4161 struct glink_core_xprt_ctx *xprt_ptr = if_ptr->glink_core_priv;
4162
4163 GLINK_DBG_XPRT(xprt_ptr, "%s: destroying transport\n", __func__);
4164 if (xprt_ptr->local_state != GLINK_XPRT_DOWN) {
4165 GLINK_ERR_XPRT(xprt_ptr,
4166 "%s: link_down should have been called before this\n",
4167 __func__);
4168 return;
4169 }
4170
4171 mutex_lock(&transport_list_lock_lha0);
4172 list_del(&xprt_ptr->list_node);
4173 mutex_unlock(&transport_list_lock_lha0);
4174 flush_delayed_work(&xprt_ptr->pm_qos_work);
4175 pm_qos_remove_request(&xprt_ptr->pm_qos_req);
4176 ipc_log_context_destroy(xprt_ptr->log_ctx);
4177 xprt_ptr->log_ctx = NULL;
4178 rwref_put(&xprt_ptr->xprt_state_lhb0);
4179}
4180EXPORT_SYMBOL(glink_core_unregister_transport);
4181
4182/**
4183 * glink_core_link_up() - transport link-up notification
4184 *
4185 * @if_ptr: pointer to transport interface
4186 */
4187static void glink_core_link_up(struct glink_transport_if *if_ptr)
4188{
4189 struct glink_core_xprt_ctx *xprt_ptr = if_ptr->glink_core_priv;
4190
4191 /* start local negotiation */
4192 xprt_ptr->local_state = GLINK_XPRT_NEGOTIATING;
4193 xprt_ptr->local_version_idx = xprt_ptr->versions_entries - 1;
4194 xprt_ptr->l_features =
4195 xprt_ptr->versions[xprt_ptr->local_version_idx].features;
4196 if_ptr->tx_cmd_version(if_ptr,
4197 xprt_ptr->versions[xprt_ptr->local_version_idx].version,
4198 xprt_ptr->versions[xprt_ptr->local_version_idx].features);
4199
4200}
4201
4202/**
4203 * glink_core_link_down() - transport link-down notification
4204 *
4205 * @if_ptr: pointer to transport interface
4206 */
4207static void glink_core_link_down(struct glink_transport_if *if_ptr)
4208{
4209 struct glink_core_xprt_ctx *xprt_ptr = if_ptr->glink_core_priv;
4210
4211 rwref_write_get(&xprt_ptr->xprt_state_lhb0);
4212 xprt_ptr->next_lcid = 1;
4213 xprt_ptr->local_state = GLINK_XPRT_DOWN;
Dhoat Harpal28680f02017-06-13 22:19:55 +05304214 xprt_ptr->curr_qos_rate_kBps = 0;
Chris Lewfa6135e2016-08-01 13:29:46 -07004215 xprt_ptr->local_version_idx = xprt_ptr->versions_entries - 1;
4216 xprt_ptr->remote_version_idx = xprt_ptr->versions_entries - 1;
4217 xprt_ptr->l_features =
4218 xprt_ptr->versions[xprt_ptr->local_version_idx].features;
4219 xprt_ptr->remote_neg_completed = false;
4220 rwref_write_put(&xprt_ptr->xprt_state_lhb0);
4221 GLINK_DBG_XPRT(xprt_ptr,
4222 "%s: Flushing work from tx_wq. Thread: %u\n", __func__,
4223 current->pid);
Kyle Yan65be4a52016-10-31 15:05:00 -07004224 kthread_flush_worker(&xprt_ptr->tx_wq);
Chris Lewfa6135e2016-08-01 13:29:46 -07004225 glink_core_channel_cleanup(xprt_ptr);
4226 check_link_notifier_and_notify(xprt_ptr, GLINK_LINK_STATE_DOWN);
4227}
4228
4229/**
4230 * glink_create_dummy_xprt_ctx() - create a dummy transport that replaces all
4231 * the transport interface functions with a dummy
4232 * @orig_xprt_ctx: Pointer to the original transport context.
4233 *
4234 * The dummy transport is used only when it is swapped with the actual transport
4235 * pointer in ssr/unregister case.
4236 *
4237 * Return: Pointer to dummy transport context.
4238 */
4239static struct glink_core_xprt_ctx *glink_create_dummy_xprt_ctx(
4240 struct glink_core_xprt_ctx *orig_xprt_ctx)
4241{
4242
4243 struct glink_core_xprt_ctx *xprt_ptr;
4244 struct glink_transport_if *if_ptr;
4245
4246 xprt_ptr = kzalloc(sizeof(*xprt_ptr), GFP_KERNEL);
4247 if (!xprt_ptr)
4248 return ERR_PTR(-ENOMEM);
Chris Lewf8c82542017-06-22 16:45:59 -07004249 if_ptr = kzalloc(sizeof(*if_ptr), GFP_KERNEL);
Chris Lewfa6135e2016-08-01 13:29:46 -07004250 if (!if_ptr) {
4251 kfree(xprt_ptr);
4252 return ERR_PTR(-ENOMEM);
4253 }
4254 rwref_lock_init(&xprt_ptr->xprt_state_lhb0,
4255 glink_dummy_xprt_ctx_release);
4256
4257 strlcpy(xprt_ptr->name, "dummy", GLINK_NAME_SIZE);
4258 strlcpy(xprt_ptr->edge, orig_xprt_ctx->edge, GLINK_NAME_SIZE);
4259 if_ptr->poll = dummy_poll;
4260 if_ptr->mask_rx_irq = dummy_mask_rx_irq;
4261 if_ptr->reuse_rx_intent = dummy_reuse_rx_intent;
4262 if_ptr->wait_link_down = dummy_wait_link_down;
4263 if_ptr->allocate_rx_intent = dummy_allocate_rx_intent;
4264 if_ptr->deallocate_rx_intent = dummy_deallocate_rx_intent;
4265 if_ptr->tx_cmd_local_rx_intent = dummy_tx_cmd_local_rx_intent;
4266 if_ptr->tx_cmd_local_rx_done = dummy_tx_cmd_local_rx_done;
4267 if_ptr->tx = dummy_tx;
4268 if_ptr->tx_cmd_rx_intent_req = dummy_tx_cmd_rx_intent_req;
4269 if_ptr->tx_cmd_remote_rx_intent_req_ack =
4270 dummy_tx_cmd_remote_rx_intent_req_ack;
4271 if_ptr->tx_cmd_set_sigs = dummy_tx_cmd_set_sigs;
Dhoat Harpal8e06fcc2017-08-18 16:06:48 +05304272 if_ptr->tx_cmd_ch_open = dummy_tx_cmd_ch_open;
4273 if_ptr->tx_cmd_ch_remote_open_ack = dummy_tx_cmd_ch_remote_open_ack;
Chris Lewfa6135e2016-08-01 13:29:46 -07004274 if_ptr->tx_cmd_ch_close = dummy_tx_cmd_ch_close;
4275 if_ptr->tx_cmd_ch_remote_close_ack = dummy_tx_cmd_ch_remote_close_ack;
Dhoat Harpal8e06fcc2017-08-18 16:06:48 +05304276 if_ptr->tx_cmd_tracer_pkt = dummy_tx_cmd_tracer_pkt;
4277 if_ptr->get_power_vote_ramp_time = dummy_get_power_vote_ramp_time;
4278 if_ptr->power_vote = dummy_power_vote;
4279 if_ptr->power_unvote = dummy_power_unvote;
Chris Lewfa6135e2016-08-01 13:29:46 -07004280
4281 xprt_ptr->ops = if_ptr;
4282 xprt_ptr->log_ctx = log_ctx;
4283 spin_lock_init(&xprt_ptr->xprt_ctx_lock_lhb1);
4284 INIT_LIST_HEAD(&xprt_ptr->free_lcid_list);
4285 xprt_ptr->local_state = GLINK_XPRT_DOWN;
4286 xprt_ptr->remote_neg_completed = false;
4287 INIT_LIST_HEAD(&xprt_ptr->channels);
4288 xprt_ptr->dummy_in_use = true;
4289 INIT_LIST_HEAD(&xprt_ptr->notified);
4290 spin_lock_init(&xprt_ptr->tx_ready_lock_lhb3);
4291 mutex_init(&xprt_ptr->xprt_dbgfs_lock_lhb4);
4292 return xprt_ptr;
4293}
4294
Dhoat Harpalc7a88a92016-08-29 22:59:27 +05304295static struct channel_ctx *get_first_ch_ctx(
4296 struct glink_core_xprt_ctx *xprt_ctx)
4297{
4298 unsigned long flags;
4299 struct channel_ctx *ctx;
4300
4301 spin_lock_irqsave(&xprt_ctx->xprt_ctx_lock_lhb1, flags);
4302 if (!list_empty(&xprt_ctx->channels)) {
4303 ctx = list_first_entry(&xprt_ctx->channels,
4304 struct channel_ctx, port_list_node);
4305 rwref_get(&ctx->ch_state_lhb2);
4306 } else {
4307 ctx = NULL;
4308 }
4309 spin_unlock_irqrestore(&xprt_ctx->xprt_ctx_lock_lhb1, flags);
4310 return ctx;
4311}
4312
4313static void glink_core_move_ch_node(struct glink_core_xprt_ctx *xprt_ptr,
4314 struct glink_core_xprt_ctx *dummy_xprt_ctx, struct channel_ctx *ctx)
4315{
4316 unsigned long flags, d_flags;
4317
4318 spin_lock_irqsave(&dummy_xprt_ctx->xprt_ctx_lock_lhb1, d_flags);
4319 spin_lock_irqsave(&xprt_ptr->xprt_ctx_lock_lhb1, flags);
4320 rwref_get(&dummy_xprt_ctx->xprt_state_lhb0);
4321 list_move_tail(&ctx->port_list_node, &dummy_xprt_ctx->channels);
4322 spin_unlock_irqrestore(&xprt_ptr->xprt_ctx_lock_lhb1, flags);
4323 spin_unlock_irqrestore(&dummy_xprt_ctx->xprt_ctx_lock_lhb1, d_flags);
4324}
4325
Chris Lewfa6135e2016-08-01 13:29:46 -07004326/**
4327 * glink_core_channel_cleanup() - cleanup all channels for the transport
4328 *
4329 * @xprt_ptr: pointer to transport context
4330 *
4331 * This function should be called either from link_down or ssr
4332 */
4333static void glink_core_channel_cleanup(struct glink_core_xprt_ctx *xprt_ptr)
4334{
4335 unsigned long flags, d_flags;
Dhoat Harpalc7a88a92016-08-29 22:59:27 +05304336 struct channel_ctx *ctx;
Chris Lewfa6135e2016-08-01 13:29:46 -07004337 struct channel_lcid *temp_lcid, *temp_lcid1;
4338 struct glink_core_xprt_ctx *dummy_xprt_ctx;
4339
4340 dummy_xprt_ctx = glink_create_dummy_xprt_ctx(xprt_ptr);
4341 if (IS_ERR_OR_NULL(dummy_xprt_ctx)) {
4342 GLINK_ERR("%s: Dummy Transport creation failed\n", __func__);
4343 return;
4344 }
Chris Lewfa6135e2016-08-01 13:29:46 -07004345 rwref_read_get(&dummy_xprt_ctx->xprt_state_lhb0);
4346 rwref_read_get(&xprt_ptr->xprt_state_lhb0);
Dhoat Harpalc7a88a92016-08-29 22:59:27 +05304347 ctx = get_first_ch_ctx(xprt_ptr);
4348 while (ctx) {
Dhoat Harpald6ed7262017-10-17 19:54:17 +05304349 spin_lock_irqsave(&xprt_ptr->tx_ready_lock_lhb3, flags);
4350 spin_lock(&ctx->tx_lists_lock_lhc3);
4351 if (!list_empty(&ctx->tx_active))
4352 glink_qos_done_ch_tx(ctx);
4353 spin_unlock(&ctx->tx_lists_lock_lhc3);
4354 spin_unlock_irqrestore(&xprt_ptr->tx_ready_lock_lhb3, flags);
Chris Lewfa6135e2016-08-01 13:29:46 -07004355 rwref_write_get_atomic(&ctx->ch_state_lhb2, true);
4356 if (ctx->local_open_state == GLINK_CHANNEL_OPENED ||
4357 ctx->local_open_state == GLINK_CHANNEL_OPENING) {
Chris Lewfa6135e2016-08-01 13:29:46 -07004358 ctx->transport_ptr = dummy_xprt_ctx;
Dhoat Harpalc7a88a92016-08-29 22:59:27 +05304359 glink_core_move_ch_node(xprt_ptr, dummy_xprt_ctx, ctx);
Chris Lewfa6135e2016-08-01 13:29:46 -07004360 } else {
4361 /* local state is in either CLOSED or CLOSING */
Chris Lewfa6135e2016-08-01 13:29:46 -07004362 glink_core_remote_close_common(ctx, true);
4363 if (ctx->local_open_state == GLINK_CHANNEL_CLOSING)
4364 glink_core_ch_close_ack_common(ctx, true);
4365 /* Channel should be fully closed now. Delete here */
4366 if (ch_is_fully_closed(ctx))
4367 glink_delete_ch_from_list(ctx, false);
Chris Lewfa6135e2016-08-01 13:29:46 -07004368 }
Dhoat Harpalc7a88a92016-08-29 22:59:27 +05304369 rwref_put(&ctx->ch_state_lhb2);
Dhoat Harpal52fdd412017-02-15 19:43:27 +05304370 rwref_write_put(&ctx->ch_state_lhb2);
Dhoat Harpalc7a88a92016-08-29 22:59:27 +05304371 ctx = get_first_ch_ctx(xprt_ptr);
Chris Lewfa6135e2016-08-01 13:29:46 -07004372 }
Dhoat Harpalc7a88a92016-08-29 22:59:27 +05304373 spin_lock_irqsave(&xprt_ptr->xprt_ctx_lock_lhb1, flags);
Chris Lewfa6135e2016-08-01 13:29:46 -07004374 list_for_each_entry_safe(temp_lcid, temp_lcid1,
4375 &xprt_ptr->free_lcid_list, list_node) {
4376 list_del(&temp_lcid->list_node);
 4377		kfree(temp_lcid);
4378 }
Chris Lewfa6135e2016-08-01 13:29:46 -07004379 spin_unlock_irqrestore(&xprt_ptr->xprt_ctx_lock_lhb1, flags);
Chris Lewfa6135e2016-08-01 13:29:46 -07004380 rwref_read_put(&xprt_ptr->xprt_state_lhb0);
4381
4382 spin_lock_irqsave(&dummy_xprt_ctx->xprt_ctx_lock_lhb1, d_flags);
Dhoat Harpalc7a88a92016-08-29 22:59:27 +05304383 dummy_xprt_ctx->dummy_in_use = false;
Chris Lewfa6135e2016-08-01 13:29:46 -07004384 while (!list_empty(&dummy_xprt_ctx->channels)) {
4385 ctx = list_first_entry(&dummy_xprt_ctx->channels,
4386 struct channel_ctx, port_list_node);
4387 list_move_tail(&ctx->port_list_node,
4388 &dummy_xprt_ctx->notified);
4389
4390 rwref_get(&ctx->ch_state_lhb2);
4391 spin_unlock_irqrestore(&dummy_xprt_ctx->xprt_ctx_lock_lhb1,
4392 d_flags);
4393 glink_core_remote_close_common(ctx, false);
4394 spin_lock_irqsave(&dummy_xprt_ctx->xprt_ctx_lock_lhb1,
4395 d_flags);
4396 rwref_put(&ctx->ch_state_lhb2);
4397 }
4398 spin_unlock_irqrestore(&dummy_xprt_ctx->xprt_ctx_lock_lhb1, d_flags);
4399 rwref_read_put(&dummy_xprt_ctx->xprt_state_lhb0);
4400}
4401/**
4402 * glink_core_rx_cmd_version() - receive version/features from remote system
4403 *
4404 * @if_ptr: pointer to transport interface
4405 * @r_version: remote version
4406 * @r_features: remote features
4407 *
4408 * This function is called in response to a remote-initiated version/feature
4409 * negotiation sequence.
4410 */
4411static void glink_core_rx_cmd_version(struct glink_transport_if *if_ptr,
4412 uint32_t r_version, uint32_t r_features)
4413{
4414 struct glink_core_xprt_ctx *xprt_ptr = if_ptr->glink_core_priv;
4415 const struct glink_core_version *versions = xprt_ptr->versions;
4416 bool neg_complete = false;
4417 uint32_t l_version;
4418
4419 if (xprt_is_fully_opened(xprt_ptr)) {
4420 GLINK_ERR_XPRT(xprt_ptr,
4421 "%s: Negotiation already complete\n", __func__);
4422 return;
4423 }
4424
4425 l_version = versions[xprt_ptr->remote_version_idx].version;
4426
4427 GLINK_INFO_XPRT(xprt_ptr,
4428 "%s: [local]%x:%08x [remote]%x:%08x\n", __func__,
4429 l_version, xprt_ptr->l_features, r_version, r_features);
4430
4431 if (l_version > r_version) {
4432 /* Find matching version */
4433 while (true) {
4434 uint32_t rver_idx;
4435
4436 if (xprt_ptr->remote_version_idx == 0) {
4437 /* version negotiation failed */
4438 GLINK_ERR_XPRT(xprt_ptr,
4439 "%s: Transport negotiation failed\n",
4440 __func__);
4441 l_version = 0;
4442 xprt_ptr->l_features = 0;
4443 break;
4444 }
4445 --xprt_ptr->remote_version_idx;
4446 rver_idx = xprt_ptr->remote_version_idx;
4447
4448 if (versions[rver_idx].version <= r_version) {
4449 /* found a potential match */
4450 l_version = versions[rver_idx].version;
4451 xprt_ptr->l_features =
4452 versions[rver_idx].features;
4453 break;
4454 }
4455 }
4456 }
4457
4458 if (l_version == r_version) {
4459 GLINK_INFO_XPRT(xprt_ptr,
4460 "%s: Remote and local version are matched %x:%08x\n",
4461 __func__, r_version, r_features);
4462 if (xprt_ptr->l_features != r_features) {
4463 uint32_t rver_idx = xprt_ptr->remote_version_idx;
4464
4465 xprt_ptr->l_features = versions[rver_idx]
4466 .negotiate_features(if_ptr,
4467 &xprt_ptr->versions[rver_idx],
4468 r_features);
4469 GLINK_INFO_XPRT(xprt_ptr,
4470 "%s: negotiate features %x:%08x\n",
4471 __func__, l_version, xprt_ptr->l_features);
4472 }
4473 neg_complete = true;
4474 }
4475 if_ptr->tx_cmd_version_ack(if_ptr, l_version, xprt_ptr->l_features);
4476
4477 if (neg_complete) {
4478 GLINK_INFO_XPRT(xprt_ptr,
4479 "%s: Remote negotiation complete %x:%08x\n", __func__,
4480 l_version, xprt_ptr->l_features);
4481
4482 if (xprt_ptr->local_state == GLINK_XPRT_OPENED) {
4483 xprt_ptr->capabilities = if_ptr->set_version(if_ptr,
4484 l_version,
4485 xprt_ptr->l_features);
4486 }
4487 if_ptr->glink_core_priv->remote_neg_completed = true;
4488 if (xprt_is_fully_opened(xprt_ptr))
4489 check_link_notifier_and_notify(xprt_ptr,
4490 GLINK_LINK_STATE_UP);
4491 }
4492}
4493
4494/**
4495 * glink_core_rx_cmd_version_ack() - receive negotiation ack from remote system
4496 *
4497 * @if_ptr: pointer to transport interface
4498 * @r_version: remote version response
4499 * @r_features: remote features response
4500 *
4501 * This function is called in response to a local-initiated version/feature
4502 * negotiation sequence and is the counter-offer from the remote side based
4503 * upon the initial version and feature set requested.
4504 */
4505static void glink_core_rx_cmd_version_ack(struct glink_transport_if *if_ptr,
4506 uint32_t r_version, uint32_t r_features)
4507{
4508 struct glink_core_xprt_ctx *xprt_ptr = if_ptr->glink_core_priv;
4509 const struct glink_core_version *versions = xprt_ptr->versions;
4510 uint32_t l_version;
4511 bool neg_complete = false;
4512
4513 if (xprt_is_fully_opened(xprt_ptr)) {
4514 GLINK_ERR_XPRT(xprt_ptr,
4515 "%s: Negotiation already complete\n", __func__);
4516 return;
4517 }
4518
4519 l_version = versions[xprt_ptr->local_version_idx].version;
4520
4521 GLINK_INFO_XPRT(xprt_ptr,
4522 "%s: [local]%x:%08x [remote]%x:%08x\n", __func__,
4523 l_version, xprt_ptr->l_features, r_version, r_features);
4524
4525 if (l_version > r_version) {
4526 /* find matching version */
4527 while (true) {
4528 uint32_t lver_idx = xprt_ptr->local_version_idx;
4529
4530 if (xprt_ptr->local_version_idx == 0) {
4531 /* version negotiation failed */
4532 xprt_ptr->local_state = GLINK_XPRT_FAILED;
4533 GLINK_ERR_XPRT(xprt_ptr,
4534 "%s: Transport negotiation failed\n",
4535 __func__);
4536 l_version = 0;
4537 xprt_ptr->l_features = 0;
4538 break;
4539 }
4540 --xprt_ptr->local_version_idx;
4541 lver_idx = xprt_ptr->local_version_idx;
4542
4543 if (versions[lver_idx].version <= r_version) {
4544 /* found a potential match */
4545 l_version = versions[lver_idx].version;
4546 xprt_ptr->l_features =
4547 versions[lver_idx].features;
4548 break;
4549 }
4550 }
4551 } else if (l_version == r_version) {
4552 if (xprt_ptr->l_features != r_features) {
4553 /* version matches, negotiate features */
4554 uint32_t lver_idx = xprt_ptr->local_version_idx;
4555
4556 xprt_ptr->l_features = versions[lver_idx]
4557 .negotiate_features(if_ptr,
4558 &versions[lver_idx],
4559 r_features);
4560 GLINK_INFO_XPRT(xprt_ptr,
4561 "%s: negotiation features %x:%08x\n",
4562 __func__, l_version, xprt_ptr->l_features);
4563 } else {
4564 neg_complete = true;
4565 }
4566 } else {
4567 /*
4568 * r_version > l_version
4569 *
4570 * Remote responded with a version greater than what we
 4571 * requested, which is invalid and is treated as a failure of the
4572 * negotiation algorithm.
4573 */
4574 GLINK_ERR_XPRT(xprt_ptr,
4575 "%s: [local]%x:%08x [remote]%x:%08x neg failure\n",
4576 __func__, l_version, xprt_ptr->l_features, r_version,
4577 r_features);
4578 xprt_ptr->local_state = GLINK_XPRT_FAILED;
4579 l_version = 0;
4580 xprt_ptr->l_features = 0;
4581 }
4582
4583 if (neg_complete) {
4584 /* negotiation complete */
4585 GLINK_INFO_XPRT(xprt_ptr,
4586 "%s: Local negotiation complete %x:%08x\n",
4587 __func__, l_version, xprt_ptr->l_features);
4588
4589 if (xprt_ptr->remote_neg_completed) {
4590 xprt_ptr->capabilities = if_ptr->set_version(if_ptr,
4591 l_version,
4592 xprt_ptr->l_features);
4593 }
4594
4595 xprt_ptr->local_state = GLINK_XPRT_OPENED;
4596 if (xprt_is_fully_opened(xprt_ptr))
4597 check_link_notifier_and_notify(xprt_ptr,
4598 GLINK_LINK_STATE_UP);
4599 } else {
4600 if_ptr->tx_cmd_version(if_ptr, l_version, xprt_ptr->l_features);
4601 }
4602}
4603
4604/**
4605 * find_l_ctx_get() - find a local channel context based on a remote one
4606 * @r_ctx: The remote channel to use as a lookup key.
4607 *
4608 * If the channel is found, the reference count is incremented to ensure the
4609 * lifetime of the channel context. The caller must call rwref_put() when done.
4610 *
 4611 * Return: The corresponding local ctx, or NULL if not found.
4612 */
4613static struct channel_ctx *find_l_ctx_get(struct channel_ctx *r_ctx)
4614{
4615 struct glink_core_xprt_ctx *xprt;
4616 struct channel_ctx *ctx;
4617 unsigned long flags;
4618 struct channel_ctx *l_ctx = NULL;
4619
4620 mutex_lock(&transport_list_lock_lha0);
4621 list_for_each_entry(xprt, &transport_list, list_node)
4622 if (!strcmp(r_ctx->transport_ptr->edge, xprt->edge)) {
4623 rwref_write_get(&xprt->xprt_state_lhb0);
4624 if (xprt->local_state != GLINK_XPRT_OPENED) {
4625 rwref_write_put(&xprt->xprt_state_lhb0);
4626 continue;
4627 }
4628 spin_lock_irqsave(&xprt->xprt_ctx_lock_lhb1, flags);
4629 list_for_each_entry(ctx, &xprt->channels,
4630 port_list_node)
4631 if (!strcmp(ctx->name, r_ctx->name) &&
4632 ctx->local_xprt_req &&
4633 ctx->local_xprt_resp) {
4634 l_ctx = ctx;
4635 rwref_get(&l_ctx->ch_state_lhb2);
4636 }
4637 spin_unlock_irqrestore(&xprt->xprt_ctx_lock_lhb1,
4638 flags);
4639 rwref_write_put(&xprt->xprt_state_lhb0);
4640 }
4641 mutex_unlock(&transport_list_lock_lha0);
4642
4643 return l_ctx;
4644}
4645
4646/**
4647 * find_r_ctx_get() - find a remote channel context based on a local one
4648 * @l_ctx: The local channel to use as a lookup key.
4649 *
4650 * If the channel is found, the reference count is incremented to ensure the
4651 * lifetime of the channel context. The caller must call rwref_put() when done.
4652 *
 4653 * Return: The corresponding remote ctx, or NULL if not found.
4654 */
4655static struct channel_ctx *find_r_ctx_get(struct channel_ctx *l_ctx)
4656{
4657 struct glink_core_xprt_ctx *xprt;
4658 struct channel_ctx *ctx;
4659 unsigned long flags;
4660 struct channel_ctx *r_ctx = NULL;
4661
4662 mutex_lock(&transport_list_lock_lha0);
4663 list_for_each_entry(xprt, &transport_list, list_node)
4664 if (!strcmp(l_ctx->transport_ptr->edge, xprt->edge)) {
4665 rwref_write_get(&xprt->xprt_state_lhb0);
4666 if (xprt->local_state != GLINK_XPRT_OPENED) {
4667 rwref_write_put(&xprt->xprt_state_lhb0);
4668 continue;
4669 }
4670 spin_lock_irqsave(&xprt->xprt_ctx_lock_lhb1, flags);
4671 list_for_each_entry(ctx, &xprt->channels,
4672 port_list_node)
4673 if (!strcmp(ctx->name, l_ctx->name) &&
4674 ctx->remote_xprt_req &&
4675 ctx->remote_xprt_resp) {
4676 r_ctx = ctx;
4677 rwref_get(&r_ctx->ch_state_lhb2);
4678 }
4679 spin_unlock_irqrestore(&xprt->xprt_ctx_lock_lhb1,
4680 flags);
4681 rwref_write_put(&xprt->xprt_state_lhb0);
4682 }
4683 mutex_unlock(&transport_list_lock_lha0);
4684
4685 return r_ctx;
4686}
4687
4688/**
4689 * will_migrate() - will a channel migrate to a different transport
4690 * @l_ctx: The local channel to migrate.
4691 * @r_ctx: The remote channel to migrate.
4692 *
4693 * One of the channel contexts can be NULL if not known, but at least one ctx
4694 * must be provided.
4695 *
4696 * Return: Bool indicating if migration will occur.
4697 */
4698static bool will_migrate(struct channel_ctx *l_ctx, struct channel_ctx *r_ctx)
4699{
4700 uint16_t new_xprt;
4701 bool migrate = false;
4702
4703 if (!r_ctx)
4704 r_ctx = find_r_ctx_get(l_ctx);
4705 else
4706 rwref_get(&r_ctx->ch_state_lhb2);
4707 if (!r_ctx)
4708 return migrate;
4709
4710 if (!l_ctx)
4711 l_ctx = find_l_ctx_get(r_ctx);
4712 else
4713 rwref_get(&l_ctx->ch_state_lhb2);
4714 if (!l_ctx)
4715 goto exit;
4716
4717 if (l_ctx->local_xprt_req == r_ctx->remote_xprt_req &&
4718 l_ctx->local_xprt_req == l_ctx->transport_ptr->id)
4719 goto exit;
4720 if (l_ctx->no_migrate)
4721 goto exit;
4722
4723 if (l_ctx->local_xprt_req > r_ctx->transport_ptr->id)
4724 l_ctx->local_xprt_req = r_ctx->transport_ptr->id;
4725
4726 if (ch_is_fully_opened(l_ctx) &&
4727 (l_ctx->transport_ptr->id == l_ctx->local_xprt_req))
4728 goto exit;
4729
4730 new_xprt = max(l_ctx->local_xprt_req, r_ctx->remote_xprt_req);
4731
4732 if (new_xprt == l_ctx->transport_ptr->id)
4733 goto exit;
4734
4735 migrate = true;
4736exit:
4737 if (l_ctx)
4738 rwref_put(&l_ctx->ch_state_lhb2);
4739 if (r_ctx)
4740 rwref_put(&r_ctx->ch_state_lhb2);
4741
4742 return migrate;
4743}
4744
4745/**
4746 * ch_migrate() - migrate a channel to a different transport
4747 * @l_ctx: The local channel to migrate.
4748 * @r_ctx: The remote channel to migrate.
4749 *
4750 * One of the channel contexts can be NULL if not known, but at least one ctx
4751 * must be provided.
4752 *
4753 * Return: Bool indicating if migration occurred.
4754 */
4755static bool ch_migrate(struct channel_ctx *l_ctx, struct channel_ctx *r_ctx)
4756{
4757 uint16_t new_xprt;
4758 struct glink_core_xprt_ctx *xprt;
4759 unsigned long flags;
4760 struct channel_lcid *flcid;
4761 uint16_t best_xprt = USHRT_MAX;
4762 struct channel_ctx *ctx_clone;
4763 bool migrated = false;
4764
4765 if (!r_ctx)
4766 r_ctx = find_r_ctx_get(l_ctx);
4767 else
4768 rwref_get(&r_ctx->ch_state_lhb2);
4769 if (!r_ctx)
4770 return migrated;
4771
4772 if (!l_ctx)
4773 l_ctx = find_l_ctx_get(r_ctx);
4774 else
4775 rwref_get(&l_ctx->ch_state_lhb2);
4776 if (!l_ctx) {
4777 rwref_put(&r_ctx->ch_state_lhb2);
4778 return migrated;
4779 }
4780 if (ch_is_fully_opened(l_ctx) &&
4781 (l_ctx->transport_ptr->id == l_ctx->local_xprt_req)) {
4782 rwref_put(&l_ctx->ch_state_lhb2);
4783 rwref_put(&r_ctx->ch_state_lhb2);
4784 return migrated;
4785 }
4786
4787 if (l_ctx->local_xprt_req == r_ctx->remote_xprt_req &&
4788 l_ctx->local_xprt_req == l_ctx->transport_ptr->id)
4789 goto exit;
4790 if (l_ctx->no_migrate)
4791 goto exit;
4792
4793 if (l_ctx->local_xprt_req > r_ctx->transport_ptr->id)
4794 l_ctx->local_xprt_req = r_ctx->transport_ptr->id;
4795
4796 new_xprt = max(l_ctx->local_xprt_req, r_ctx->remote_xprt_req);
4797
4798 if (new_xprt == l_ctx->transport_ptr->id)
4799 goto exit;
4800
4801 ctx_clone = kmalloc(sizeof(*ctx_clone), GFP_KERNEL);
4802 if (!ctx_clone)
4803 goto exit;
4804
4805 mutex_lock(&transport_list_lock_lha0);
4806 list_for_each_entry(xprt, &transport_list, list_node)
4807 if (!strcmp(l_ctx->transport_ptr->edge, xprt->edge))
4808 if (xprt->id == new_xprt)
4809 break;
4810 mutex_unlock(&transport_list_lock_lha0);
4811
4812 spin_lock_irqsave(&l_ctx->transport_ptr->xprt_ctx_lock_lhb1, flags);
4813 list_del_init(&l_ctx->port_list_node);
4814 spin_unlock_irqrestore(&l_ctx->transport_ptr->xprt_ctx_lock_lhb1,
4815 flags);
4816 mutex_lock(&l_ctx->transport_ptr->xprt_dbgfs_lock_lhb4);
4817 glink_debugfs_remove_channel(l_ctx, l_ctx->transport_ptr);
4818 mutex_unlock(&l_ctx->transport_ptr->xprt_dbgfs_lock_lhb4);
4819
4820 memcpy(ctx_clone, l_ctx, sizeof(*ctx_clone));
4821 ctx_clone->local_xprt_req = 0;
4822 ctx_clone->local_xprt_resp = 0;
4823 ctx_clone->remote_xprt_req = 0;
4824 ctx_clone->remote_xprt_resp = 0;
4825 ctx_clone->notify_state = NULL;
4826 ctx_clone->local_open_state = GLINK_CHANNEL_CLOSING;
4827 rwref_lock_init(&ctx_clone->ch_state_lhb2, glink_ch_ctx_release);
4828 init_completion(&ctx_clone->int_req_ack_complete);
4829 init_completion(&ctx_clone->int_req_complete);
4830 spin_lock_init(&ctx_clone->local_rx_intent_lst_lock_lhc1);
4831 spin_lock_init(&ctx_clone->rmt_rx_intent_lst_lock_lhc2);
4832 INIT_LIST_HEAD(&ctx_clone->tx_ready_list_node);
4833 INIT_LIST_HEAD(&ctx_clone->local_rx_intent_list);
4834 INIT_LIST_HEAD(&ctx_clone->local_rx_intent_ntfy_list);
4835 INIT_LIST_HEAD(&ctx_clone->local_rx_intent_free_list);
4836 INIT_LIST_HEAD(&ctx_clone->rmt_rx_intent_list);
4837 INIT_LIST_HEAD(&ctx_clone->tx_active);
4838 spin_lock_init(&ctx_clone->tx_pending_rmt_done_lock_lhc4);
4839 INIT_LIST_HEAD(&ctx_clone->tx_pending_remote_done);
4840 spin_lock_init(&ctx_clone->tx_lists_lock_lhc3);
4841 spin_lock_irqsave(&l_ctx->transport_ptr->xprt_ctx_lock_lhb1, flags);
4842 list_add_tail(&ctx_clone->port_list_node,
4843 &l_ctx->transport_ptr->channels);
4844 spin_unlock_irqrestore(&l_ctx->transport_ptr->xprt_ctx_lock_lhb1,
4845 flags);
4846
4847 l_ctx->transport_ptr->ops->tx_cmd_ch_close(l_ctx->transport_ptr->ops,
4848 l_ctx->lcid);
4849
4850 l_ctx->transport_ptr = xprt;
4851 l_ctx->local_xprt_req = 0;
4852 l_ctx->local_xprt_resp = 0;
4853 if (new_xprt != r_ctx->transport_ptr->id || l_ctx == r_ctx) {
4854 if (new_xprt != r_ctx->transport_ptr->id) {
4855 r_ctx->local_xprt_req = 0;
4856 r_ctx->local_xprt_resp = 0;
4857 r_ctx->remote_xprt_req = 0;
4858 r_ctx->remote_xprt_resp = 0;
4859 }
4860
4861 l_ctx->remote_xprt_req = 0;
4862 l_ctx->remote_xprt_resp = 0;
4863 l_ctx->remote_opened = false;
4864
4865 rwref_write_get(&xprt->xprt_state_lhb0);
4866 spin_lock_irqsave(&xprt->xprt_ctx_lock_lhb1, flags);
4867 if (list_empty(&xprt->free_lcid_list)) {
4868 l_ctx->lcid = xprt->next_lcid++;
4869 } else {
4870 flcid = list_first_entry(&xprt->free_lcid_list,
4871 struct channel_lcid, list_node);
4872 l_ctx->lcid = flcid->lcid;
4873 list_del(&flcid->list_node);
4874 kfree(flcid);
4875 }
4876 list_add_tail(&l_ctx->port_list_node, &xprt->channels);
4877 spin_unlock_irqrestore(&xprt->xprt_ctx_lock_lhb1, flags);
4878 rwref_write_put(&xprt->xprt_state_lhb0);
4879 } else {
4880 l_ctx->lcid = r_ctx->lcid;
4881 l_ctx->rcid = r_ctx->rcid;
4882 l_ctx->remote_opened = r_ctx->remote_opened;
4883 l_ctx->remote_xprt_req = r_ctx->remote_xprt_req;
4884 l_ctx->remote_xprt_resp = r_ctx->remote_xprt_resp;
4885 glink_delete_ch_from_list(r_ctx, false);
4886
4887 spin_lock_irqsave(&xprt->xprt_ctx_lock_lhb1, flags);
4888 list_add_tail(&l_ctx->port_list_node, &xprt->channels);
4889 spin_unlock_irqrestore(&xprt->xprt_ctx_lock_lhb1, flags);
4890 }
4891
4892 mutex_lock(&xprt->xprt_dbgfs_lock_lhb4);
4893 glink_debugfs_add_channel(l_ctx, xprt);
4894 mutex_unlock(&xprt->xprt_dbgfs_lock_lhb4);
4895
4896 mutex_lock(&transport_list_lock_lha0);
4897 list_for_each_entry(xprt, &transport_list, list_node)
4898 if (!strcmp(l_ctx->transport_ptr->edge, xprt->edge))
4899 if (xprt->id < best_xprt)
4900 best_xprt = xprt->id;
4901 mutex_unlock(&transport_list_lock_lha0);
4902 l_ctx->local_open_state = GLINK_CHANNEL_OPENING;
4903 l_ctx->local_xprt_req = best_xprt;
4904 l_ctx->transport_ptr->ops->tx_cmd_ch_open(l_ctx->transport_ptr->ops,
4905 l_ctx->lcid, l_ctx->name, best_xprt);
4906
4907 migrated = true;
4908exit:
4909 rwref_put(&l_ctx->ch_state_lhb2);
4910 rwref_put(&r_ctx->ch_state_lhb2);
4911
4912 return migrated;
4913}
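/*
 * Editorial summary of the migration sequence above: (1) negotiate the
 * target transport as max(local request, remote request); (2) leave a clone
 * of the old context, marked GLINK_CHANNEL_CLOSING, on the old transport so
 * the close handshake for the old lcid can complete; (3) send the close
 * command for the old lcid; (4) move the live context to the new transport,
 * reusing the remote context's lcid/rcid when both sides land on the same
 * transport and allocating a fresh lcid otherwise; (5) re-open on the new
 * transport, requesting the best (lowest-id) transport on the edge.
 */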
4914
4915/**
4916 * calculate_xprt_resp() - calculate the response to a remote xprt request
4917 * @r_ctx: The channel the remote xprt request is for.
4918 *
4919 * Return: The calculated response.
4920 */
4921static uint16_t calculate_xprt_resp(struct channel_ctx *r_ctx)
4922{
4923 struct channel_ctx *l_ctx;
4924
4925 l_ctx = find_l_ctx_get(r_ctx);
4926 if (!l_ctx) {
4927 r_ctx->remote_xprt_resp = r_ctx->transport_ptr->id;
4928 } else if (r_ctx->remote_xprt_req == r_ctx->transport_ptr->id) {
4929 r_ctx->remote_xprt_resp = r_ctx->remote_xprt_req;
4930 } else {
4931 if (!l_ctx->local_xprt_req)
4932 r_ctx->remote_xprt_resp = r_ctx->remote_xprt_req;
4933 else if (l_ctx->no_migrate)
4934 r_ctx->remote_xprt_resp = l_ctx->local_xprt_req;
4935 else
4936 r_ctx->remote_xprt_resp = max(l_ctx->local_xprt_req,
4937 r_ctx->remote_xprt_req);
4938 }
4939
4940 if (l_ctx)
4941 rwref_put(&l_ctx->ch_state_lhb2);
4942
4943 return r_ctx->remote_xprt_resp;
4944}
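/*
 * Editorial note: the response calculation above collapses to a small
 * decision table ("local ctx" = the matching local-side channel, if any):
 *
 *   no local ctx                       -> current transport id
 *   remote request == current id      -> echo the remote request
 *   local side made no request        -> echo the remote request
 *   local side pinned (no_migrate)    -> local request
 *   otherwise                         -> max(local req, remote req)
 */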
4945
4946/**
4947 * glink_core_rx_cmd_ch_remote_open() - Remote-initiated open command
4948 *
4949 * @if_ptr: Pointer to transport instance
4950 * @rcid: Remote Channel ID
4951 * @name: Channel name
4952 * @req_xprt: Requested transport to migrate to
4953 */
4954static void glink_core_rx_cmd_ch_remote_open(struct glink_transport_if *if_ptr,
4955 uint32_t rcid, const char *name, uint16_t req_xprt)
4956{
4957 struct channel_ctx *ctx;
4958 uint16_t xprt_resp;
4959 bool do_migrate;
4960
4961 glink_core_migration_edge_lock(if_ptr->glink_core_priv);
Dhoat Harpalae706e12018-01-18 00:29:20 +05304962 ctx = ch_name_to_ch_ctx_create(if_ptr->glink_core_priv, name, false);
Chris Lewfa6135e2016-08-01 13:29:46 -07004963 if (ctx == NULL) {
4964 GLINK_ERR_XPRT(if_ptr->glink_core_priv,
4965 "%s: invalid rcid %u received, name '%s'\n",
4966 __func__, rcid, name);
4967 glink_core_migration_edge_unlock(if_ptr->glink_core_priv);
4968 return;
4969 }
4970
4971 /* port already exists */
4972 if (ctx->remote_opened) {
4973 GLINK_ERR_CH(ctx,
4974 "%s: Duplicate remote open for rcid %u, name '%s'\n",
4975 __func__, rcid, name);
Dhoat Harpal390dd202017-04-11 12:32:33 +05304976 rwref_put(&ctx->ch_state_lhb2);
Chris Lewfa6135e2016-08-01 13:29:46 -07004977 glink_core_migration_edge_unlock(if_ptr->glink_core_priv);
4978 return;
4979 }
4980
4981 ctx->remote_opened = true;
4982 ch_add_rcid(if_ptr->glink_core_priv, ctx, rcid);
4983 ctx->transport_ptr = if_ptr->glink_core_priv;
4984
4985 ctx->remote_xprt_req = req_xprt;
4986 xprt_resp = calculate_xprt_resp(ctx);
4987
4988 do_migrate = will_migrate(NULL, ctx);
4989 GLINK_INFO_CH(ctx, "%s: remote: CLOSED->OPENED ; xprt req:resp %u:%u\n",
4990 __func__, req_xprt, xprt_resp);
4991
4992 if_ptr->tx_cmd_ch_remote_open_ack(if_ptr, rcid, xprt_resp);
4993 if (!do_migrate && ch_is_fully_opened(ctx))
4994 ctx->notify_state(ctx, ctx->user_priv, GLINK_CONNECTED);
4995
4996
4997 if (do_migrate)
4998 ch_migrate(NULL, ctx);
Dhoat Harpal390dd202017-04-11 12:32:33 +05304999 rwref_put(&ctx->ch_state_lhb2);
Chris Lewfa6135e2016-08-01 13:29:46 -07005000 glink_core_migration_edge_unlock(if_ptr->glink_core_priv);
5001}
5002
5003/**
5004 * glink_core_rx_cmd_ch_open_ack() - Receive ack to previously sent open request
5005 *
5006 * @if_ptr: Pointer to transport instance
5007 * @lcid: Local Channel ID
5008 * @xprt_resp: Response to the transport migration request
5009 */
5010static void glink_core_rx_cmd_ch_open_ack(struct glink_transport_if *if_ptr,
5011 uint32_t lcid, uint16_t xprt_resp)
5012{
5013 struct channel_ctx *ctx;
5014
5015 glink_core_migration_edge_lock(if_ptr->glink_core_priv);
5016 ctx = xprt_lcid_to_ch_ctx_get(if_ptr->glink_core_priv, lcid);
5017 if (!ctx) {
5018 /* unknown LCID received - this shouldn't happen */
5019 GLINK_ERR_XPRT(if_ptr->glink_core_priv,
5020 "%s: invalid lcid %u received\n", __func__,
5021 (unsigned int)lcid);
5022 glink_core_migration_edge_unlock(if_ptr->glink_core_priv);
5023 return;
5024 }
5025
5026 if (ctx->local_open_state != GLINK_CHANNEL_OPENING) {
5027 GLINK_ERR_CH(ctx,
5028 "%s: unexpected open ack receive for lcid. Current state: %u. Thread: %u\n",
5029 __func__, ctx->local_open_state, current->pid);
5030 rwref_put(&ctx->ch_state_lhb2);
5031 glink_core_migration_edge_unlock(if_ptr->glink_core_priv);
5032 return;
5033 }
5034
5035 ctx->local_xprt_resp = xprt_resp;
5036 if (!ch_migrate(ctx, NULL)) {
5037 ctx->local_open_state = GLINK_CHANNEL_OPENED;
5038 GLINK_INFO_PERF_CH(ctx,
5039 "%s: local:GLINK_CHANNEL_OPENING_WAIT->GLINK_CHANNEL_OPENED\n",
5040 __func__);
5041
5042 if (ch_is_fully_opened(ctx)) {
5043 ctx->notify_state(ctx, ctx->user_priv, GLINK_CONNECTED);
5044 GLINK_INFO_PERF_CH(ctx,
5045 "%s: notify state: GLINK_CONNECTED\n",
5046 __func__);
5047 }
5048 }
5049 rwref_put(&ctx->ch_state_lhb2);
5050 glink_core_migration_edge_unlock(if_ptr->glink_core_priv);
5051}
5052
5053/**
5054 * glink_core_rx_cmd_ch_remote_close() - Receive remote close command
5055 *
5056 * @if_ptr: Pointer to transport instance
5057 * @rcid: Remote Channel ID
5058 */
5059static void glink_core_rx_cmd_ch_remote_close(
5060 struct glink_transport_if *if_ptr, uint32_t rcid)
5061{
5062 struct channel_ctx *ctx;
5063 bool is_ch_fully_closed;
5064 struct glink_core_xprt_ctx *xprt_ptr = if_ptr->glink_core_priv;
Dhoat Harpalae706e12018-01-18 00:29:20 +05305065 unsigned long flags;
Chris Lewfa6135e2016-08-01 13:29:46 -07005066
5067 ctx = xprt_rcid_to_ch_ctx_get(if_ptr->glink_core_priv, rcid);
5068 if (!ctx) {
5069 /* unknown RCID received - this shouldn't happen */
5070 GLINK_ERR_XPRT(if_ptr->glink_core_priv,
5071 "%s: invalid rcid %u received\n", __func__,
5072 (unsigned int)rcid);
5073 return;
5074 }
5075
5076 if (!ctx->remote_opened) {
5077 GLINK_ERR_CH(ctx,
5078 "%s: unexpected remote close receive for rcid %u\n",
5079 __func__, (unsigned int)rcid);
5080 rwref_put(&ctx->ch_state_lhb2);
5081 return;
5082 }
Dhoat Harpalae706e12018-01-18 00:29:20 +05305083 spin_lock_irqsave(&ctx->transport_ptr->xprt_ctx_lock_lhb1, flags);
5084 ctx->pending_delete = true;
5085 spin_unlock_irqrestore(&ctx->transport_ptr->xprt_ctx_lock_lhb1, flags);
Chris Lewfa6135e2016-08-01 13:29:46 -07005086 GLINK_INFO_CH(ctx, "%s: remote: OPENED->CLOSED\n", __func__);
5087
5088 is_ch_fully_closed = glink_core_remote_close_common(ctx, false);
5089
Chris Lewfa6135e2016-08-01 13:29:46 -07005090 if_ptr->tx_cmd_ch_remote_close_ack(if_ptr, rcid);
5091
5092 if (is_ch_fully_closed) {
5093 glink_delete_ch_from_list(ctx, true);
Kyle Yan65be4a52016-10-31 15:05:00 -07005094 kthread_flush_worker(&xprt_ptr->tx_wq);
Chris Lewfa6135e2016-08-01 13:29:46 -07005095 }
5096 rwref_put(&ctx->ch_state_lhb2);
5097}
5098
5099/**
5100 * glink_core_rx_cmd_ch_close_ack() - Receive ack for a locally-requested close
5101 *
5102 * @if_ptr: Pointer to transport instance
5103 * @lcid: Local Channel ID
5104 */
5105static void glink_core_rx_cmd_ch_close_ack(struct glink_transport_if *if_ptr,
5106 uint32_t lcid)
5107{
5108 struct channel_ctx *ctx;
5109 bool is_ch_fully_closed;
5110 struct glink_core_xprt_ctx *xprt_ptr = if_ptr->glink_core_priv;
5111
5112 ctx = xprt_lcid_to_ch_ctx_get(if_ptr->glink_core_priv, lcid);
5113 if (!ctx) {
5114 /* unknown LCID received - this shouldn't happen */
5115 GLINK_ERR_XPRT(if_ptr->glink_core_priv,
5116 "%s: invalid lcid %u received\n", __func__,
5117 (unsigned int)lcid);
5118 return;
5119 }
5120
5121 if (ctx->local_open_state != GLINK_CHANNEL_CLOSING) {
5122 GLINK_ERR_CH(ctx,
5123 "%s: unexpected close ack receive for lcid %u\n",
5124 __func__, (unsigned int)lcid);
5125 rwref_put(&ctx->ch_state_lhb2);
5126 return;
5127 }
5128
5129 is_ch_fully_closed = glink_core_ch_close_ack_common(ctx, false);
5130 if (is_ch_fully_closed) {
5131 glink_delete_ch_from_list(ctx, true);
Kyle Yan65be4a52016-10-31 15:05:00 -07005132 kthread_flush_worker(&xprt_ptr->tx_wq);
Chris Lewfa6135e2016-08-01 13:29:46 -07005133 }
5134 rwref_put(&ctx->ch_state_lhb2);
5135}
5136
5137/**
5138 * glink_core_remote_rx_intent_put() - Receive remote RX intent
5139 *
5140 * @if_ptr: Pointer to transport instance
5141 * @rcid: Remote Channel ID
5142 * @riid: Remote Intent ID
5143 * @size: Size of the remote intent
5144 */
5145static void glink_core_remote_rx_intent_put(struct glink_transport_if *if_ptr,
5146 uint32_t rcid, uint32_t riid, size_t size)
5147{
5148 struct channel_ctx *ctx;
5149
5150 ctx = xprt_rcid_to_ch_ctx_get(if_ptr->glink_core_priv, rcid);
5151 if (!ctx) {
5152 /* unknown rcid received - this shouldn't happen */
5153 GLINK_ERR_XPRT(if_ptr->glink_core_priv,
5154 "%s: invalid rcid received %u\n", __func__,
5155 (unsigned int)rcid);
5156 return;
5157 }
5158
5159 ch_push_remote_rx_intent(ctx, size, riid, NULL);
5160 rwref_put(&ctx->ch_state_lhb2);
5161}
5162
5163/**
5164 * glink_core_remote_rx_intent_put_cookie() - Receive remote RX intent with cookie
5165 *
5166 * @if_ptr: Pointer to transport instance
5167 * @rcid: Remote Channel ID
5168 * @riid: Remote Intent ID
5169 * @size: Size of the remote intent
5170 * @cookie: Transport-specific cookie to cache
5171 */
5172static void glink_core_remote_rx_intent_put_cookie(
5173 struct glink_transport_if *if_ptr,
5174 uint32_t rcid, uint32_t riid, size_t size, void *cookie)
5175{
5176 struct channel_ctx *ctx;
5177
5178 ctx = xprt_rcid_to_ch_ctx_get(if_ptr->glink_core_priv, rcid);
5179 if (!ctx) {
5180 /* unknown rcid received - this shouldn't happen */
5181 GLINK_ERR_XPRT(if_ptr->glink_core_priv,
5182 "%s: invalid rcid received %u\n", __func__,
5183 (unsigned int)rcid);
5184 return;
5185 }
5186
5187 ch_push_remote_rx_intent(ctx, size, riid, cookie);
5188 rwref_put(&ctx->ch_state_lhb2);
5189}
5190
5191/**
5192 * glink_core_rx_cmd_remote_rx_intent_req() - Receive a request for rx_intent
5193 * from remote side
5194 * @if_ptr: Pointer to the transport interface
5195 * @rcid: Remote channel ID
5196 * @size: Size of the requested intent
5197 *
5198 * The function looks up the local channel to which the rx_intent request is
5199 * addressed and forwards the request to it through the notify_rx_intent_req
5200 * callback registered by that channel.
5201 */
5202static void glink_core_rx_cmd_remote_rx_intent_req(
5203 struct glink_transport_if *if_ptr, uint32_t rcid, size_t size)
5204{
5205 struct channel_ctx *ctx;
5206 bool cb_ret;
5207
5208 ctx = xprt_rcid_to_ch_ctx_get(if_ptr->glink_core_priv, rcid);
5209 if (!ctx) {
5210 GLINK_ERR_XPRT(if_ptr->glink_core_priv,
5211 "%s: invalid rcid received %u\n", __func__,
5212 (unsigned int)rcid);
5213 return;
5214 }
5215 if (!ctx->notify_rx_intent_req) {
5216 GLINK_ERR_CH(ctx,
5217 "%s: Notify function not defined for local channel",
5218 __func__);
5219 rwref_put(&ctx->ch_state_lhb2);
5220 return;
5221 }
5222
5223 cb_ret = ctx->notify_rx_intent_req(ctx, ctx->user_priv, size);
5224 if_ptr->tx_cmd_remote_rx_intent_req_ack(if_ptr, ctx->lcid, cb_ret);
5225 rwref_put(&ctx->ch_state_lhb2);
5226}
5227
5228/**
5229 * glink_core_rx_cmd_rx_intent_req_ack() - Receive ack from remote side
5230 * for a local rx_intent request
5231 * @if_ptr: Pointer to the transport interface
5232 * @rcid: Remote channel ID
5233 * @granted: Whether the remote side granted the rx_intent request
5234 *
5235 * This function receives the remote side's ack for a local rx_intent request.
5236 */
5237static void glink_core_rx_cmd_rx_intent_req_ack(struct glink_transport_if
5238 *if_ptr, uint32_t rcid, bool granted)
5239{
5240 struct channel_ctx *ctx;
5241
5242 ctx = xprt_rcid_to_ch_ctx_get(if_ptr->glink_core_priv, rcid);
5243 if (!ctx) {
5244 GLINK_ERR_XPRT(if_ptr->glink_core_priv,
5245 "%s: Invalid rcid received %u\n", __func__,
5246 (unsigned int)rcid);
5247 return;
5248 }
5249 ctx->int_req_ack = granted;
5250 complete_all(&ctx->int_req_ack_complete);
5251 rwref_put(&ctx->ch_state_lhb2);
5252}
5253
5254/**
5255 * glink_core_rx_get_pkt_ctx() - lookup RX intent structure
5256 *
5257 * @if_ptr: Pointer to the transport interface
5258 * @rcid: Remote channel ID
5259 * @liid: Local RX Intent ID
5260 *
5261 * Note that this function is designed to always be followed by a call to
5262 * glink_core_rx_put_pkt_ctx() to complete an RX operation by the transport.
5263 *
5264 * Return: Pointer to RX intent structure (or NULL if none found)
5265 */
5266static struct glink_core_rx_intent *glink_core_rx_get_pkt_ctx(
5267 struct glink_transport_if *if_ptr, uint32_t rcid, uint32_t liid)
5268{
5269 struct channel_ctx *ctx;
5270 struct glink_core_rx_intent *intent_ptr;
5271
5272 ctx = xprt_rcid_to_ch_ctx_get(if_ptr->glink_core_priv, rcid);
5273 if (!ctx) {
5274 /* unknown RCID received - this shouldn't happen */
5275 GLINK_ERR_XPRT(if_ptr->glink_core_priv,
5276 "%s: invalid rcid received %u\n", __func__,
5277 (unsigned int)rcid);
5278 return NULL;
5279 }
5280
5281 /* match pending intent */
5282 intent_ptr = ch_get_local_rx_intent(ctx, liid);
5283 if (intent_ptr == NULL) {
5284 GLINK_ERR_CH(ctx,
5285 "%s: L[%u]: No matching rx intent\n",
5286 __func__, liid);
5287 rwref_put(&ctx->ch_state_lhb2);
5288 return NULL;
5289 }
5290
5291 rwref_put(&ctx->ch_state_lhb2);
5292 return intent_ptr;
5293}
5294
5295/**
5296 * glink_core_rx_put_pkt_ctx() - Complete an RX operation on an RX intent
5297 *
5298 * @if_ptr: Pointer to the transport interface
5299 * @rcid: Remote channel ID
5300 * @intent_ptr: Pointer to the RX intent
5301 * @complete: Packet has been completely received
5302 *
5303 * Note that this function should always be preceded by a call to
5304 * glink_core_rx_get_pkt_ctx().
5305 */
5306void glink_core_rx_put_pkt_ctx(struct glink_transport_if *if_ptr,
5307 uint32_t rcid, struct glink_core_rx_intent *intent_ptr, bool complete)
5308{
5309 struct channel_ctx *ctx;
5310
5311 if (!complete) {
5312 GLINK_DBG_XPRT(if_ptr->glink_core_priv,
5313 "%s: rcid[%u] liid[%u] pkt_size[%zu] write_offset[%zu] Fragment received\n",
5314 __func__, rcid, intent_ptr->id,
5315 intent_ptr->pkt_size,
5316 intent_ptr->write_offset);
5317 return;
5318 }
5319
5320 /* packet complete */
5321 ctx = xprt_rcid_to_ch_ctx_get(if_ptr->glink_core_priv, rcid);
5322 if (!ctx) {
5323 /* unknown RCID received - this shouldn't happen */
5324 GLINK_ERR_XPRT(if_ptr->glink_core_priv,
5325 "%s: invalid rcid received %u\n", __func__,
5326 (unsigned int)rcid);
5327 return;
5328 }
5329
5330 if (unlikely(intent_ptr->tracer_pkt)) {
5331 tracer_pkt_log_event(intent_ptr->data, GLINK_CORE_RX);
5332 ch_set_local_rx_intent_notified(ctx, intent_ptr);
5333 if (ctx->notify_rx_tracer_pkt)
5334 ctx->notify_rx_tracer_pkt(ctx, ctx->user_priv,
5335 intent_ptr->pkt_priv, intent_ptr->data,
5336 intent_ptr->pkt_size);
5337 rwref_put(&ctx->ch_state_lhb2);
5338 return;
5339 }
5340
5341 GLINK_PERF_CH(ctx, "%s: L[%u]: data[%p] size[%zu]\n",
5342 __func__, intent_ptr->id,
5343 intent_ptr->data ? intent_ptr->data : intent_ptr->iovec,
5344 intent_ptr->write_offset);
5345 if (!intent_ptr->data && !ctx->notify_rxv) {
5346 /* Received a vector, but client can't handle a vector */
5347 intent_ptr->bounce_buf = linearize_vector(intent_ptr->iovec,
5348 intent_ptr->pkt_size,
5349 intent_ptr->vprovider,
5350 intent_ptr->pprovider);
5351 if (IS_ERR_OR_NULL(intent_ptr->bounce_buf)) {
5352 GLINK_ERR_XPRT(if_ptr->glink_core_priv,
5353 "%s: Error %ld linearizing vector\n", __func__,
5354 PTR_ERR(intent_ptr->bounce_buf));
5355 WARN(1, "Failed to linearize vector\n");
5356 rwref_put(&ctx->ch_state_lhb2);
5357 return;
5358 }
5359 }
5360
5361 ch_set_local_rx_intent_notified(ctx, intent_ptr);
5362 if (ctx->notify_rx && (intent_ptr->data || intent_ptr->bounce_buf)) {
5363 ctx->notify_rx(ctx, ctx->user_priv, intent_ptr->pkt_priv,
5364 intent_ptr->data ?
5365 intent_ptr->data : intent_ptr->bounce_buf,
5366 intent_ptr->pkt_size);
5367 } else if (ctx->notify_rxv) {
5368 ctx->notify_rxv(ctx, ctx->user_priv, intent_ptr->pkt_priv,
5369 intent_ptr->iovec, intent_ptr->pkt_size,
5370 intent_ptr->vprovider, intent_ptr->pprovider);
5371 } else {
5372 GLINK_ERR_XPRT(if_ptr->glink_core_priv,
5373 "%s: Unable to process rx data\n", __func__);
5374 WARN(1, "Failed to process rx data\n");
5375 }
5376 rwref_put(&ctx->ch_state_lhb2);
5377}
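/*
 * Editorial sketch of how a transport is expected to pair the two calls
 * above when packet fragments arrive. The my_xprt_* names and the fragment
 * layout are hypothetical, and this assumes the transport was registered so
 * that if_ptr->glink_core_if_ptr points at core_impl below.
 */
#if 0 /* illustrative only */
static void my_xprt_rx_data(struct glink_transport_if *if_ptr,
			    uint32_t rcid, uint32_t liid,
			    const void *frag, size_t frag_len, bool last_frag)
{
	struct glink_core_rx_intent *intent;

	intent = if_ptr->glink_core_if_ptr->rx_get_pkt_ctx(if_ptr, rcid, liid);
	if (!intent)
		return;

	/* copy this fragment into the client's intent buffer */
	memcpy((char *)intent->data + intent->write_offset, frag, frag_len);
	intent->write_offset += frag_len;

	/* 'complete' == true hands the full packet to the client */
	if_ptr->glink_core_if_ptr->rx_put_pkt_ctx(if_ptr, rcid, intent,
						  last_frag);
}
#endif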
5378
5379/**
5380 * glink_core_rx_cmd_tx_done() - Receive Transmit Done Command
5381 * @if_ptr: Pointer to transport instance
5382 * @rcid: Remote channel ID
5383 * @riid: Remote intent ID
5384 * @reuse: Reuse the consumed intent
5385 */
5386void glink_core_rx_cmd_tx_done(struct glink_transport_if *if_ptr,
5387 uint32_t rcid, uint32_t riid, bool reuse)
5388{
5389 struct channel_ctx *ctx;
5390 struct glink_core_tx_pkt *tx_pkt;
5391 unsigned long flags;
5392 size_t intent_size;
5393 void *cookie;
5394
5395 ctx = xprt_rcid_to_ch_ctx_get(if_ptr->glink_core_priv, rcid);
5396 if (!ctx) {
5397 /* unknown RCID received - this shouldn't happen */
5398 GLINK_ERR_XPRT(if_ptr->glink_core_priv,
5399 "%s: invalid rcid %u received\n", __func__,
5400 rcid);
5401 return;
5402 }
5403
5404 spin_lock_irqsave(&ctx->tx_lists_lock_lhc3, flags);
5405 tx_pkt = ch_get_tx_pending_remote_done(ctx, riid);
5406 if (IS_ERR_OR_NULL(tx_pkt)) {
5407 /*
5408 * FUTURE - in the case of a zero-copy transport, this is a
5409 * fatal protocol failure since memory corruption could occur
5410 * in this case. Prevent this by adding code in glink_close()
5411 * to recall any buffers in flight / wait for them to be
5412 * returned.
5413 */
5414 GLINK_ERR_CH(ctx, "%s: R[%u]: No matching tx\n",
5415 __func__,
5416 (unsigned int)riid);
5417 spin_unlock_irqrestore(&ctx->tx_lists_lock_lhc3, flags);
5418 rwref_put(&ctx->ch_state_lhb2);
5419 return;
5420 }
5421
5422 /* notify client */
5423 ctx->notify_tx_done(ctx, ctx->user_priv, tx_pkt->pkt_priv,
5424 tx_pkt->data ? tx_pkt->data : tx_pkt->iovec);
5425 intent_size = tx_pkt->intent_size;
5426 cookie = tx_pkt->cookie;
5427 ch_remove_tx_pending_remote_done(ctx, tx_pkt);
5428 spin_unlock_irqrestore(&ctx->tx_lists_lock_lhc3, flags);
5429
5430 if (reuse)
5431 ch_push_remote_rx_intent(ctx, intent_size, riid, cookie);
5432 rwref_put(&ctx->ch_state_lhb2);
5433}
5434
5435/**
5436 * xprt_schedule_tx() - Schedules packet for transmit.
5437 * @xprt_ptr: Transport to send packet on.
5438 * @ch_ptr: Channel to send packet on.
5439 * @tx_info: Packet to transmit.
5440 */
5441static void xprt_schedule_tx(struct glink_core_xprt_ctx *xprt_ptr,
5442 struct channel_ctx *ch_ptr,
5443 struct glink_core_tx_pkt *tx_info)
5444{
5445 unsigned long flags;
5446
5447 if (unlikely(xprt_ptr->local_state == GLINK_XPRT_DOWN)) {
5448 GLINK_ERR_CH(ch_ptr, "%s: Error XPRT is down\n", __func__);
5449 kfree(tx_info);
5450 return;
5451 }
5452
5453 spin_lock_irqsave(&xprt_ptr->tx_ready_lock_lhb3, flags);
5454 if (unlikely(!ch_is_fully_opened(ch_ptr))) {
5455 spin_unlock_irqrestore(&xprt_ptr->tx_ready_lock_lhb3, flags);
5456 GLINK_ERR_CH(ch_ptr, "%s: Channel closed before tx\n",
5457 __func__);
5458 kfree(tx_info);
5459 return;
5460 }
5461 if (list_empty(&ch_ptr->tx_ready_list_node))
5462 list_add_tail(&ch_ptr->tx_ready_list_node,
5463 &xprt_ptr->prio_bin[ch_ptr->curr_priority].tx_ready);
5464
5465 spin_lock(&ch_ptr->tx_lists_lock_lhc3);
5466 list_add_tail(&tx_info->list_node, &ch_ptr->tx_active);
5467 glink_qos_do_ch_tx(ch_ptr);
5468 if (unlikely(tx_info->tracer_pkt))
5469 tracer_pkt_log_event((void *)(tx_info->data),
5470 GLINK_QUEUE_TO_SCHEDULER);
5471
5472 spin_unlock(&ch_ptr->tx_lists_lock_lhc3);
5473 spin_unlock_irqrestore(&xprt_ptr->tx_ready_lock_lhb3, flags);
5474
Kyle Yan65be4a52016-10-31 15:05:00 -07005475 kthread_queue_work(&xprt_ptr->tx_wq, &xprt_ptr->tx_kwork);
Chris Lewfa6135e2016-08-01 13:29:46 -07005476}
5477
5478/**
5479 * xprt_single_threaded_tx() - Transmit in the context of sender.
5480 * @xprt_ptr: Transport to send packet on.
5481 * @ch_ptr: Channel to send packet on.
5482 * @tx_info: Packet to transmit.
5483 */
5484static int xprt_single_threaded_tx(struct glink_core_xprt_ctx *xprt_ptr,
5485 struct channel_ctx *ch_ptr,
5486 struct glink_core_tx_pkt *tx_info)
5487{
5488 int ret;
5489 unsigned long flags;
5490
5491 spin_lock_irqsave(&ch_ptr->tx_pending_rmt_done_lock_lhc4, flags);
5492 do {
5493 ret = xprt_ptr->ops->tx(ch_ptr->transport_ptr->ops,
5494 ch_ptr->lcid, tx_info);
5495 } while (ret == -EAGAIN);
5496 if (ret < 0 || tx_info->size_remaining) {
5497 GLINK_ERR_CH(ch_ptr, "%s: Error %d writing data\n",
5498 __func__, ret);
5499 kfree(tx_info);
5500 } else {
5501 list_add_tail(&tx_info->list_done,
5502 &ch_ptr->tx_pending_remote_done);
5503 ret = 0;
5504 }
5505 spin_unlock_irqrestore(&ch_ptr->tx_pending_rmt_done_lock_lhc4, flags);
5506 return ret;
5507}
5508
5509/**
5510 * glink_scheduler_eval_prio() - Evaluate the channel priority
5511 * @ctx: Channel whose priority is evaluated.
5512 * @xprt_ctx: Transport in which the channel is part of.
5513 *
5514 * This function is called by the packet scheduler to measure the traffic
5515 * rate observed in the channel and compare it against the traffic rate
5516 * requested by the channel. The comparison result is used to evaluate the
5517 * priority of the channel.
5518 */
5519static void glink_scheduler_eval_prio(struct channel_ctx *ctx,
5520 struct glink_core_xprt_ctx *xprt_ctx)
5521{
Chris Lew39393242017-08-11 15:56:56 -07005522 unsigned long token_end_time, rem;
5523 uint64_t token_consume_time;
Chris Lewfa6135e2016-08-01 13:29:46 -07005524 unsigned long obs_rate_kBps;
5525
5526 if (ctx->initial_priority == 0)
5527 return;
5528
5529 if (ctx->token_count)
5530 return;
5531
5532 token_end_time = arch_counter_get_cntvct();
5533
5534 token_consume_time = NSEC_PER_SEC;
5535 rem = do_div(token_consume_time, arch_timer_get_rate());
5536 token_consume_time = (token_end_time - ctx->token_start_time) *
5537 token_consume_time;
5538 rem = do_div(token_consume_time, 1000);
5539 obs_rate_kBps = glink_qos_calc_rate_kBps(ctx->txd_len,
5540 token_consume_time);
5541 if (obs_rate_kBps > ctx->req_rate_kBps) {
5542 GLINK_INFO_CH(ctx, "%s: Obs. Rate (%lu) > Req. Rate (%lu)\n",
5543 __func__, obs_rate_kBps, ctx->req_rate_kBps);
5544 glink_qos_update_ch_prio(ctx, 0);
5545 } else {
5546 glink_qos_update_ch_prio(ctx, ctx->initial_priority);
5547 }
5548
5549 ctx->token_count = xprt_ctx->token_count;
5550 ctx->txd_len = 0;
5551 ctx->token_start_time = arch_counter_get_cntvct();
5552}
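/*
 * Editorial worked example of the conversion above, assuming a 19.2 MHz
 * arch timer: NSEC_PER_SEC / 19200000 is ~52 ns per tick, and the final
 * do_div by 1000 leaves token_consume_time in microseconds. A window of
 * 192,000 ticks is then ~10,000 us; if txd_len was 100 KB in that window,
 * the observed rate is roughly 100 KB / 10 ms = 10,000 kBps, which is
 * compared against req_rate_kBps to decide whether the channel keeps its
 * initial priority or is demoted to priority 0.
 */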
5553
5554/**
5555 * glink_scheduler_tx() - Transmit operation by the scheduler
5556 * @ctx: Channel which is scheduled for transmission.
5557 * @xprt_ctx: Transport context in which the transmission is performed.
5558 *
5559 * This function is called by the scheduler after scheduling a channel for
5560 * transmission over the transport.
5561 *
5562 * Return: return value as returned by the transport on success,
5563 * standard Linux error codes on failure.
5564 */
5565static int glink_scheduler_tx(struct channel_ctx *ctx,
5566 struct glink_core_xprt_ctx *xprt_ctx)
5567{
5568 unsigned long flags;
Dhoat Harpal701649f2016-09-09 16:51:14 +05305569 struct glink_core_tx_pkt *tx_info, *temp_tx_info;
Chris Lewfa6135e2016-08-01 13:29:46 -07005570 size_t txd_len = 0;
5571 size_t tx_len = 0;
5572 uint32_t num_pkts = 0;
5573 int ret = 0;
5574
5575 spin_lock_irqsave(&ctx->tx_lists_lock_lhc3, flags);
5576 while (txd_len < xprt_ctx->mtu &&
5577 !list_empty(&ctx->tx_active)) {
5578 tx_info = list_first_entry(&ctx->tx_active,
5579 struct glink_core_tx_pkt, list_node);
5580 rwref_get(&tx_info->pkt_ref);
5581
5582 spin_lock(&ctx->tx_pending_rmt_done_lock_lhc4);
5583 if (list_empty(&tx_info->list_done))
5584 list_add(&tx_info->list_done,
5585 &ctx->tx_pending_remote_done);
5586 spin_unlock(&ctx->tx_pending_rmt_done_lock_lhc4);
5587 spin_unlock_irqrestore(&ctx->tx_lists_lock_lhc3, flags);
5588
5589 if (unlikely(tx_info->tracer_pkt)) {
5590 tracer_pkt_log_event((void *)(tx_info->data),
5591 GLINK_SCHEDULER_TX);
5592 ret = xprt_ctx->ops->tx_cmd_tracer_pkt(xprt_ctx->ops,
5593 ctx->lcid, tx_info);
5594 } else {
5595 tx_len = tx_info->size_remaining <
5596 (xprt_ctx->mtu - txd_len) ?
5597 tx_info->size_remaining :
5598 (xprt_ctx->mtu - txd_len);
5599 tx_info->tx_len = tx_len;
5600 ret = xprt_ctx->ops->tx(xprt_ctx->ops,
5601 ctx->lcid, tx_info);
5602 }
5603 spin_lock_irqsave(&ctx->tx_lists_lock_lhc3, flags);
Dhoat Harpal701649f2016-09-09 16:51:14 +05305604 if (!list_empty(&ctx->tx_active)) {
5605 /*
5606 * Verify if same tx_info still exist in tx_active
5607 * list and is not removed during tx operation.
5608 * It can happen if SSR and tx done both happen
5609 * before tx_lists_lock_lhc3 is taken.
5610 */
5611 temp_tx_info = list_first_entry(&ctx->tx_active,
5612 struct glink_core_tx_pkt, list_node);
5613 if (temp_tx_info != tx_info)
5614 continue;
5615 } else {
5616 break;
5617 }
Chris Lewfa6135e2016-08-01 13:29:46 -07005618 if (ret == -EAGAIN) {
5619 /*
5620 * transport unable to send at the moment and will call
5621 * tx_resume() when it can send again.
5622 */
5623 rwref_put(&tx_info->pkt_ref);
5624 break;
5625 } else if (ret < 0) {
5626 /*
5627 * General failure code that indicates that the
5628 * transport is unable to recover. In this case, the
5629 * communication failure will be detected at a higher
5630 * level and a subsystem restart of the affected system
5631 * will be triggered.
5632 */
5633 GLINK_ERR_XPRT(xprt_ctx,
5634 "%s: unrecoverable xprt failure %d\n",
5635 __func__, ret);
5636 rwref_put(&tx_info->pkt_ref);
5637 break;
5638 } else if (!ret && tx_info->size_remaining) {
5639 /*
5640 * Transport unable to send any data on this channel.
5641 * Break out of the loop so that the scheduler can
5642 * continue with the next channel.
5643 */
Dhoat Harpal701649f2016-09-09 16:51:14 +05305644 rwref_put(&tx_info->pkt_ref);
Chris Lewfa6135e2016-08-01 13:29:46 -07005645 break;
5646 }
5647
5648 txd_len += tx_len;
5649 if (!tx_info->size_remaining) {
5650 num_pkts++;
5651 list_del_init(&tx_info->list_node);
Chris Lewfa6135e2016-08-01 13:29:46 -07005652 }
Dhoat Harpal701649f2016-09-09 16:51:14 +05305653 rwref_put(&tx_info->pkt_ref);
Chris Lewfa6135e2016-08-01 13:29:46 -07005654 }
5655
5656 ctx->txd_len += txd_len;
5657 if (txd_len) {
5658 if (num_pkts >= ctx->token_count)
5659 ctx->token_count = 0;
5660 else if (num_pkts)
5661 ctx->token_count -= num_pkts;
5662 else
5663 ctx->token_count--;
5664 }
5665 spin_unlock_irqrestore(&ctx->tx_lists_lock_lhc3, flags);
5666
5667 return ret;
5668}
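/*
 * Editorial note on the token accounting above: each pass transmits at most
 * one MTU's worth of data. Completed packets consume one token each, and a
 * pass that only moves part of a large packet still costs a single token,
 * so token_count reaches zero after at most token_count transmitting
 * passes; glink_scheduler_eval_prio() then re-evaluates the channel's
 * priority and refills the tokens from the transport.
 */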
5669
5670/**
5671 * tx_func() - Transmit kthread worker
5672 * @work: Linux kthread work structure
5673 */
5674static void tx_func(struct kthread_work *work)
5675{
5676 struct channel_ctx *ch_ptr;
5677 uint32_t prio;
Channagoud Kadabi075db3b2017-03-16 14:26:17 -07005678 uint32_t tx_ready_head_prio = 0;
Chris Lewfa6135e2016-08-01 13:29:46 -07005679 int ret;
5680 struct channel_ctx *tx_ready_head = NULL;
5681 bool transmitted_successfully = true;
5682 unsigned long flags;
5683 struct glink_core_xprt_ctx *xprt_ptr = container_of(work,
5684 struct glink_core_xprt_ctx, tx_kwork);
5685
5686 GLINK_PERF("%s: worker starting\n", __func__);
5687
5688 while (1) {
5689 prio = xprt_ptr->num_priority - 1;
5690 spin_lock_irqsave(&xprt_ptr->tx_ready_lock_lhb3, flags);
5691 while (list_empty(&xprt_ptr->prio_bin[prio].tx_ready)) {
5692 if (prio == 0) {
5693 spin_unlock_irqrestore(
5694 &xprt_ptr->tx_ready_lock_lhb3, flags);
5695 return;
5696 }
5697 prio--;
5698 }
5699 glink_pm_qos_vote(xprt_ptr);
5700 ch_ptr = list_first_entry(&xprt_ptr->prio_bin[prio].tx_ready,
5701 struct channel_ctx, tx_ready_list_node);
Dhoat Harpal701649f2016-09-09 16:51:14 +05305702 rwref_get(&ch_ptr->ch_state_lhb2);
Chris Lewfa6135e2016-08-01 13:29:46 -07005703 spin_unlock_irqrestore(&xprt_ptr->tx_ready_lock_lhb3, flags);
5704
5705 if (tx_ready_head == NULL || tx_ready_head_prio < prio) {
5706 tx_ready_head = ch_ptr;
5707 tx_ready_head_prio = prio;
5708 }
5709
5710 if (ch_ptr == tx_ready_head && !transmitted_successfully) {
5711 GLINK_ERR_XPRT(xprt_ptr,
5712 "%s: Unable to send data on this transport.\n",
5713 __func__);
Dhoat Harpal701649f2016-09-09 16:51:14 +05305714 rwref_put(&ch_ptr->ch_state_lhb2);
Chris Lewfa6135e2016-08-01 13:29:46 -07005715 break;
5716 }
5717 transmitted_successfully = false;
5718
5719 ret = glink_scheduler_tx(ch_ptr, xprt_ptr);
5720 if (ret == -EAGAIN) {
5721 /*
5722 * transport unable to send at the moment and will call
5723 * tx_resume() when it can send again.
5724 */
Dhoat Harpal701649f2016-09-09 16:51:14 +05305725 rwref_put(&ch_ptr->ch_state_lhb2);
Chris Lewfa6135e2016-08-01 13:29:46 -07005726 break;
5727 } else if (ret < 0) {
5728 /*
5729 * General failure code that indicates that the
5730 * transport is unable to recover. In this case, the
5731 * communication failure will be detected at a higher
5732 * level and a subsystem restart of the affected system
5733 * will be triggered.
5734 */
5735 GLINK_ERR_XPRT(xprt_ptr,
5736 "%s: unrecoverable xprt failure %d\n",
5737 __func__, ret);
Dhoat Harpal701649f2016-09-09 16:51:14 +05305738 rwref_put(&ch_ptr->ch_state_lhb2);
Chris Lewfa6135e2016-08-01 13:29:46 -07005739 break;
5740 } else if (!ret) {
5741 /*
5742 * Transport unable to send any data on this channel,
5743 * but didn't return an error. Move to the next channel
5744 * and continue.
5745 */
5746 spin_lock_irqsave(&xprt_ptr->tx_ready_lock_lhb3, flags);
5747 list_rotate_left(&xprt_ptr->prio_bin[prio].tx_ready);
5748 spin_unlock_irqrestore(&xprt_ptr->tx_ready_lock_lhb3,
5749 flags);
Dhoat Harpal701649f2016-09-09 16:51:14 +05305750 rwref_put(&ch_ptr->ch_state_lhb2);
Chris Lewfa6135e2016-08-01 13:29:46 -07005751 continue;
5752 }
5753
5754 spin_lock_irqsave(&xprt_ptr->tx_ready_lock_lhb3, flags);
5755 spin_lock(&ch_ptr->tx_lists_lock_lhc3);
5756
5757 glink_scheduler_eval_prio(ch_ptr, xprt_ptr);
5758 if (list_empty(&ch_ptr->tx_active)) {
5759 list_del_init(&ch_ptr->tx_ready_list_node);
5760 glink_qos_done_ch_tx(ch_ptr);
5761 }
5762
5763 spin_unlock(&ch_ptr->tx_lists_lock_lhc3);
5764 spin_unlock_irqrestore(&xprt_ptr->tx_ready_lock_lhb3, flags);
5765
5766 tx_ready_head = NULL;
5767 transmitted_successfully = true;
Dhoat Harpal701649f2016-09-09 16:51:14 +05305768 rwref_put(&ch_ptr->ch_state_lhb2);
Chris Lewfa6135e2016-08-01 13:29:46 -07005769 }
5770 glink_pm_qos_unvote(xprt_ptr);
5771 GLINK_PERF("%s: worker exiting\n", __func__);
5772}
5773
5774static void glink_core_tx_resume(struct glink_transport_if *if_ptr)
5775{
5776 struct glink_core_xprt_ctx *xprt_ptr = if_ptr->glink_core_priv;
5777
Kyle Yan65be4a52016-10-31 15:05:00 -07005778 kthread_queue_work(&xprt_ptr->tx_wq, &xprt_ptr->tx_kwork);
Chris Lewfa6135e2016-08-01 13:29:46 -07005779}
5780
5781/**
5782 * glink_pm_qos_vote() - Add Power Management QoS Vote
5783 * @xprt_ptr: Transport for power vote
5784 *
5785 * Note - must be called with tx_ready_lock_lhb3 locked.
5786 */
5787static void glink_pm_qos_vote(struct glink_core_xprt_ctx *xprt_ptr)
5788{
5789 if (glink_pm_qos && !xprt_ptr->qos_req_active) {
5790 GLINK_PERF("%s: qos vote %u us\n", __func__, glink_pm_qos);
5791 pm_qos_update_request(&xprt_ptr->pm_qos_req, glink_pm_qos);
5792 xprt_ptr->qos_req_active = true;
5793 }
5794 xprt_ptr->tx_path_activity = true;
5795}
5796
5797/**
5798 * glink_pm_qos_unvote() - Schedule Power Management QoS Vote Removal
5799 * @xprt_ptr: Transport for power vote removal
5800 *
5801 * Note - must be called with tx_ready_lock_lhb3 locked.
5802 */
5803static void glink_pm_qos_unvote(struct glink_core_xprt_ctx *xprt_ptr)
5804{
5805 xprt_ptr->tx_path_activity = false;
5806 if (xprt_ptr->qos_req_active) {
5807 GLINK_PERF("%s: qos unvote\n", __func__);
5808 schedule_delayed_work(&xprt_ptr->pm_qos_work,
5809 msecs_to_jiffies(GLINK_PM_QOS_HOLDOFF_MS));
5810 }
5811}
5812
5813/**
5814 * glink_pm_qos_cancel_worker() - Remove Power Management QoS Vote
5815 * @work: Delayed work structure
5816 *
5817 * Removes PM QoS vote if no additional transmit activity has occurred between
5818 * the unvote and when this worker runs.
5819 */
5820static void glink_pm_qos_cancel_worker(struct work_struct *work)
5821{
5822 struct glink_core_xprt_ctx *xprt_ptr;
5823 unsigned long flags;
5824
5825 xprt_ptr = container_of(to_delayed_work(work),
5826 struct glink_core_xprt_ctx, pm_qos_work);
5827
5828 spin_lock_irqsave(&xprt_ptr->tx_ready_lock_lhb3, flags);
5829 if (!xprt_ptr->tx_path_activity) {
5830 /* no more tx activity */
5831 GLINK_PERF("%s: qos off\n", __func__);
5832 pm_qos_update_request(&xprt_ptr->pm_qos_req,
5833 PM_QOS_DEFAULT_VALUE);
5834 xprt_ptr->qos_req_active = false;
5835 }
5836 xprt_ptr->tx_path_activity = false;
5837 spin_unlock_irqrestore(&xprt_ptr->tx_ready_lock_lhb3, flags);
5838}
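/*
 * Editorial note on the vote/unvote pair above: a tx vote is applied
 * immediately, while the unvote is only scheduled. The worker fires
 * GLINK_PM_QOS_HOLDOFF_MS later and drops the vote only if
 * tx_path_activity stayed false for the whole holdoff window, so
 * back-to-back transmissions never bounce the PM QoS request.
 */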
5839
5840/**
5841 * glink_core_rx_cmd_remote_sigs() - Receive remote channel signal command
5842 *
5843 * @if_ptr: Pointer to transport instance
5844 * @rcid: Remote Channel ID
5845 */
5846static void glink_core_rx_cmd_remote_sigs(struct glink_transport_if *if_ptr,
5847 uint32_t rcid, uint32_t sigs)
5848{
5849 struct channel_ctx *ctx;
5850 uint32_t old_sigs;
5851
5852 ctx = xprt_rcid_to_ch_ctx_get(if_ptr->glink_core_priv, rcid);
5853 if (!ctx) {
5854 /* unknown RCID received - this shouldn't happen */
5855 GLINK_ERR_XPRT(if_ptr->glink_core_priv,
5856 "%s: invalid rcid %u received\n", __func__,
5857 (unsigned int)rcid);
5858 return;
5859 }
5860
5861 if (!ch_is_fully_opened(ctx)) {
5862 GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
5863 __func__);
5864 rwref_put(&ctx->ch_state_lhb2);
5865 return;
5866 }
5867
5868 old_sigs = ctx->rsigs;
5869 ctx->rsigs = sigs;
5870 if (ctx->notify_rx_sigs) {
5871 ctx->notify_rx_sigs(ctx, ctx->user_priv, old_sigs, ctx->rsigs);
5872 GLINK_INFO_CH(ctx, "%s: notify rx sigs old:0x%x new:0x%x\n",
5873 __func__, old_sigs, ctx->rsigs);
5874 }
5875 rwref_put(&ctx->ch_state_lhb2);
5876}
5877
5878static struct glink_core_if core_impl = {
5879 .link_up = glink_core_link_up,
5880 .link_down = glink_core_link_down,
5881 .rx_cmd_version = glink_core_rx_cmd_version,
5882 .rx_cmd_version_ack = glink_core_rx_cmd_version_ack,
5883 .rx_cmd_ch_remote_open = glink_core_rx_cmd_ch_remote_open,
5884 .rx_cmd_ch_open_ack = glink_core_rx_cmd_ch_open_ack,
5885 .rx_cmd_ch_remote_close = glink_core_rx_cmd_ch_remote_close,
5886 .rx_cmd_ch_close_ack = glink_core_rx_cmd_ch_close_ack,
5887 .rx_get_pkt_ctx = glink_core_rx_get_pkt_ctx,
5888 .rx_put_pkt_ctx = glink_core_rx_put_pkt_ctx,
5889 .rx_cmd_remote_rx_intent_put = glink_core_remote_rx_intent_put,
5890 .rx_cmd_remote_rx_intent_put_cookie =
5891 glink_core_remote_rx_intent_put_cookie,
5892 .rx_cmd_remote_rx_intent_req = glink_core_rx_cmd_remote_rx_intent_req,
5893 .rx_cmd_rx_intent_req_ack = glink_core_rx_cmd_rx_intent_req_ack,
5894 .rx_cmd_tx_done = glink_core_rx_cmd_tx_done,
5895 .tx_resume = glink_core_tx_resume,
5896 .rx_cmd_remote_sigs = glink_core_rx_cmd_remote_sigs,
5897};
5898
5899/**
5900 * glink_xprt_ctx_iterator_init() - Initializes the transport context list
5901 * iterator
5902 * @xprt_i: pointer to the transport context iterator.
5903 *
5904 * This function acquires the transport list lock, which must then be
5905 * released by glink_xprt_ctx_iterator_end()
5906 */
5907void glink_xprt_ctx_iterator_init(struct xprt_ctx_iterator *xprt_i)
5908{
5909 if (xprt_i == NULL)
5910 return;
5911
5912 mutex_lock(&transport_list_lock_lha0);
5913 xprt_i->xprt_list = &transport_list;
5914 xprt_i->i_curr = list_entry(&transport_list,
5915 struct glink_core_xprt_ctx, list_node);
5916}
5917EXPORT_SYMBOL(glink_xprt_ctx_iterator_init);
5918
5919/**
5920 * glink_xprt_ctx_iterator_end() - Ends the transport context list iteration
5921 * @xprt_i: pointer to the transport context iterator.
5922 */
5923void glink_xprt_ctx_iterator_end(struct xprt_ctx_iterator *xprt_i)
5924{
5925 if (xprt_i == NULL)
5926 return;
5927
5928 xprt_i->xprt_list = NULL;
5929 xprt_i->i_curr = NULL;
5930 mutex_unlock(&transport_list_lock_lha0);
5931}
5932EXPORT_SYMBOL(glink_xprt_ctx_iterator_end);
5933
5934/**
5935 * glink_xprt_ctx_iterator_next() - iterates element by element in transport
5936 * context list
5937 * @xprt_i: pointer to the transport context iterator.
5938 *
5939 * Return: pointer to the next transport context, or NULL at the end of the list
5940 */
5941struct glink_core_xprt_ctx *glink_xprt_ctx_iterator_next(
5942 struct xprt_ctx_iterator *xprt_i)
5943{
5944 struct glink_core_xprt_ctx *xprt_ctx = NULL;
5945
5946 if (xprt_i == NULL)
5947 return xprt_ctx;
5948
5949 if (list_empty(xprt_i->xprt_list))
5950 return xprt_ctx;
5951
5952 list_for_each_entry_continue(xprt_i->i_curr,
5953 xprt_i->xprt_list, list_node) {
5954 xprt_ctx = xprt_i->i_curr;
5955 break;
5956 }
5957 return xprt_ctx;
5958}
5959EXPORT_SYMBOL(glink_xprt_ctx_iterator_next);
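/*
 * Editorial usage sketch for the iterator API above: a hypothetical
 * inspection helper (dump_transports is not a real symbol). The transport
 * list lock is held between init and end, so the loop body must not call
 * back into anything that takes that lock.
 */
#if 0 /* illustrative only */
static void dump_transports(void)
{
	struct xprt_ctx_iterator it;
	struct glink_core_xprt_ctx *xprt;

	glink_xprt_ctx_iterator_init(&it);	/* takes transport list lock */
	for (xprt = glink_xprt_ctx_iterator_next(&it); xprt;
	     xprt = glink_xprt_ctx_iterator_next(&it))
		pr_info("xprt %s edge %s state %s\n",
			glink_get_xprt_name(xprt),
			glink_get_xprt_edge_name(xprt),
			glink_get_xprt_state(xprt));
	glink_xprt_ctx_iterator_end(&it);	/* drops transport list lock */
}
#endif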
5960
5961/**
5962 * glink_get_xprt_name() - get the transport name
5963 * @xprt_ctx: pointer to the transport context.
5964 *
5965 * Return: name of the transport
5966 */
5967char *glink_get_xprt_name(struct glink_core_xprt_ctx *xprt_ctx)
5968{
5969 if (xprt_ctx == NULL)
5970 return NULL;
5971
5972 return xprt_ctx->name;
5973}
5974EXPORT_SYMBOL(glink_get_xprt_name);
5975
5976/**
5977 * glink_get_xprt_edge_name() - get the name of the remote processor/edge
5978 * of the transport
5979 * @xprt_ctx: pointer to the transport context.
5980 *
5981 * Return: Name of the remote processor/edge
5982 */
5983char *glink_get_xprt_edge_name(struct glink_core_xprt_ctx *xprt_ctx)
5984{
5985 if (xprt_ctx == NULL)
5986 return NULL;
5987 return xprt_ctx->edge;
5988}
5989EXPORT_SYMBOL(glink_get_xprt_edge_name);
5990
5991/**
5992 * glink_get_xprt_state() - get the state of the transport
5993 * @xprt_ctx: pointer to the transport context.
5994 *
5995 * Return: Name of the transport state, NULL in case of invalid input
5996 */
5997const char *glink_get_xprt_state(struct glink_core_xprt_ctx *xprt_ctx)
5998{
5999 if (xprt_ctx == NULL)
6000 return NULL;
6001
6002 return glink_get_xprt_state_string(xprt_ctx->local_state);
6003}
6004EXPORT_SYMBOL(glink_get_xprt_state);
6005
6006/**
6007 * glink_get_xprt_version_features() - get the version and feature set
6008 * of local transport in glink
6009 * @xprt_ctx: pointer to the transport context.
6010 *
6011 * Return: pointer to the glink_core_version
6012 */
6013const struct glink_core_version *glink_get_xprt_version_features(
6014 struct glink_core_xprt_ctx *xprt_ctx)
6015{
6016 const struct glink_core_version *ver = NULL;
6017
6018 if (xprt_ctx == NULL)
6019 return ver;
6020
6021 ver = &xprt_ctx->versions[xprt_ctx->local_version_idx];
6022 return ver;
6023}
6024EXPORT_SYMBOL(glink_get_xprt_version_features);
6025
6026/**
6027 * glink_ch_ctx_iterator_init() - Initializes the channel context list iterator
6028 * @ch_iter: pointer to the channel context iterator.
6029 * @xprt: pointer to the transport context that holds the channel list
6030 *
6031 * This function acquires the transport's channel list lock, which must then
6032 * be released by glink_ch_ctx_iterator_end()
6033 */
6034void glink_ch_ctx_iterator_init(struct ch_ctx_iterator *ch_iter,
6035 struct glink_core_xprt_ctx *xprt)
6036{
6037 unsigned long flags;
6038
6039 if (ch_iter == NULL || xprt == NULL)
6040 return;
6041
6042 spin_lock_irqsave(&xprt->xprt_ctx_lock_lhb1, flags);
6043 ch_iter->ch_list = &(xprt->channels);
6044 ch_iter->i_curr = list_entry(&(xprt->channels),
6045 struct channel_ctx, port_list_node);
6046 ch_iter->ch_list_flags = flags;
6047}
6048EXPORT_SYMBOL(glink_ch_ctx_iterator_init);
6049
6050/**
6051 * glink_ch_ctx_iterator_end() - Ends the channel context list iteration
6052 * @ch_iter: pointer to the channel context iterator.
6053 */
6054void glink_ch_ctx_iterator_end(struct ch_ctx_iterator *ch_iter,
6055 struct glink_core_xprt_ctx *xprt)
6056{
6057 if (ch_iter == NULL || xprt == NULL)
6058 return;
6059
6060 spin_unlock_irqrestore(&xprt->xprt_ctx_lock_lhb1,
6061 ch_iter->ch_list_flags);
6062 ch_iter->ch_list = NULL;
6063 ch_iter->i_curr = NULL;
6064}
6065EXPORT_SYMBOL(glink_ch_ctx_iterator_end);
6066
6067/**
6068 * glink_ch_ctx_iterator_next() - iterates element by element in channel
6069 * context list
6070 * @c_i: pointer to the channel context iterator.
6071 *
6072 * Return: pointer to the next channel context, or NULL at the end of the list
6073 */
6074struct channel_ctx *glink_ch_ctx_iterator_next(struct ch_ctx_iterator *c_i)
6075{
6076 struct channel_ctx *ch_ctx = NULL;
6077
6078 if (c_i == NULL)
6079 return ch_ctx;
6080
6081 if (list_empty(c_i->ch_list))
6082 return ch_ctx;
6083
6084 list_for_each_entry_continue(c_i->i_curr,
6085 c_i->ch_list, port_list_node) {
6086 ch_ctx = c_i->i_curr;
6087 break;
6088 }
6089 return ch_ctx;
6090}
6091EXPORT_SYMBOL(glink_ch_ctx_iterator_next);
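/*
 * Editorial usage sketch for the channel iterator, mirroring the transport
 * iterator example above (dump_channels is hypothetical). The transport's
 * channel list spinlock is held with interrupts disabled between init and
 * end, so the loop body must be atomic.
 */
#if 0 /* illustrative only */
static void dump_channels(struct glink_core_xprt_ctx *xprt)
{
	struct ch_ctx_iterator it;
	struct channel_ctx *ch;

	glink_ch_ctx_iterator_init(&it, xprt);	/* takes channel list lock */
	for (ch = glink_ch_ctx_iterator_next(&it); ch;
	     ch = glink_ch_ctx_iterator_next(&it))
		pr_info("ch %s lcid %d rcid %d\n",
			glink_get_ch_name(ch),
			glink_get_ch_lcid(ch),
			glink_get_ch_rcid(ch));
	glink_ch_ctx_iterator_end(&it, xprt);	/* drops channel list lock */
}
#endif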
6092
6093/**
6094 * glink_get_ch_name() - get the channel name
6095 * @ch_ctx: pointer to the channel context.
6096 *
6097 * Return: name of the channel, NULL in case of invalid input
6098 */
6099char *glink_get_ch_name(struct channel_ctx *ch_ctx)
6100{
6101 if (ch_ctx == NULL)
6102 return NULL;
6103
6104 return ch_ctx->name;
6105}
6106EXPORT_SYMBOL(glink_get_ch_name);
6107
6108/**
6109 * glink_get_ch_edge_name() - get the edge on which channel is created
6110 * @ch_ctx: pointer to the channel context.
6111 *
6112 * Return: name of the edge, NULL in case of invalid input
6113 */
6114char *glink_get_ch_edge_name(struct channel_ctx *ch_ctx)
6115{
6116 if (ch_ctx == NULL)
6117 return NULL;
6118
6119 return ch_ctx->transport_ptr->edge;
6120}
6121EXPORT_SYMBOL(glink_get_ch_edge_name);
6122
6123/**
6124 * glink_get_ch_lcid() - get the local channel ID
6125 * @ch_ctx: pointer to the channel context.
6126 *
6127 * Return: local channel id, -EINVAL in case of invalid input
6128 */
6129int glink_get_ch_lcid(struct channel_ctx *ch_ctx)
6130{
6131 if (ch_ctx == NULL)
6132 return -EINVAL;
6133
6134 return ch_ctx->lcid;
6135}
6136EXPORT_SYMBOL(glink_get_ch_lcid);
6137
6138/**
6139 * glink_get_ch_rcid() - get the remote channel ID
6140 * @ch_ctx: pointer to the channel context.
6141 *
6142 * Return: remote channel id, -EINVAL in case of invalid input
6143 */
6144int glink_get_ch_rcid(struct channel_ctx *ch_ctx)
6145{
6146 if (ch_ctx == NULL)
6147 return -EINVAL;
6148
6149 return ch_ctx->rcid;
6150}
6151EXPORT_SYMBOL(glink_get_ch_rcid);
6152
6153/**
6154 * glink_get_ch_lstate() - get the local channel state
6155 * @ch_ctx: pointer to the channel context.
6156 *
6157 * Return: Name of the local channel state, NULL in case of invalid input
6158 */
6159const char *glink_get_ch_lstate(struct channel_ctx *ch_ctx)
6160{
6161 if (ch_ctx == NULL)
6162 return NULL;
6163
6164 return glink_get_ch_state_string(ch_ctx->local_open_state);
6165}
6166EXPORT_SYMBOL(glink_get_ch_lstate);
6167
6168/**
6169 * glink_get_ch_rstate() - get the remote channel state
6170 * @ch_ctx: pointer to the channel context.
6171 *
6172 * Return: true if the remote side is opened, false otherwise
6173 */
6174bool glink_get_ch_rstate(struct channel_ctx *ch_ctx)
6175{
6176 if (ch_ctx == NULL)
6177 return false;
6178
6179 return ch_ctx->remote_opened;
6180}
6181EXPORT_SYMBOL(glink_get_ch_rstate);
6182
6183/**
6184 * glink_get_ch_xprt_name() - get the name of the transport to which
6185 * the channel belongs
6186 * @ch_ctx: pointer to the channel context.
6187 *
6188 * Return: name of the transport, NULL in case of invalid input
6189 */
6190char *glink_get_ch_xprt_name(struct channel_ctx *ch_ctx)
6191{
6192 if (ch_ctx == NULL)
6193 return NULL;
6194
6195 return ch_ctx->transport_ptr->name;
6196}
6197EXPORT_SYMBOL(glink_get_ch_xprt_name);
6198
6199/**
6200 * glink_get_ch_tx_pkt_count() - get the total number of packets sent
6201 * through this channel
6202 * @ch_ctx: pointer to the channel context.
6203 *
6204 * Return: number of packets transmitted, -EINVAL in case of invalid input
6205 */
6206int glink_get_ch_tx_pkt_count(struct channel_ctx *ch_ctx)
6207{
6208 if (ch_ctx == NULL)
6209 return -EINVAL;
6210
6211 /* FUTURE: packet stats not yet implemented */
6212
6213 return -EOPNOTSUPP;
6214}
6215EXPORT_SYMBOL(glink_get_ch_tx_pkt_count);
6216
6217/**
6218 * glink_get_ch_rx_pkt_count() - get the total number of packets
6219 * received at this channel
6220 * @ch_ctx: pointer to the channel context.
6221 *
6222 * Return: number of packets received, -EINVAL in case of invalid input
6223 */
6224int glink_get_ch_rx_pkt_count(struct channel_ctx *ch_ctx)
6225{
6226 if (ch_ctx == NULL)
6227 return -EINVAL;
6228
6229 /* FUTURE: packet stats not yet implemented */
6230
6231 return -EOPNOTSUPP;
6232}
6233EXPORT_SYMBOL(glink_get_ch_rx_pkt_count);
6234
6235/**
6236 * glink_get_ch_lintents_queued() - get the total number of intents queued
6237 * at local side
6238 * @ch_ctx: pointer to the channel context.
6239 *
6240 * Return: number of intents queued, -EINVAL in case of invalid input
6241 */
6242int glink_get_ch_lintents_queued(struct channel_ctx *ch_ctx)
6243{
6244 struct glink_core_rx_intent *intent;
6245 int ilrx_count = 0;
6246
6247 if (ch_ctx == NULL)
6248 return -EINVAL;
6249
6250 list_for_each_entry(intent, &ch_ctx->local_rx_intent_list, list)
6251 ilrx_count++;
6252
6253 return ilrx_count;
6254}
6255EXPORT_SYMBOL(glink_get_ch_lintents_queued);
6256
6257/**
6258 * glink_get_ch_rintents_queued() - get the total number of intents queued
6259 * from remote side
6260 * @ch_ctx: pointer to the channel context.
6261 *
6262 * Return: number of intents queued, -EINVAL in case of invalid input
6263 */
6264int glink_get_ch_rintents_queued(struct channel_ctx *ch_ctx)
6265{
6266 struct glink_core_rx_intent *intent;
6267 int irrx_count = 0;
6268
6269 if (ch_ctx == NULL)
6270 return -EINVAL;
6271
6272 list_for_each_entry(intent, &ch_ctx->rmt_rx_intent_list, list)
6273 irrx_count++;
6274
6275 return irrx_count;
6276}
6277EXPORT_SYMBOL(glink_get_ch_rintents_queued);
6278
6279/**
6280 * glink_get_ch_intent_info() - get the intent details of a channel
6281 * @ch_ctx: pointer to the channel context.
6282 * @ch_ctx_i: pointer to a structure that will contain intent details
6283 *
6284 * This function is used to get all the channel intent details including locks.
6285 */
6286void glink_get_ch_intent_info(struct channel_ctx *ch_ctx,
6287 struct glink_ch_intent_info *ch_ctx_i)
6288{
6289 if (ch_ctx == NULL || ch_ctx_i == NULL)
6290 return;
6291
6292 ch_ctx_i->li_lst_lock = &ch_ctx->local_rx_intent_lst_lock_lhc1;
6293 ch_ctx_i->li_avail_list = &ch_ctx->local_rx_intent_list;
6294 ch_ctx_i->li_used_list = &ch_ctx->local_rx_intent_ntfy_list;
6295 ch_ctx_i->ri_lst_lock = &ch_ctx->rmt_rx_intent_lst_lock_lhc2;
6296 ch_ctx_i->ri_list = &ch_ctx->rmt_rx_intent_list;
6297}
6298EXPORT_SYMBOL(glink_get_ch_intent_info);
6299
6300/**
6301 * glink_get_debug_mask() - Return debug mask attribute
6302 *
6303 * Return: debug mask attribute
6304 */
6305unsigned int glink_get_debug_mask(void)
6306{
6307 return glink_debug_mask;
6308}
6309EXPORT_SYMBOL(glink_get_debug_mask);
6310
6311/**
6312 * glink_get_log_ctx() - Return log context for other GLINK modules.
6313 *
6314 * Return: Log context or NULL if none.
6315 */
6316void *glink_get_log_ctx(void)
6317{
6318 return log_ctx;
6319}
6320EXPORT_SYMBOL(glink_get_log_ctx);
6321
6322/**
6323 * glink_get_xprt_log_ctx() - Return log context for GLINK xprts.
6324 *
6325 * Return: Log context or NULL if none.
6326 */
6327void *glink_get_xprt_log_ctx(struct glink_core_xprt_ctx *xprt)
6328{
6329 if (xprt)
6330 return xprt->log_ctx;
6331 else
6332 return NULL;
6333}
6334EXPORT_SYMBOL(glink_get_xprt_log_ctx);
6335
6336static int glink_init(void)
6337{
6338 log_ctx = ipc_log_context_create(NUM_LOG_PAGES, "glink", 0);
Chris Lewfa6135e2016-08-01 13:29:46 -07006339 if (!log_ctx)
6340 GLINK_ERR("%s: unable to create log context\n", __func__);
6341 glink_debugfs_init();
6342
6343 return 0;
6344}
6345arch_initcall(glink_init);
6346
6347MODULE_DESCRIPTION("MSM Generic Link (G-Link) Transport");
6348MODULE_LICENSE("GPL v2");