/* Copyright (c) 2015-2018, 2020, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
12#ifndef MSM_GSI_H
13#define MSM_GSI_H
14#include <linux/types.h>
Jennifer L. Zennerdf159af2018-04-25 16:44:38 -040015#include <linux/interrupt.h>
Amir Levycdccd632016-10-30 09:36:41 +020016
/*
 * GSI hardware core version, supplied by the client in gsi_per_props.ver.
 * GSI_VER_MAX is a bounds sentinel, not a real version.
 */
enum gsi_ver {
	GSI_VER_ERR = 0,
	GSI_VER_1_0 = 1,
	GSI_VER_1_2 = 2,
	GSI_VER_1_3 = 3,
	GSI_VER_2_0 = 4,
	GSI_VER_MAX,
};
25
/*
 * Status codes returned by all GSI client APIs declared in this header.
 * GSI_STATUS_SUCCESS (0) is the only success value.
 */
enum gsi_status {
	GSI_STATUS_SUCCESS = 0,
	GSI_STATUS_ERROR = 1,
	GSI_STATUS_RING_INSUFFICIENT_SPACE = 2,
	GSI_STATUS_RING_EMPTY = 3,
	GSI_STATUS_RES_ALLOC_FAILURE = 4,
	GSI_STATUS_BAD_STATE = 5,
	GSI_STATUS_INVALID_PARAMS = 6,
	GSI_STATUS_UNSUPPORTED_OP = 7,
	GSI_STATUS_NODEV = 8,
	GSI_STATUS_POLL_EMPTY = 9,
	GSI_STATUS_EVT_RING_INCOMPATIBLE = 10,
	GSI_STATUS_TIMED_OUT = 11,
	GSI_STATUS_AGAIN = 12,
};
41
/*
 * Device-level notification events delivered to the peripheral through
 * the gsi_per_props.notify_cb callback (see struct gsi_per_notify).
 * Values are made explicit; they match the original implicit numbering.
 */
enum gsi_per_evt {
	GSI_PER_EVT_GLOB_ERROR = 0,
	GSI_PER_EVT_GLOB_GP1 = 1,
	GSI_PER_EVT_GLOB_GP2 = 2,
	GSI_PER_EVT_GLOB_GP3 = 3,
	GSI_PER_EVT_GENERAL_BREAK_POINT = 4,
	GSI_PER_EVT_GENERAL_BUS_ERROR = 5,
	GSI_PER_EVT_GENERAL_CMD_FIFO_OVERFLOW = 6,
	GSI_PER_EVT_GENERAL_MCS_STACK_OVERFLOW = 7,
};
52
/**
 * gsi_per_notify - Peripheral callback info
 *
 * @user_data: cookie supplied in gsi_register_device
 * @evt_id: type of notification
 * @err_desc: error related information
 */
struct gsi_per_notify {
	void *user_data;
	enum gsi_per_evt evt_id;
	union {
		/* valid for the error-type events; encoding is HW defined */
		uint16_t err_desc;
	} data;
};
68
/* How control interrupts are delivered for the device or an event ring. */
enum gsi_intr_type {
	GSI_INTR_MSI = 0x0,	/* message-signaled interrupt */
	GSI_INTR_IRQ = 0x1	/* dedicated IRQ line */
};
73
74
/**
 * gsi_per_props - Peripheral related properties
 *
 * @ver: GSI core version
 * @ee: EE where this driver and peripheral driver runs
 * @intr: control interrupt type
 * @intvec: write data for MSI write
 * @msi_addr: MSI address
 * @irq: IRQ number
 * @phys_addr: physical address of GSI block
 * @size: register size of GSI block
 * @emulator_intcntrlr_addr: the location of emulator's interrupt control block
 * @emulator_intcntrlr_size: the size of emulator_intcntrlr_addr
 * @emulator_intcntrlr_client_isr: client's isr. Called by the emulator's isr
 * @mhi_er_id_limits_valid: valid flag for mhi_er_id_limits
 * @mhi_er_id_limits: MHI event ring start and end ids
 * @notify_cb: general notification callback
 * @req_clk_cb: callback to request peripheral clock
 *		granted should be set to true if request is completed
 *		synchronously, false otherwise (peripheral needs
 *		to call gsi_complete_clk_grant later when request is
 *		completed)
 *		if this callback is not provided, then GSI will assume
 *		peripheral is clocked at all times
 * @rel_clk_cb: callback to release peripheral clock
 * @user_data: cookie used for notifications
 *
 * All the callbacks are in interrupt context
 */
struct gsi_per_props {
	enum gsi_ver ver;
	unsigned int ee;
	enum gsi_intr_type intr;
	uint32_t intvec;
	uint64_t msi_addr;
	unsigned int irq;
	phys_addr_t phys_addr;
	unsigned long size;
	phys_addr_t emulator_intcntrlr_addr;
	unsigned long emulator_intcntrlr_size;
	irq_handler_t emulator_intcntrlr_client_isr;
	bool mhi_er_id_limits_valid;
	uint32_t mhi_er_id_limits[2];
	void (*notify_cb)(struct gsi_per_notify *notify);
	void (*req_clk_cb)(void *user_data, bool *granted);
	int (*rel_clk_cb)(void *user_data);
	void *user_data;
};
124
/* Event ring error types reported through struct gsi_evt_err_notify. */
enum gsi_evt_err {
	GSI_EVT_OUT_OF_BUFFERS_ERR = 0x0,
	GSI_EVT_OUT_OF_RESOURCES_ERR = 0x1,
	GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR = 0x2,
	GSI_EVT_EVT_RING_EMPTY_ERR = 0x3,
};
131
/**
 * gsi_evt_err_notify - event ring error callback info
 *
 * @user_data: cookie supplied in gsi_alloc_evt_ring
 * @evt_id: type of error
 * @err_desc: more info about the error
 */
struct gsi_evt_err_notify {
	void *user_data;
	enum gsi_evt_err evt_id;
	uint16_t err_desc;
};
145
/* Protocol of the channel(s) associated with an event ring. */
enum gsi_evt_chtype {
	GSI_EVT_CHTYPE_MHI_EV = 0x0,
	GSI_EVT_CHTYPE_XHCI_EV = 0x1,
	GSI_EVT_CHTYPE_GPI_EV = 0x2,
	GSI_EVT_CHTYPE_XDCI_EV = 0x3
};
152
/* Size, in bytes, of a single event ring element. */
enum gsi_evt_ring_elem_size {
	GSI_EVT_RING_RE_SIZE_4B = 4,
	GSI_EVT_RING_RE_SIZE_16B = 16,
};
157
/**
 * gsi_evt_ring_props - Event ring related properties
 *
 * @intf: interface type (of the associated channel)
 * @intr: interrupt type
 * @re_size: size of event ring element
 * @ring_len: length of ring in bytes (must be integral multiple of
 *	      re_size)
 * @ring_base_addr: physical base address of ring. Address must be aligned to
 *		    ring_len rounded to power of two
 * @ring_base_vaddr: virtual base address of ring (set to NULL when not
 *		     applicable)
 * @int_modt: cycles base interrupt moderation (32KHz clock)
 * @int_modc: interrupt moderation packet counter
 * @intvec: write data for MSI write
 * @msi_addr: MSI address
 * @rp_update_addr: physical address to which event read pointer should be
 *		    written on every event generation. must be set to 0 when
 *		    no update is desired
 * @exclusive: if true, only one GSI channel can be associated with this
 *	       event ring. if false, the event ring can be shared among
 *	       multiple GSI channels but in that case no polling
 *	       (GSI_CHAN_MODE_POLL) is supported on any of those channels
 * @err_cb: error notification callback
 * @user_data: cookie used for error notifications
 * @evchid_valid: is evchid valid?
 * @evchid: the event ID that is being specifically requested (this is
 *	    relevant for MHI where doorbell routing requires ERs to be
 *	    physically contiguous)
 */
struct gsi_evt_ring_props {
	enum gsi_evt_chtype intf;
	enum gsi_intr_type intr;
	enum gsi_evt_ring_elem_size re_size;
	uint16_t ring_len;
	uint64_t ring_base_addr;
	void *ring_base_vaddr;
	uint16_t int_modt;
	uint8_t int_modc;
	uint32_t intvec;
	uint64_t msi_addr;
	uint64_t rp_update_addr;
	bool exclusive;
	void (*err_cb)(struct gsi_evt_err_notify *notify);
	void *user_data;
	bool evchid_valid;
	uint8_t evchid;
};
206
/* Completion delivery mode of a channel: interrupt callbacks or polling. */
enum gsi_chan_mode {
	GSI_CHAN_MODE_CALLBACK = 0x0,
	GSI_CHAN_MODE_POLL = 0x1,
};
211
/* Protocol carried by a GSI channel. */
enum gsi_chan_prot {
	GSI_CHAN_PROT_MHI = 0x0,
	GSI_CHAN_PROT_XHCI = 0x1,
	GSI_CHAN_PROT_GPI = 0x2,
	GSI_CHAN_PROT_XDCI = 0x3
};
218
/* Data direction of a channel, relative to the GSI block. */
enum gsi_chan_dir {
	GSI_CHAN_DIR_FROM_GSI = 0x0,	/* inbound, GSI produces */
	GSI_CHAN_DIR_TO_GSI = 0x1	/* outbound, GSI consumes */
};
223
/* Maximum number of prefetch segments allowed for a channel. */
enum gsi_max_prefetch {
	GSI_ONE_PREFETCH_SEG = 0x0,
	GSI_TWO_PREFETCH_SEG = 0x1
};
228
/* Prefetch buffer usage policy for a channel. */
enum gsi_prefetch_mode {
	GSI_USE_PREFETCH_BUFS = 0x0,
	GSI_ESCAPE_BUF_ONLY = 0x1
};
233
/* Completion code reported per transfer element via gsi_chan_xfer_notify. */
enum gsi_chan_evt {
	GSI_CHAN_EVT_INVALID = 0x0,
	GSI_CHAN_EVT_SUCCESS = 0x1,
	GSI_CHAN_EVT_EOT = 0x2,
	GSI_CHAN_EVT_OVERFLOW = 0x3,
	GSI_CHAN_EVT_EOB = 0x4,
	GSI_CHAN_EVT_OOB = 0x5,
	GSI_CHAN_EVT_DB_MODE = 0x6,
	GSI_CHAN_EVT_UNDEFINED = 0x10,
	GSI_CHAN_EVT_RE_ERROR = 0x11,
};
245
/**
 * gsi_chan_xfer_notify - Channel callback info
 *
 * @chan_user_data: cookie supplied in gsi_alloc_channel
 * @xfer_user_data: cookie of the gsi_xfer_elem that caused the
 *		    event to be generated
 * @evt_id: type of event triggered by the associated TRE
 *	    (corresponding to xfer_user_data)
 * @bytes_xfered: number of bytes transferred by the associated TRE
 *		  (corresponding to xfer_user_data)
 */
struct gsi_chan_xfer_notify {
	void *chan_user_data;
	void *xfer_user_data;
	enum gsi_chan_evt evt_id;
	uint16_t bytes_xfered;
};
264
/* Channel error types reported through struct gsi_chan_err_notify. */
enum gsi_chan_err {
	GSI_CHAN_INVALID_TRE_ERR = 0x0,
	GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR = 0x1,
	GSI_CHAN_OUT_OF_BUFFERS_ERR = 0x2,
	GSI_CHAN_OUT_OF_RESOURCES_ERR = 0x3,
	GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR = 0x4,
	GSI_CHAN_HWO_1_ERR = 0x5
};
273
/**
 * gsi_chan_err_notify - Channel general callback info
 *
 * @chan_user_data: cookie supplied in gsi_alloc_channel
 * @evt_id: type of error
 * @err_desc: more info about the error
 */
struct gsi_chan_err_notify {
	void *chan_user_data;
	enum gsi_chan_err evt_id;
	uint16_t err_desc;
};
287
/* Size, in bytes, of a single channel (transfer) ring element. */
enum gsi_chan_ring_elem_size {
	GSI_CHAN_RE_SIZE_4B = 4,
	GSI_CHAN_RE_SIZE_16B = 16,
	GSI_CHAN_RE_SIZE_32B = 32,
};
293
/* Doorbell routing for a channel: direct to the RE engine or via DB engine. */
enum gsi_chan_use_db_eng {
	GSI_CHAN_DIRECT_MODE = 0x0,
	GSI_CHAN_DB_MODE = 0x1,
};
298
/**
 * gsi_chan_props - Channel related properties
 *
 * @prot: interface type
 * @dir: channel direction
 * @ch_id: virtual channel ID
 * @evt_ring_hdl: handle of associated event ring. set to ~0 if no
 *		  event ring associated
 * @re_size: size of channel ring element
 * @ring_len: length of ring in bytes (must be integral multiple of
 *	      re_size)
 * @max_re_expected: maximal number of ring elements expected to be queued.
 *		     used for data path statistics gathering. if 0 provided
 *		     ring_len / re_size will be used.
 * @ring_base_addr: physical base address of ring. Address must be aligned to
 *		    ring_len rounded to power of two
 * @ring_base_vaddr: virtual base address of ring (set to NULL when not
 *		     applicable)
 * @use_db_eng: 0 => direct mode (doorbells are written directly to RE
 *		engine)
 *		1 => DB mode (doorbells are written to DB engine)
 * @max_prefetch: limit number of pre-fetch segments for channel
 * @low_weight: low channel weight (priority of channel for RE engine
 *		round robin algorithm); must be >= 1
 * @prefetch_mode: use prefetch buffers or escape buffer only
 *		   (see enum gsi_prefetch_mode)
 * @xfer_cb: transfer notification callback, this callback happens
 *	     on event boundaries
 *
 *	     e.g. 1
 *
 *	     out TD with 3 REs
 *
 *	     RE1: EOT=0, EOB=0, CHAIN=1;
 *	     RE2: EOT=0, EOB=0, CHAIN=1;
 *	     RE3: EOT=1, EOB=0, CHAIN=0;
 *
 *	     the callback will be triggered for RE3 using the
 *	     xfer_user_data of that RE
 *
 *	     e.g. 2
 *
 *	     in REs
 *
 *	     RE1: EOT=1, EOB=0, CHAIN=0;
 *	     RE2: EOT=1, EOB=0, CHAIN=0;
 *	     RE3: EOT=1, EOB=0, CHAIN=0;
 *
 *	     received packet consumes all of RE1, RE2 and part of RE3
 *	     for EOT condition. there will be three callbacks in below
 *	     order
 *
 *	     callback for RE1 using GSI_CHAN_EVT_OVERFLOW
 *	     callback for RE2 using GSI_CHAN_EVT_OVERFLOW
 *	     callback for RE3 using GSI_CHAN_EVT_EOT
 *
 * @err_cb: error notification callback
 * @chan_user_data: cookie used for notifications
 * @common_evt_ring: Boolean indicating common event ring.
 *
 * All the callbacks are in interrupt context
 */
struct gsi_chan_props {
	enum gsi_chan_prot prot;
	enum gsi_chan_dir dir;
	uint8_t ch_id;
	unsigned long evt_ring_hdl;
	enum gsi_chan_ring_elem_size re_size;
	uint16_t ring_len;
	uint16_t max_re_expected;
	uint64_t ring_base_addr;
	void *ring_base_vaddr;
	enum gsi_chan_use_db_eng use_db_eng;
	enum gsi_max_prefetch max_prefetch;
	uint8_t low_weight;
	enum gsi_prefetch_mode prefetch_mode;
	void (*xfer_cb)(struct gsi_chan_xfer_notify *notify);
	void (*err_cb)(struct gsi_chan_err_notify *notify);
	void *chan_user_data;
	bool common_evt_ring;
};
378
/* Per-transfer-element flag bits; OR them into gsi_xfer_elem.flags. */
enum gsi_xfer_flag {
	GSI_XFER_FLAG_CHAIN = 0x1,
	GSI_XFER_FLAG_EOB = 0x100,
	GSI_XFER_FLAG_EOT = 0x200,
	GSI_XFER_FLAG_BEI = 0x400
};
385
/*
 * Kind of work described by a single transfer element; see
 * struct gsi_xfer_elem. Values are made explicit; they match the
 * original implicit numbering.
 */
enum gsi_xfer_elem_type {
	GSI_XFER_ELEM_DATA = 0,		/* data transfer */
	GSI_XFER_ELEM_IMME_CMD = 1,	/* IPA immediate command */
	GSI_XFER_ELEM_NOP = 2,		/* event generation only */
};
391
/**
 * gsi_xfer_elem - Metadata about a single transfer
 *
 * @addr: physical address of buffer
 * @len: size of buffer for GSI_XFER_ELEM_DATA:
 *	 for outbound transfers this is the number of bytes to
 *	 transfer.
 *	 for inbound transfers, this is the maximum number of
 *	 bytes the host expects from device in this transfer
 *
 *	 immediate command opcode for GSI_XFER_ELEM_IMME_CMD
 * @flags: transfer flags, OR of all the applicable flags
 *
 *	   GSI_XFER_FLAG_BEI: Block event interrupt
 *	   1: Event generated by this ring element must not assert
 *	   an interrupt to the host
 *	   0: Event generated by this ring element must assert an
 *	   interrupt to the host
 *
 *	   GSI_XFER_FLAG_EOT: Interrupt on end of transfer
 *	   1: If an EOT condition is encountered when processing
 *	   this ring element, an event is generated by the device
 *	   with its completion code set to EOT.
 *	   0: If an EOT condition is encountered for this ring
 *	   element, a completion event is not be generated by the
 *	   device, unless IEOB is 1
 *
 *	   GSI_XFER_FLAG_EOB: Interrupt on end of block
 *	   1: Device notifies host after processing this ring element
 *	   by sending a completion event
 *	   0: Completion event is not required after processing this
 *	   ring element
 *
 *	   GSI_XFER_FLAG_CHAIN: Chain bit that identifies the ring
 *	   elements in a TD
 *
 * @type: transfer type
 *
 *	  GSI_XFER_ELEM_DATA: for all data transfers
 *	  GSI_XFER_ELEM_IMME_CMD: for IPA immediate commands
 *	  GSI_XFER_ELEM_NOP: for event generation only
 *
 * @xfer_user_data: cookie used in xfer_cb
 */
struct gsi_xfer_elem {
	uint64_t addr;
	uint16_t len;
	uint16_t flags;
	enum gsi_xfer_elem_type type;
	void *xfer_user_data;
};
444
/**
 * gsi_gpi_channel_scratch - GPI protocol SW config area of
 * channel scratch
 *
 * @max_outstanding_tre: Used for the prefetch management sequence by the
 *			 sequencer. Defines the maximum number of allowed
 *			 outstanding TREs in IPA/GSI (in Bytes). RE engine
 *			 prefetch will be limited by this configuration. It
 *			 is suggested to configure this value to IPA_IF
 *			 channel TLV queue size times element size. To disable
 *			 the feature in doorbell mode (DB Mode=1). Maximum
 *			 outstanding TREs should be set to 64KB
 *			 (or any value larger or equal to ring length . RLEN)
 * @outstanding_threshold: Used for the prefetch management sequence by the
 *			   sequencer. Defines the threshold (in Bytes) as to
 *			   when to update the channel doorbell. Should be
 *			   smaller than Maximum outstanding TREs. value. It is
 *			   suggested to configure this value to 2 * element
 *			   size.
 */
struct __packed gsi_gpi_channel_scratch {
	/* bit layout mirrors the HW scratch registers; do not reorder */
	uint64_t resvd1;
	uint32_t resvd2:16;
	uint32_t max_outstanding_tre:16;
	uint32_t resvd3:16;
	uint32_t outstanding_threshold:16;
};
471
/**
 * gsi_mhi_channel_scratch - MHI protocol SW config area of
 * channel scratch
 *
 * @mhi_host_wp_addr: Valid only when UL/DL Sync En is asserted. Defines
 *		      address in host from which channel write pointer
 *		      should be read in polling mode
 * @assert_bit40: 1: bit #41 in address should be asserted upon
 *		  IPA_IF.ProcessDescriptor routine (for MHI over PCIe
 *		  transfers)
 *		  0: bit #41 in address should be deasserted upon
 *		  IPA_IF.ProcessDescriptor routine (for non-MHI over
 *		  PCIe transfers)
 * @polling_configuration: Uplink channels: Defines timer to poll on MHI
 *			   context. Range: 1 to 31 milliseconds.
 *			   Downlink channel: Defines transfer ring buffer
 *			   availability threshold to poll on MHI context in
 *			   multiple of 8. Range: 0 to 31, meaning 0 to 258 ring
 *			   elements. E.g., value of 2 indicates 16 ring
 *			   elements. Valid only when Burst Mode Enabled is set
 *			   to 1
 * @burst_mode_enabled: 0: Burst mode is disabled for this channel
 *			1: Burst mode is enabled for this channel
 * @polling_mode: 0: the channel is not in polling mode, meaning the
 *		  host should ring DBs.
 *		  1: the channel is in polling mode, meaning the host
 *		  should not ring DBs until notified of DB mode/OOB mode
 * @oob_mod_threshold: Defines OOB moderation threshold. Units are in 8
 *		       ring elements.
 * @max_outstanding_tre: Used for the prefetch management sequence by the
 *			 sequencer. Defines the maximum number of allowed
 *			 outstanding TREs in IPA/GSI (in Bytes). RE engine
 *			 prefetch will be limited by this configuration. It
 *			 is suggested to configure this value to IPA_IF
 *			 channel TLV queue size times element size.
 *			 To disable the feature in doorbell mode (DB Mode=1).
 *			 Maximum outstanding TREs should be set to 64KB
 *			 (or any value larger or equal to ring length . RLEN)
 * @outstanding_threshold: Used for the prefetch management sequence by the
 *			   sequencer. Defines the threshold (in Bytes) as to
 *			   when to update the channel doorbell. Should be
 *			   smaller than Maximum outstanding TREs. value. It is
 *			   suggested to configure this value to
 *			   min(TLV_FIFO_SIZE/2,8) * element size.
 */
struct __packed gsi_mhi_channel_scratch {
	/* bit layout mirrors the HW scratch registers; do not reorder */
	uint64_t mhi_host_wp_addr;
	uint32_t rsvd1:1;
	uint32_t assert_bit40:1;
	uint32_t polling_configuration:5;
	uint32_t burst_mode_enabled:1;
	uint32_t polling_mode:1;
	uint32_t oob_mod_threshold:5;
	uint32_t resvd2:2;
	uint32_t max_outstanding_tre:16;
	uint32_t resvd3:16;
	uint32_t outstanding_threshold:16;
};
529
/**
 * gsi_xdci_channel_scratch - xDCI protocol SW config area of
 * channel scratch
 *
 * @const_buffer_size: TRB buffer size in KB (similar to IPA aggregation
 *		       configuration). Must be aligned to Max USB Packet Size
 * @xferrscidx: Transfer Resource Index (XferRscIdx). The hardware-assigned
 *		transfer resource index for the transfer, which was
 *		returned in response to the Start Transfer command.
 *		This field is used for "Update Transfer" command
 * @last_trb_addr: Address (LSB - based on alignment restrictions) of
 *		   last TRB in queue. Used to identify rollover case
 * @depcmd_low_addr: Used to generate "Update Transfer" command
 * @max_outstanding_tre: Used for the prefetch management sequence by the
 *			 sequencer. Defines the maximum number of allowed
 *			 outstanding TREs in IPA/GSI (in Bytes). RE engine
 *			 prefetch will be limited by this configuration. It
 *			 is suggested to configure this value to IPA_IF
 *			 channel TLV queue size times element size.
 *			 To disable the feature in doorbell mode (DB Mode=1)
 *			 Maximum outstanding TREs should be set to 64KB
 *			 (or any value larger or equal to ring length . RLEN)
 * @depcmd_hi_addr: Used to generate "Update Transfer" command
 * @outstanding_threshold: Used for the prefetch management sequence by the
 *			   sequencer. Defines the threshold (in Bytes) as to
 *			   when to update the channel doorbell. Should be
 *			   smaller than Maximum outstanding TREs. value. It is
 *			   suggested to configure this value to 2 * element
 *			   size. for MBIM the suggested configuration is the
 *			   element size.
 */
struct __packed gsi_xdci_channel_scratch {
	/* bit layout mirrors the HW scratch registers; do not reorder */
	uint32_t last_trb_addr:16;
	uint32_t resvd1:4;
	uint32_t xferrscidx:7;
	uint32_t const_buffer_size:5;
	uint32_t depcmd_low_addr;
	uint32_t depcmd_hi_addr:8;
	uint32_t resvd2:8;
	uint32_t max_outstanding_tre:16;
	uint32_t resvd3:16;
	uint32_t outstanding_threshold:16;
};
572
/**
 * gsi_channel_scratch - channel scratch SW config area
 *
 * One view per protocol, plus a raw word-by-word view (@data) for
 * protocol-agnostic access.
 */
union __packed gsi_channel_scratch {
	struct __packed gsi_gpi_channel_scratch gpi;
	struct __packed gsi_mhi_channel_scratch mhi;
	struct __packed gsi_xdci_channel_scratch xdci;
	struct __packed {
		uint32_t word1;
		uint32_t word2;
		uint32_t word3;
		uint32_t word4;
	} data;
};
588
/**
 * gsi_mhi_evt_scratch - MHI protocol SW config area of
 * event scratch
 *
 * Both words are reserved for MHI; kept for layout compatibility.
 */
struct __packed gsi_mhi_evt_scratch {
	uint32_t resvd1;
	uint32_t resvd2;
};
597
/**
 * gsi_xdci_evt_scratch - xDCI protocol SW config area of
 * event scratch
 *
 * @gevntcount_low_addr: lower 32 bits of the GEVNTCOUNT register address
 * @gevntcount_hi_addr: upper 8 bits of the GEVNTCOUNT register address
 */
struct __packed gsi_xdci_evt_scratch {
	uint32_t gevntcount_low_addr;
	uint32_t gevntcount_hi_addr:8;
	uint32_t resvd1:24;
};
608
/**
 * gsi_evt_scratch - event scratch SW config area
 *
 * One view per protocol, plus a raw word-by-word view (@data).
 */
union __packed gsi_evt_scratch {
	struct __packed gsi_mhi_evt_scratch mhi;
	struct __packed gsi_xdci_evt_scratch xdci;
	struct __packed {
		uint32_t word1;
		uint32_t word2;
	} data;
};
621
/**
 * gsi_device_scratch - EE scratch config parameters
 *
 * @mhi_base_chan_idx_valid: is mhi_base_chan_idx valid?
 * @mhi_base_chan_idx: base index of IPA MHI channel indexes.
 *		       IPA MHI channel index = GSI channel ID +
 *		       MHI base channel index
 * @max_usb_pkt_size_valid: is max_usb_pkt_size valid?
 * @max_usb_pkt_size: max USB packet size in bytes (valid values are
 *		      512 and 1024)
 */
struct gsi_device_scratch {
	bool mhi_base_chan_idx_valid;
	uint8_t mhi_base_chan_idx;
	bool max_usb_pkt_size_valid;
	uint16_t max_usb_pkt_size;
};
639
/**
 * gsi_chan_info - information about channel occupancy
 *
 * @wp: channel write pointer (physical address)
 * @rp: channel read pointer (physical address)
 * @evt_valid: is evt* info valid?
 * @evt_wp: event ring write pointer (physical address)
 * @evt_rp: event ring read pointer (physical address)
 */
struct gsi_chan_info {
	uint64_t wp;
	uint64_t rp;
	bool evt_valid;
	uint64_t evt_wp;
	uint64_t evt_rp;
};
656
#ifdef CONFIG_GSI
/**
 * gsi_register_device - Peripheral should call this function to
 * register itself with GSI before invoking any other APIs
 *
 * @props: Peripheral properties
 * @dev_hdl: Handle populated by GSI, opaque to client
 *
 * @Return -GSI_STATUS_AGAIN if request should be re-tried later
 *	   other error codes for failure
 */
int gsi_register_device(struct gsi_per_props *props, unsigned long *dev_hdl);

/**
 * gsi_complete_clk_grant - Peripheral should call this function to
 * grant the clock resource requested by GSI previously that could not
 * be granted synchronously. GSI will release the clock resource using
 * the rel_clk_cb when appropriate
 *
 * @dev_hdl: Client handle previously obtained from
 *	     gsi_register_device
 *
 * @Return gsi_status
 */
int gsi_complete_clk_grant(unsigned long dev_hdl);

/**
 * gsi_write_device_scratch - Peripheral should call this function to
 * write to the EE scratch area
 *
 * @dev_hdl: Client handle previously obtained from
 *	     gsi_register_device
 * @val: Value to write
 *
 * @Return gsi_status
 */
int gsi_write_device_scratch(unsigned long dev_hdl,
		struct gsi_device_scratch *val);

/**
 * gsi_deregister_device - Peripheral should call this function to
 * de-register itself with GSI
 *
 * @dev_hdl: Client handle previously obtained from
 *	     gsi_register_device
 * @force: When set to true, cleanup is performed even if there
 *	   are in use resources like channels, event rings, etc.
 *	   this would be used after GSI reset to recover from some
 *	   fatal error
 *	   When set to false, there must not exist any allocated
 *	   channels and event rings.
 *
 * @Return gsi_status
 */
int gsi_deregister_device(unsigned long dev_hdl, bool force);
712
/**
 * gsi_alloc_evt_ring - Peripheral should call this function to
 * allocate an event ring
 *
 * @props: Event ring properties
 * @dev_hdl: Client handle previously obtained from
 *	     gsi_register_device
 * @evt_ring_hdl: Handle populated by GSI, opaque to client
 *
 * This function can sleep
 *
 * @Return gsi_status
 */
int gsi_alloc_evt_ring(struct gsi_evt_ring_props *props, unsigned long dev_hdl,
		unsigned long *evt_ring_hdl);

/**
 * gsi_write_evt_ring_scratch - Peripheral should call this function to
 * write to the scratch area of the event ring context
 *
 * @evt_ring_hdl: Client handle previously obtained from
 *		  gsi_alloc_evt_ring
 * @val: Value to write
 *
 * @Return gsi_status
 */
int gsi_write_evt_ring_scratch(unsigned long evt_ring_hdl,
		union __packed gsi_evt_scratch val);

/**
 * gsi_dealloc_evt_ring - Peripheral should call this function to
 * de-allocate an event ring. There should not exist any active
 * channels using this event ring
 *
 * @evt_ring_hdl: Client handle previously obtained from
 *		  gsi_alloc_evt_ring
 *
 * This function can sleep
 *
 * @Return gsi_status
 */
int gsi_dealloc_evt_ring(unsigned long evt_ring_hdl);

/**
 * gsi_query_evt_ring_db_addr - Peripheral should call this function to
 * query the physical addresses of the event ring doorbell registers
 *
 * @evt_ring_hdl: Client handle previously obtained from
 *		  gsi_alloc_evt_ring
 * @db_addr_wp_lsb: Physical address of doorbell register where the 32
 *		    LSBs of the doorbell value should be written
 * @db_addr_wp_msb: Physical address of doorbell register where the 32
 *		    MSBs of the doorbell value should be written
 *
 * @Return gsi_status
 */
int gsi_query_evt_ring_db_addr(unsigned long evt_ring_hdl,
		uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb);
771
/**
 * gsi_ring_evt_ring_db - Peripheral should call this function for
 * ringing the event ring doorbell with given value
 *
 * @evt_ring_hdl: Client handle previously obtained from
 *		  gsi_alloc_evt_ring
 * @value: The value to be used for ringing the doorbell
 *
 * @Return gsi_status
 */
int gsi_ring_evt_ring_db(unsigned long evt_ring_hdl, uint64_t value);

/**
 * gsi_reset_evt_ring - Peripheral should call this function to
 * reset an event ring to recover from error state
 *
 * @evt_ring_hdl: Client handle previously obtained from
 *		  gsi_alloc_evt_ring
 *
 * This function can sleep
 *
 * @Return gsi_status
 */
int gsi_reset_evt_ring(unsigned long evt_ring_hdl);

/**
 * gsi_get_evt_ring_cfg - This function returns the current config
 * of the specified event ring
 *
 * @evt_ring_hdl: Client handle previously obtained from
 *		  gsi_alloc_evt_ring
 * @props: where to copy properties to
 * @scr: where to copy scratch info to
 *
 * @Return gsi_status
 */
int gsi_get_evt_ring_cfg(unsigned long evt_ring_hdl,
		struct gsi_evt_ring_props *props, union gsi_evt_scratch *scr);

/**
 * gsi_set_evt_ring_cfg - This function applies the supplied config
 * to the specified event ring.
 *
 * exclusive property of the event ring cannot be changed after
 * gsi_alloc_evt_ring
 *
 * @evt_ring_hdl: Client handle previously obtained from
 *		  gsi_alloc_evt_ring
 * @props: the properties to apply
 * @scr: the scratch info to apply
 *
 * @Return gsi_status
 */
int gsi_set_evt_ring_cfg(unsigned long evt_ring_hdl,
		struct gsi_evt_ring_props *props, union gsi_evt_scratch *scr);
827
/**
 * gsi_alloc_channel - Peripheral should call this function to
 * allocate a channel
 *
 * @props: Channel properties
 * @dev_hdl: Client handle previously obtained from
 *	     gsi_register_device
 * @chan_hdl: Handle populated by GSI, opaque to client
 *
 * This function can sleep
 *
 * @Return gsi_status
 */
int gsi_alloc_channel(struct gsi_chan_props *props, unsigned long dev_hdl,
		unsigned long *chan_hdl);

/**
 * gsi_write_channel_scratch - Peripheral should call this function to
 * write to the scratch area of the channel context
 *
 * @chan_hdl: Client handle previously obtained from
 *	      gsi_alloc_channel
 * @val: Value to write
 *
 * @Return gsi_status
 */
int gsi_write_channel_scratch(unsigned long chan_hdl,
		union __packed gsi_channel_scratch val);

/**
 * gsi_read_channel_scratch - Peripheral should call this function to
 * read the scratch area of the channel context
 *
 * @chan_hdl: Client handle previously obtained from
 *	      gsi_alloc_channel
 * @ch_scratch: where to copy the scratch info to
 *
 * @Return gsi_status
 */
int gsi_read_channel_scratch(unsigned long chan_hdl,
		union __packed gsi_channel_scratch *ch_scratch);
868
/**
 * gsi_start_channel - Peripheral should call this function to
 * start a channel i.e put into running state
 *
 * @chan_hdl: Client handle previously obtained from
 *	      gsi_alloc_channel
 *
 * This function can sleep
 *
 * @Return gsi_status
 */
int gsi_start_channel(unsigned long chan_hdl);

/**
 * gsi_stop_channel - Peripheral should call this function to
 * stop a channel. Stop will happen on a packet boundary
 *
 * @chan_hdl: Client handle previously obtained from
 *	      gsi_alloc_channel
 *
 * This function can sleep
 *
 * @Return -GSI_STATUS_AGAIN if client should call stop/stop_db again
 *	   other error codes for failure
 */
int gsi_stop_channel(unsigned long chan_hdl);

/**
 * gsi_reset_channel - Peripheral should call this function to
 * reset a channel to recover from error state
 *
 * @chan_hdl: Client handle previously obtained from
 *	      gsi_alloc_channel
 *
 * This function can sleep
 *
 * @Return gsi_status
 */
int gsi_reset_channel(unsigned long chan_hdl);

/**
 * gsi_dealloc_channel - Peripheral should call this function to
 * de-allocate a channel
 *
 * @chan_hdl: Client handle previously obtained from
 *	      gsi_alloc_channel
 *
 * This function can sleep
 *
 * @Return gsi_status
 */
int gsi_dealloc_channel(unsigned long chan_hdl);

/**
 * gsi_stop_db_channel - Peripheral should call this function to
 * stop a channel when all transfer elements till the doorbell
 * have been processed
 *
 * @chan_hdl: Client handle previously obtained from
 *	      gsi_alloc_channel
 *
 * This function can sleep
 *
 * @Return -GSI_STATUS_AGAIN if client should call stop/stop_db again
 *	   other error codes for failure
 */
int gsi_stop_db_channel(unsigned long chan_hdl);

/**
 * gsi_query_channel_db_addr - Peripheral should call this function to
 * query the physical addresses of the channel doorbell registers
 *
 * @chan_hdl: Client handle previously obtained from
 *	      gsi_alloc_channel
 * @db_addr_wp_lsb: Physical address of doorbell register where the 32
 *		    LSBs of the doorbell value should be written
 * @db_addr_wp_msb: Physical address of doorbell register where the 32
 *		    MSBs of the doorbell value should be written
 *
 * @Return gsi_status
 */
int gsi_query_channel_db_addr(unsigned long chan_hdl,
		uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb);

/**
 * gsi_query_channel_info - Peripheral can call this function to query the
 * channel and associated event ring (if any) status.
 *
 * @chan_hdl: Client handle previously obtained from
 *	      gsi_alloc_channel
 * @info: Where to read the values into
 *
 * @Return gsi_status
 */
int gsi_query_channel_info(unsigned long chan_hdl,
		struct gsi_chan_info *info);

/**
 * gsi_is_channel_empty - Peripheral can call this function to query if
 * the channel is empty. This is only applicable to GPI. "Empty" means
 * GSI has consumed all descriptors for a TO_GSI channel and SW has
 * processed all completed descriptors for a FROM_GSI channel.
 *
 * @chan_hdl: Client handle previously obtained from gsi_alloc_channel
 * @is_empty: set by GSI based on channel emptiness
 *
 * @Return gsi_status
 */
int gsi_is_channel_empty(unsigned long chan_hdl, bool *is_empty);

/**
 * gsi_get_channel_cfg - This function returns the current config
 * of the specified channel
 *
 * @chan_hdl: Client handle previously obtained from
 *	      gsi_alloc_channel
 * @props: where to copy properties to
 * @scr: where to copy scratch info to
 *
 * @Return gsi_status
 */
int gsi_get_channel_cfg(unsigned long chan_hdl, struct gsi_chan_props *props,
		union gsi_channel_scratch *scr);
992
993/**
994 * gsi_set_channel_cfg - This function applies the supplied config
995 * to the specified channel
996 *
997 * ch_id and evt_ring_hdl of the channel cannot be changed after
998 * gsi_alloc_channel
999 *
1000 * @chan_hdl: Client handle previously obtained from
1001 * gsi_alloc_channel
1002 * @props: the properties to apply
1003 * @scr: the scratch info to apply
1004 *
1005 * @Return gsi_status
1006 */
1007int gsi_set_channel_cfg(unsigned long chan_hdl, struct gsi_chan_props *props,
1008 union gsi_channel_scratch *scr);
1009
1010/**
1011 * gsi_poll_channel - Peripheral should call this function to query for
1012 * completed transfer descriptors.
1013 *
1014 * @chan_hdl: Client handle previously obtained from
1015 * gsi_alloc_channel
1016 * @notify: Information about the completed transfer if any
1017 *
1018 * @Return gsi_status (GSI_STATUS_POLL_EMPTY is returned if no transfers
1019 * completed)
1020 */
1021int gsi_poll_channel(unsigned long chan_hdl,
1022 struct gsi_chan_xfer_notify *notify);
1023
1024/**
1025 * gsi_config_channel_mode - Peripheral should call this function
1026 * to configure the channel mode.
1027 *
1028 * @chan_hdl: Client handle previously obtained from
1029 * gsi_alloc_channel
1030 * @mode: Mode to move the channel into
1031 *
1032 * @Return gsi_status
1033 */
1034int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode);
1035
1036/**
1037 * gsi_queue_xfer - Peripheral should call this function
1038 * to queue transfers on the given channel
1039 *
1040 * @chan_hdl: Client handle previously obtained from
1041 * gsi_alloc_channel
 * @num_xfers: Number of transfers in the array @xfer
1043 * @xfer: Array of num_xfers transfer descriptors
1044 * @ring_db: If true, tell HW about these queued xfers
1045 * If false, do not notify HW at this time
1046 *
1047 * @Return gsi_status
1048 */
1049int gsi_queue_xfer(unsigned long chan_hdl, uint16_t num_xfers,
1050 struct gsi_xfer_elem *xfer, bool ring_db);
1051
1052/**
1053 * gsi_start_xfer - Peripheral should call this function to
1054 * inform HW about queued xfers
1055 *
1056 * @chan_hdl: Client handle previously obtained from
1057 * gsi_alloc_channel
1058 *
1059 * @Return gsi_status
1060 */
1061int gsi_start_xfer(unsigned long chan_hdl);
1062
1063/**
1064 * gsi_configure_regs - Peripheral should call this function
1065 * to configure the GSI registers before/after the FW is
1066 * loaded but before it is enabled.
1067 *
1068 * @gsi_base_addr: Base address of GSI register space
1069 * @gsi_size: Mapping size of the GSI register space
1070 * @per_base_addr: Base address of the peripheral using GSI
1071 *
1072 * @Return gsi_status
1073 */
1074int gsi_configure_regs(phys_addr_t gsi_base_addr, u32 gsi_size,
1075 phys_addr_t per_base_addr);
1076
1077/**
1078 * gsi_enable_fw - Peripheral should call this function
1079 * to enable the GSI FW after the FW has been loaded to the SRAM.
1080 *
1081 * @gsi_base_addr: Base address of GSI register space
1082 * @gsi_size: Mapping size of the GSI register space
Amir Levy85dcd172016-12-06 17:47:39 +02001083 * @ver: GSI core version
 *
1085 * @Return gsi_status
1086 */
Amir Levy85dcd172016-12-06 17:47:39 +02001087int gsi_enable_fw(phys_addr_t gsi_base_addr, u32 gsi_size, enum gsi_ver ver);
Amir Levycdccd632016-10-30 09:36:41 +02001088
Ghanim Fodi37b64952017-01-24 15:42:30 +02001089/**
1090 * gsi_get_inst_ram_offset_and_size - Peripheral should call this function
1091 * to get instruction RAM base address offset and size. Peripheral typically
1092 * uses this info to load GSI FW into the IRAM.
1093 *
1094 * @base_offset:[OUT] - IRAM base offset address
1095 * @size: [OUT] - IRAM size
 *
1097 * @Return none
1098 */
1099void gsi_get_inst_ram_offset_and_size(unsigned long *base_offset,
1100 unsigned long *size);
1101
Skylar Changc9939cf2017-02-21 09:46:46 -08001102/**
1103 * gsi_halt_channel_ee - Peripheral should call this function
 * to stop other EE's channel. This is usually used in SSR cleanup
1105 *
1106 * @chan_idx: Virtual channel index
1107 * @ee: EE
1108 * @code: [out] response code for operation
 *
1110 * @Return gsi_status
1111 */
1112int gsi_halt_channel_ee(unsigned int chan_idx, unsigned int ee, int *code);
1113
Amir Levycdccd632016-10-30 09:36:41 +02001114/*
1115 * Here is a typical sequence of calls
1116 *
1117 * gsi_register_device
1118 *
1119 * gsi_write_device_scratch (if the protocol needs this)
1120 *
1121 * gsi_alloc_evt_ring (for as many event rings as needed)
1122 * gsi_write_evt_ring_scratch
1123 *
1124 * gsi_alloc_channel (for as many channels as needed; channels can have
1125 * no event ring, an exclusive event ring or a shared event ring)
1126 * gsi_write_channel_scratch
Mohammed Javid7e4bcb42018-05-31 19:07:58 +05301127 * gsi_read_channel_scratch
Amir Levycdccd632016-10-30 09:36:41 +02001128 * gsi_start_channel
1129 * gsi_queue_xfer/gsi_start_xfer
1130 * gsi_config_channel_mode/gsi_poll_channel (if clients wants to poll on
1131 * xfer completions)
1132 * gsi_stop_db_channel/gsi_stop_channel
1133 *
1134 * gsi_dealloc_channel
1135 *
1136 * gsi_dealloc_evt_ring
1137 *
1138 * gsi_deregister_device
1139 *
1140 */
1141#else
/* Stub for the non-GSI build (#else branch): registration always fails. */
static inline int gsi_register_device(struct gsi_per_props *props,
		unsigned long *dev_hdl)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
1147
/* Stub for the non-GSI build (#else branch): always fails. */
static inline int gsi_complete_clk_grant(unsigned long dev_hdl)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
1152
/* Stub for the non-GSI build (#else branch): always fails. */
static inline int gsi_write_device_scratch(unsigned long dev_hdl,
		struct gsi_device_scratch *val)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
1158
/* Stub for the non-GSI build (#else branch): always fails. */
static inline int gsi_deregister_device(unsigned long dev_hdl, bool force)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
1163
/* Stub for the non-GSI build (#else branch): allocation always fails. */
static inline int gsi_alloc_evt_ring(struct gsi_evt_ring_props *props,
		unsigned long dev_hdl,
		unsigned long *evt_ring_hdl)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
1170
/* Stub for the non-GSI build (#else branch): always fails. */
static inline int gsi_write_evt_ring_scratch(unsigned long evt_ring_hdl,
		union __packed gsi_evt_scratch val)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
1176
/* Stub for the non-GSI build (#else branch): always fails. */
static inline int gsi_dealloc_evt_ring(unsigned long evt_ring_hdl)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
1181
/* Stub for the non-GSI build (#else branch): outputs are not written. */
static inline int gsi_query_evt_ring_db_addr(unsigned long evt_ring_hdl,
		uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
1187
/* Stub for the non-GSI build (#else branch): always fails. */
static inline int gsi_ring_evt_ring_db(unsigned long evt_ring_hdl,
		uint64_t value)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
1193
/* Stub for the non-GSI build (#else branch): always fails. */
static inline int gsi_reset_evt_ring(unsigned long evt_ring_hdl)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
1198
/* Stub for the non-GSI build (#else branch): allocation always fails. */
static inline int gsi_alloc_channel(struct gsi_chan_props *props,
		unsigned long dev_hdl,
		unsigned long *chan_hdl)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
1205
/* Stub for the non-GSI build (#else branch): always fails. */
static inline int gsi_write_channel_scratch(unsigned long chan_hdl,
		union __packed gsi_channel_scratch val)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
1211
/* Stub for the non-GSI build (#else branch): *ch_scratch is not written. */
static inline int gsi_read_channel_scratch(unsigned long chan_hdl,
		union __packed gsi_channel_scratch *ch_scratch)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
1217
/* Stub for the non-GSI build (#else branch): always fails. */
static inline int gsi_start_channel(unsigned long chan_hdl)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
1222
/* Stub for the non-GSI build (#else branch): always fails. */
static inline int gsi_stop_channel(unsigned long chan_hdl)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
1227
/* Stub for the non-GSI build (#else branch): always fails. */
static inline int gsi_reset_channel(unsigned long chan_hdl)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
1232
/* Stub for the non-GSI build (#else branch): always fails. */
static inline int gsi_dealloc_channel(unsigned long chan_hdl)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
1237
/* Stub for the non-GSI build (#else branch): always fails. */
static inline int gsi_stop_db_channel(unsigned long chan_hdl)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
1242
/* Stub for the non-GSI build (#else branch): outputs are not written. */
static inline int gsi_query_channel_db_addr(unsigned long chan_hdl,
		uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
1248
/* Stub for the non-GSI build (#else branch): *info is not written. */
static inline int gsi_query_channel_info(unsigned long chan_hdl,
		struct gsi_chan_info *info)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
1254
/* Stub for the non-GSI build (#else branch): *is_empty is not written. */
static inline int gsi_is_channel_empty(unsigned long chan_hdl, bool *is_empty)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
1259
/* Stub for the non-GSI build (#else branch): *notify is not written. */
static inline int gsi_poll_channel(unsigned long chan_hdl,
		struct gsi_chan_xfer_notify *notify)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
1265
/* Stub for the non-GSI build (#else branch): always fails. */
static inline int gsi_config_channel_mode(unsigned long chan_hdl,
		enum gsi_chan_mode mode)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
1271
/* Stub for the non-GSI build (#else branch): nothing is queued. */
static inline int gsi_queue_xfer(unsigned long chan_hdl, uint16_t num_xfers,
		struct gsi_xfer_elem *xfer, bool ring_db)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
1277
/* Stub for the non-GSI build (#else branch): always fails. */
static inline int gsi_start_xfer(unsigned long chan_hdl)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
1282
/* Stub for the non-GSI build (#else branch): outputs are not written. */
static inline int gsi_get_channel_cfg(unsigned long chan_hdl,
		struct gsi_chan_props *props,
		union gsi_channel_scratch *scr)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
1289
/* Stub for the non-GSI build (#else branch): always fails. */
static inline int gsi_set_channel_cfg(unsigned long chan_hdl,
		struct gsi_chan_props *props,
		union gsi_channel_scratch *scr)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
1296
/* Stub for the non-GSI build (#else branch): outputs are not written. */
static inline int gsi_get_evt_ring_cfg(unsigned long evt_ring_hdl,
		struct gsi_evt_ring_props *props, union gsi_evt_scratch *scr)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
1302
/* Stub for the non-GSI build (#else branch): always fails. */
static inline int gsi_set_evt_ring_cfg(unsigned long evt_ring_hdl,
		struct gsi_evt_ring_props *props, union gsi_evt_scratch *scr)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
1308
/* Stub for the non-GSI build (#else branch): always fails. */
static inline int gsi_configure_regs(phys_addr_t gsi_base_addr, u32 gsi_size,
		phys_addr_t per_base_addr)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
Ghanim Fodi37b64952017-01-24 15:42:30 +02001314
/* Stub for the non-GSI build (#else branch): FW cannot be enabled. */
static inline int gsi_enable_fw(
	phys_addr_t gsi_base_addr, u32 gsi_size, enum gsi_ver ver)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
Ghanim Fodi37b64952017-01-24 15:42:30 +02001320
/*
 * Stub for the non-GSI build (#else branch).
 *
 * Both parameters are documented as [OUT] in the API comment above; the
 * original stub left them untouched, so a caller reading them after this
 * call would consume uninitialized memory. Report a zero-offset,
 * zero-sized IRAM region instead (NULL pointers are tolerated, matching
 * the void return: there is no status to report a bad argument with).
 */
static inline void gsi_get_inst_ram_offset_and_size(unsigned long *base_offset,
		unsigned long *size)
{
	if (base_offset)
		*base_offset = 0;
	if (size)
		*size = 0;
}
Skylar Changc9939cf2017-02-21 09:46:46 -08001325
/* Stub for the non-GSI build (#else branch): *code is not written. */
static inline int gsi_halt_channel_ee(unsigned int chan_idx, unsigned int ee,
		int *code)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
Amir Levycdccd632016-10-30 09:36:41 +02001331#endif
1332#endif