blob: ebca4469489886fae9de368349962325f0f69312 [file] [log] [blame]
Ghanim Fodi37b64952017-01-24 15:42:30 +02001/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
Amir Levycdccd632016-10-30 09:36:41 +02002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12#ifndef MSM_GSI_H
13#define MSM_GSI_H
14#include <linux/types.h>
15
/*
 * enum gsi_ver - GSI core hardware version supplied in gsi_per_props.ver.
 * GSI_VER_ERR is an invalid/"not set" sentinel and GSI_VER_MAX bounds the
 * valid range.
 */
enum gsi_ver {
	GSI_VER_ERR = 0,
	GSI_VER_1_0 = 1,
	GSI_VER_1_2 = 2,
	GSI_VER_1_3 = 3,
	GSI_VER_2_0 = 4,
	GSI_VER_MAX,
};
24
/*
 * enum gsi_status - return codes of the gsi_* APIs declared in this header
 * (each API returns these values as an int).
 */
enum gsi_status {
	GSI_STATUS_SUCCESS = 0,
	GSI_STATUS_ERROR = 1,
	GSI_STATUS_RING_INSUFFICIENT_SPACE = 2,
	GSI_STATUS_RING_EMPTY = 3,
	GSI_STATUS_RES_ALLOC_FAILURE = 4,
	GSI_STATUS_BAD_STATE = 5,
	GSI_STATUS_INVALID_PARAMS = 6,
	GSI_STATUS_UNSUPPORTED_OP = 7,
	GSI_STATUS_NODEV = 8,
	GSI_STATUS_POLL_EMPTY = 9,
	GSI_STATUS_EVT_RING_INCOMPATIBLE = 10,
	GSI_STATUS_TIMED_OUT = 11,
	GSI_STATUS_AGAIN = 12,
};
40
/*
 * enum gsi_per_evt - device-level notification type delivered to the
 * peripheral through gsi_per_props.notify_cb (global GP/error events and
 * general hardware error conditions).
 */
enum gsi_per_evt {
	GSI_PER_EVT_GLOB_ERROR,
	GSI_PER_EVT_GLOB_GP1,
	GSI_PER_EVT_GLOB_GP2,
	GSI_PER_EVT_GLOB_GP3,
	GSI_PER_EVT_GENERAL_BREAK_POINT,
	GSI_PER_EVT_GENERAL_BUS_ERROR,
	GSI_PER_EVT_GENERAL_CMD_FIFO_OVERFLOW,
	GSI_PER_EVT_GENERAL_MCS_STACK_OVERFLOW,
};
51
/**
 * gsi_per_notify - Peripheral callback info, passed to
 * gsi_per_props.notify_cb
 *
 * @user_data: cookie supplied in gsi_register_device
 * @evt_id: type of notification
 * @err_desc: error related information
 *
 */
struct gsi_per_notify {
	void *user_data;
	enum gsi_per_evt evt_id;
	/* event-specific payload */
	union {
		uint16_t err_desc;
	} data;
};
67
/* Type of the control interrupt used between GSI and the peripheral */
enum gsi_intr_type {
	GSI_INTR_MSI = 0x0,
	GSI_INTR_IRQ = 0x1
};
72
73
/**
 * gsi_per_props - Peripheral related properties
 *
 * @ver: GSI core version
 * @ee: EE where this driver and peripheral driver runs
 * @intr: control interrupt type
 * @intvec: write data for MSI write
 * @msi_addr: MSI address
 * @irq: IRQ number
 * @phys_addr: physical address of GSI block
 * @size: register size of GSI block
 * @notify_cb: general notification callback
 * @req_clk_cb: callback to request peripheral clock
 *		granted should be set to true if request is completed
 *		synchronously, false otherwise (peripheral needs
 *		to call gsi_complete_clk_grant later when request is
 *		completed)
 *		if this callback is not provided, then GSI will assume
 *		peripheral is clocked at all times
 * @rel_clk_cb: callback to release peripheral clock
 * @user_data: cookie used for notifications
 *
 * All the callbacks are in interrupt context
 *
 */
struct gsi_per_props {
	enum gsi_ver ver;
	unsigned int ee;
	enum gsi_intr_type intr;
	uint32_t intvec;
	uint64_t msi_addr;
	unsigned int irq;
	phys_addr_t phys_addr;
	unsigned long size;
	void (*notify_cb)(struct gsi_per_notify *notify);
	void (*req_clk_cb)(void *user_data, bool *granted);
	int (*rel_clk_cb)(void *user_data);
	void *user_data;
};
113
/* Event ring error types reported via gsi_evt_err_notify.evt_id */
enum gsi_evt_err {
	GSI_EVT_OUT_OF_BUFFERS_ERR = 0x0,
	GSI_EVT_OUT_OF_RESOURCES_ERR = 0x1,
	GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR = 0x2,
	GSI_EVT_EVT_RING_EMPTY_ERR = 0x3,
};
120
/**
 * gsi_evt_err_notify - event ring error callback info, passed to
 * gsi_evt_ring_props.err_cb
 *
 * @user_data: cookie supplied in gsi_alloc_evt_ring
 * @evt_id: type of error
 * @err_desc: more info about the error
 *
 */
struct gsi_evt_err_notify {
	void *user_data;
	enum gsi_evt_err evt_id;
	uint16_t err_desc;
};
134
/* Protocol of the channel associated with an event ring */
enum gsi_evt_chtype {
	GSI_EVT_CHTYPE_MHI_EV = 0x0,
	GSI_EVT_CHTYPE_XHCI_EV = 0x1,
	GSI_EVT_CHTYPE_GPI_EV = 0x2,
	GSI_EVT_CHTYPE_XDCI_EV = 0x3
};

/* Size, in bytes, of a single event ring element */
enum gsi_evt_ring_elem_size {
	GSI_EVT_RING_RE_SIZE_4B = 4,
	GSI_EVT_RING_RE_SIZE_16B = 16,
};
146
/**
 * gsi_evt_ring_props - Event ring related properties
 *
 * @intf: interface type (of the associated channel)
 * @intr: interrupt type
 * @re_size: size of event ring element
 * @ring_len: length of ring in bytes (must be integral multiple of
 *            re_size)
 * @ring_base_addr: physical base address of ring. Address must be aligned to
 *		    ring_len rounded to power of two
 * @ring_base_vaddr: virtual base address of ring (set to NULL when not
 *                   applicable)
 * @int_modt: cycles base interrupt moderation (32KHz clock)
 * @int_modc: interrupt moderation packet counter
 * @intvec: write data for MSI write
 * @msi_addr: MSI address
 * @rp_update_addr: physical address to which event read pointer should be
 *                  written on every event generation. must be set to 0 when
 *                  no update is desired
 * @exclusive: if true, only one GSI channel can be associated with this
 *             event ring. if false, the event ring can be shared among
 *             multiple GSI channels but in that case no polling
 *             (GSI_CHAN_MODE_POLL) is supported on any of those channels
 * @err_cb: error notification callback
 * @user_data: cookie used for error notifications
 * @evchid_valid: is evchid valid?
 * @evchid: the event ID that is being specifically requested (this is
 *          relevant for MHI where doorbell routing requires ERs to be
 *          physically contiguous)
 */
struct gsi_evt_ring_props {
	enum gsi_evt_chtype intf;
	enum gsi_intr_type intr;
	enum gsi_evt_ring_elem_size re_size;
	uint16_t ring_len;
	uint64_t ring_base_addr;
	void *ring_base_vaddr;
	uint16_t int_modt;
	uint8_t int_modc;
	uint32_t intvec;
	uint64_t msi_addr;
	uint64_t rp_update_addr;
	bool exclusive;
	void (*err_cb)(struct gsi_evt_err_notify *notify);
	void *user_data;
	bool evchid_valid;
	uint8_t evchid;
};
195
/* Completion delivery mode of a channel: per-event callback or polling */
enum gsi_chan_mode {
	GSI_CHAN_MODE_CALLBACK = 0x0,
	GSI_CHAN_MODE_POLL = 0x1,
};

/* Protocol carried over a channel */
enum gsi_chan_prot {
	GSI_CHAN_PROT_MHI = 0x0,
	GSI_CHAN_PROT_XHCI = 0x1,
	GSI_CHAN_PROT_GPI = 0x2,
	GSI_CHAN_PROT_XDCI = 0x3
};

/* Data direction of a channel, relative to the GSI core */
enum gsi_chan_dir {
	GSI_CHAN_DIR_FROM_GSI = 0x0,
	GSI_CHAN_DIR_TO_GSI = 0x1
};

/* Limit on the number of prefetch segments used for a channel */
enum gsi_max_prefetch {
	GSI_ONE_PREFETCH_SEG = 0x0,
	GSI_TWO_PREFETCH_SEG = 0x1
};
217
/*
 * enum gsi_chan_evt - completion code of a processed ring element,
 * reported to the peripheral via gsi_chan_xfer_notify.evt_id.
 */
enum gsi_chan_evt {
	GSI_CHAN_EVT_INVALID = 0x0,
	GSI_CHAN_EVT_SUCCESS = 0x1,
	GSI_CHAN_EVT_EOT = 0x2,
	GSI_CHAN_EVT_OVERFLOW = 0x3,
	GSI_CHAN_EVT_EOB = 0x4,
	GSI_CHAN_EVT_OOB = 0x5,
	GSI_CHAN_EVT_DB_MODE = 0x6,
	GSI_CHAN_EVT_UNDEFINED = 0x10,
	GSI_CHAN_EVT_RE_ERROR = 0x11,
};
229
/**
 * gsi_chan_xfer_notify - Channel callback info, passed to
 * gsi_chan_props.xfer_cb and filled by gsi_poll_channel
 *
 * @chan_user_data: cookie supplied in gsi_alloc_channel
 * @xfer_user_data: cookie of the gsi_xfer_elem that caused the
 *                  event to be generated
 * @evt_id: type of event triggered by the associated TRE
 *          (corresponding to xfer_user_data)
 * @bytes_xfered: number of bytes transferred by the associated TRE
 *                (corresponding to xfer_user_data)
 *
 */
struct gsi_chan_xfer_notify {
	void *chan_user_data;
	void *xfer_user_data;
	enum gsi_chan_evt evt_id;
	uint16_t bytes_xfered;
};
248
/* Channel error types reported via gsi_chan_err_notify.evt_id */
enum gsi_chan_err {
	GSI_CHAN_INVALID_TRE_ERR = 0x0,
	GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR = 0x1,
	GSI_CHAN_OUT_OF_BUFFERS_ERR = 0x2,
	GSI_CHAN_OUT_OF_RESOURCES_ERR = 0x3,
	GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR = 0x4,
	GSI_CHAN_HWO_1_ERR = 0x5
};
257
/**
 * gsi_chan_err_notify - Channel general callback info, passed to
 * gsi_chan_props.err_cb
 *
 * @chan_user_data: cookie supplied in gsi_alloc_channel
 * @evt_id: type of error
 * @err_desc: more info about the error
 *
 */
struct gsi_chan_err_notify {
	void *chan_user_data;
	enum gsi_chan_err evt_id;
	uint16_t err_desc;
};
271
/* Size, in bytes, of a single transfer ring element */
enum gsi_chan_ring_elem_size {
	GSI_CHAN_RE_SIZE_4B = 4,
	GSI_CHAN_RE_SIZE_16B = 16,
	GSI_CHAN_RE_SIZE_32B = 32,
};

/*
 * Doorbell routing for a channel: doorbells written directly to the RE
 * engine (direct mode) or through the DB engine (DB mode). See
 * gsi_chan_props.use_db_eng.
 */
enum gsi_chan_use_db_eng {
	GSI_CHAN_DIRECT_MODE = 0x0,
	GSI_CHAN_DB_MODE = 0x1,
};
282
/**
 * gsi_chan_props - Channel related properties
 *
 * @prot: interface type
 * @dir: channel direction
 * @ch_id: virtual channel ID
 * @evt_ring_hdl: handle of associated event ring. set to ~0 if no
 *                event ring associated
 * @re_size: size of channel ring element
 * @ring_len: length of ring in bytes (must be integral multiple of
 *            re_size)
 * @max_re_expected: maximal number of ring elements expected to be queued.
 *                   used for data path statistics gathering. if 0 provided
 *                   ring_len / re_size will be used.
 * @ring_base_addr: physical base address of ring. Address must be aligned to
 *		    ring_len rounded to power of two
 * @ring_base_vaddr: virtual base address of ring (set to NULL when not
 *                   applicable)
 * @use_db_eng: 0 => direct mode (doorbells are written directly to RE
 *		engine)
 *		1 => DB mode (doorbells are written to DB engine)
 * @max_prefetch: limit number of pre-fetch segments for channel
 * @low_weight: low channel weight (priority of channel for RE engine
 *              round robin algorithm); must be >= 1
 * @xfer_cb: transfer notification callback, this callback happens
 *           on event boundaries
 *
 *           e.g. 1
 *
 *           out TD with 3 REs
 *
 *           RE1: EOT=0, EOB=0, CHAIN=1;
 *           RE2: EOT=0, EOB=0, CHAIN=1;
 *           RE3: EOT=1, EOB=0, CHAIN=0;
 *
 *           the callback will be triggered for RE3 using the
 *           xfer_user_data of that RE
 *
 *           e.g. 2
 *
 *           in REs
 *
 *           RE1: EOT=1, EOB=0, CHAIN=0;
 *           RE2: EOT=1, EOB=0, CHAIN=0;
 *           RE3: EOT=1, EOB=0, CHAIN=0;
 *
 *	     received packet consumes all of RE1, RE2 and part of RE3
 *	     for EOT condition. there will be three callbacks in below
 *	     order
 *
 *	     callback for RE1 using GSI_CHAN_EVT_OVERFLOW
 *	     callback for RE2 using GSI_CHAN_EVT_OVERFLOW
 *	     callback for RE3 using GSI_CHAN_EVT_EOT
 *
 * @err_cb: error notification callback
 * @chan_user_data: cookie used for notifications
 *
 * All the callbacks are in interrupt context
 *
 */
struct gsi_chan_props {
	enum gsi_chan_prot prot;
	enum gsi_chan_dir dir;
	uint8_t ch_id;
	unsigned long evt_ring_hdl;
	enum gsi_chan_ring_elem_size re_size;
	uint16_t ring_len;
	uint16_t max_re_expected;
	uint64_t ring_base_addr;
	void *ring_base_vaddr;
	enum gsi_chan_use_db_eng use_db_eng;
	enum gsi_max_prefetch max_prefetch;
	uint8_t low_weight;
	void (*xfer_cb)(struct gsi_chan_xfer_notify *notify);
	void (*err_cb)(struct gsi_chan_err_notify *notify);
	void *chan_user_data;
};
360
/*
 * Per-element transfer flags (OR-able bits); see the gsi_xfer_elem
 * documentation below for the semantics of each flag.
 */
enum gsi_xfer_flag {
	GSI_XFER_FLAG_CHAIN = 0x1,
	GSI_XFER_FLAG_EOB = 0x100,
	GSI_XFER_FLAG_EOT = 0x200,
	GSI_XFER_FLAG_BEI = 0x400
};

/* Kind of ring element being queued; see gsi_xfer_elem.type below */
enum gsi_xfer_elem_type {
	GSI_XFER_ELEM_DATA,
	GSI_XFER_ELEM_IMME_CMD,
	GSI_XFER_ELEM_NOP,
};
373
/**
 * gsi_xfer_elem - Metadata about a single transfer
 *
 * @addr: physical address of buffer
 * @len: size of buffer for GSI_XFER_ELEM_DATA:
 *       for outbound transfers this is the number of bytes to
 *       transfer.
 *       for inbound transfers, this is the maximum number of
 *       bytes the host expects from device in this transfer
 *
 *       immediate command opcode for GSI_XFER_ELEM_IMME_CMD
 * @flags: transfer flags, OR of all the applicable flags
 *
 *         GSI_XFER_FLAG_BEI: Block event interrupt
 *         1: Event generated by this ring element must not assert
 *         an interrupt to the host
 *         0: Event generated by this ring element must assert an
 *         interrupt to the host
 *
 *         GSI_XFER_FLAG_EOT: Interrupt on end of transfer
 *         1: If an EOT condition is encountered when processing
 *         this ring element, an event is generated by the device
 *         with its completion code set to EOT.
 *         0: If an EOT condition is encountered for this ring
 *         element, a completion event is not generated by the
 *         device, unless IEOB is 1
 *
 *         GSI_XFER_FLAG_EOB: Interrupt on end of block
 *         1: Device notifies host after processing this ring element
 *         by sending a completion event
 *         0: Completion event is not required after processing this
 *         ring element
 *
 *         GSI_XFER_FLAG_CHAIN: Chain bit that identifies the ring
 *         elements in a TD
 *
 * @type: transfer type
 *
 *        GSI_XFER_ELEM_DATA: for all data transfers
 *        GSI_XFER_ELEM_IMME_CMD: for IPA immediate commands
 *        GSI_XFER_ELEM_NOP: for event generation only
 *
 * @xfer_user_data: cookie used in xfer_cb
 *
 */
struct gsi_xfer_elem {
	uint64_t addr;
	uint16_t len;
	uint16_t flags;
	enum gsi_xfer_elem_type type;
	void *xfer_user_data;
};
426
/**
 * gsi_gpi_channel_scratch - GPI protocol SW config area of
 * channel scratch
 *
 * @max_outstanding_tre: Used for the prefetch management sequence by the
 *                       sequencer. Defines the maximum number of allowed
 *                       outstanding TREs in IPA/GSI (in Bytes). RE engine
 *                       prefetch will be limited by this configuration. It
 *                       is suggested to configure this value to IPA_IF
 *                       channel TLV queue size times element size. To disable
 *                       the feature in doorbell mode (DB Mode=1). Maximum
 *                       outstanding TREs should be set to 64KB
 *                       (or any value larger or equal to ring length, i.e.
 *                       RLEN)
 * @outstanding_threshold: Used for the prefetch management sequence by the
 *                         sequencer. Defines the threshold (in Bytes) as to when
 *                         to update the channel doorbell. Should be smaller than
 *                         Maximum outstanding TREs. value. It is suggested to
 *                         configure this value to 2 * element size.
 */
struct __packed gsi_gpi_channel_scratch {
	uint64_t resvd1;
	uint32_t resvd2:16;
	uint32_t max_outstanding_tre:16;
	uint32_t resvd3:16;
	uint32_t outstanding_threshold:16;
};
453
/**
 * gsi_mhi_channel_scratch - MHI protocol SW config area of
 * channel scratch
 *
 * @mhi_host_wp_addr:    Valid only when UL/DL Sync En is asserted. Defines
 *                       address in host from which channel write pointer
 *                       should be read in polling mode
 * @assert_bit40:        1: bit #41 in address should be asserted upon
 *                       IPA_IF.ProcessDescriptor routine (for MHI over PCIe
 *                       transfers)
 *                       0: bit #41 in address should be deasserted upon
 *                       IPA_IF.ProcessDescriptor routine (for non-MHI over
 *                       PCIe transfers)
 * @polling_configuration: Uplink channels: Defines timer to poll on MHI
 *                       context. Range: 1 to 31 milliseconds.
 *                       Downlink channel: Defines transfer ring buffer
 *                       availability threshold to poll on MHI context in
 *                       multiple of 8. Range: 0 to 31, meaning 0 to 258 ring
 *                       elements. E.g., value of 2 indicates 16 ring elements.
 *                       Valid only when Burst Mode Enabled is set to 1
 * @burst_mode_enabled:  0: Burst mode is disabled for this channel
 *                       1: Burst mode is enabled for this channel
 * @polling_mode:        0: the channel is not in polling mode, meaning the
 *                       host should ring DBs.
 *                       1: the channel is in polling mode, meaning the host
 *                       should not ring DBs until notified of DB mode/OOB mode
 * @oob_mod_threshold:   Defines OOB moderation threshold. Units are in 8
 *                       ring elements.
 * @max_outstanding_tre: Used for the prefetch management sequence by the
 *                       sequencer. Defines the maximum number of allowed
 *                       outstanding TREs in IPA/GSI (in Bytes). RE engine
 *                       prefetch will be limited by this configuration. It
 *                       is suggested to configure this value to IPA_IF
 *                       channel TLV queue size times element size.
 *                       To disable the feature in doorbell mode (DB Mode=1).
 *                       Maximum outstanding TREs should be set to 64KB
 *                       (or any value larger or equal to ring length, i.e.
 *                       RLEN)
 * @outstanding_threshold: Used for the prefetch management sequence by the
 *                       sequencer. Defines the threshold (in Bytes) as to when
 *                       to update the channel doorbell. Should be smaller than
 *                       Maximum outstanding TREs. value. It is suggested to
 *                       configure this value to min(TLV_FIFO_SIZE/2,8) *
 *                       element size.
 */
struct __packed gsi_mhi_channel_scratch {
	uint64_t mhi_host_wp_addr;
	uint32_t rsvd1:1;
	uint32_t assert_bit40:1;
	uint32_t polling_configuration:5;
	uint32_t burst_mode_enabled:1;
	uint32_t polling_mode:1;
	uint32_t oob_mod_threshold:5;
	uint32_t resvd2:2;
	uint32_t max_outstanding_tre:16;
	uint32_t resvd3:16;
	uint32_t outstanding_threshold:16;
};
511
/**
 * gsi_xdci_channel_scratch - xDCI protocol SW config area of
 * channel scratch
 *
 * @const_buffer_size:   TRB buffer size in KB (similar to IPA aggregation
 *                       configuration). Must be aligned to Max USB Packet Size
 * @xferrscidx: Transfer Resource Index (XferRscIdx). The hardware-assigned
 *              transfer resource index for the transfer, which was
 *              returned in response to the Start Transfer command.
 *              This field is used for "Update Transfer" command
 * @last_trb_addr:       Address (LSB - based on alignment restrictions) of
 *                       last TRB in queue. Used to identify rollover case
 * @depcmd_low_addr:     Used to generate "Update Transfer" command
 * @max_outstanding_tre: Used for the prefetch management sequence by the
 *                       sequencer. Defines the maximum number of allowed
 *                       outstanding TREs in IPA/GSI (in Bytes). RE engine
 *                       prefetch will be limited by this configuration. It
 *                       is suggested to configure this value to IPA_IF
 *                       channel TLV queue size times element size.
 *                       To disable the feature in doorbell mode (DB Mode=1)
 *                       Maximum outstanding TREs should be set to 64KB
 *                       (or any value larger or equal to ring length, i.e.
 *                       RLEN)
 * @depcmd_hi_addr: Used to generate "Update Transfer" command
 * @outstanding_threshold: Used for the prefetch management sequence by the
 *                       sequencer. Defines the threshold (in Bytes) as to when
 *                       to update the channel doorbell. Should be smaller than
 *                       Maximum outstanding TREs. value. It is suggested to
 *                       configure this value to 2 * element size. for MBIM the
 *                       suggested configuration is the element size.
 */
struct __packed gsi_xdci_channel_scratch {
	uint32_t last_trb_addr:16;
	uint32_t resvd1:4;
	uint32_t xferrscidx:7;
	uint32_t const_buffer_size:5;
	uint32_t depcmd_low_addr;
	uint32_t depcmd_hi_addr:8;
	uint32_t resvd2:8;
	uint32_t max_outstanding_tre:16;
	uint32_t resvd3:16;
	uint32_t outstanding_threshold:16;
};
554
/**
 * gsi_channel_scratch - channel scratch SW config area
 *
 * @gpi:  layout used when the channel protocol is GPI
 * @mhi:  layout used when the channel protocol is MHI
 * @xdci: layout used when the channel protocol is xDCI
 * @data: raw access to the four 32-bit scratch words
 */
union __packed gsi_channel_scratch {
	struct __packed gsi_gpi_channel_scratch gpi;
	struct __packed gsi_mhi_channel_scratch mhi;
	struct __packed gsi_xdci_channel_scratch xdci;
	struct __packed {
		uint32_t word1;
		uint32_t word2;
		uint32_t word3;
		uint32_t word4;
	} data;
};
570
/**
 * gsi_mhi_evt_scratch - MHI protocol SW config area of
 * event scratch (currently all bits reserved)
 */
struct __packed gsi_mhi_evt_scratch {
	uint32_t resvd1;
	uint32_t resvd2;
};
579
/**
 * gsi_xdci_evt_scratch - xDCI protocol SW config area of
 * event scratch
 *
 * @gevntcount_low_addr: lower 32 bits of the GEVNTCOUNT register address
 * @gevntcount_hi_addr: high byte (bits 39:32) of the GEVNTCOUNT register
 *                      address
 */
struct __packed gsi_xdci_evt_scratch {
	uint32_t gevntcount_low_addr;
	uint32_t gevntcount_hi_addr:8;
	uint32_t resvd1:24;
};
590
/**
 * gsi_evt_scratch - event scratch SW config area
 *
 * @mhi:  layout used when the event ring protocol is MHI
 * @xdci: layout used when the event ring protocol is xDCI
 * @data: raw access to the two 32-bit scratch words
 */
union __packed gsi_evt_scratch {
	struct __packed gsi_mhi_evt_scratch mhi;
	struct __packed gsi_xdci_evt_scratch xdci;
	struct __packed {
		uint32_t word1;
		uint32_t word2;
	} data;
};
603
/**
 * gsi_device_scratch - EE scratch config parameters, written via
 * gsi_write_device_scratch
 *
 * @mhi_base_chan_idx_valid: is mhi_base_chan_idx valid?
 * @mhi_base_chan_idx:       base index of IPA MHI channel indexes.
 *                           IPA MHI channel index = GSI channel ID +
 *                           MHI base channel index
 * @max_usb_pkt_size_valid:  is max_usb_pkt_size valid?
 * @max_usb_pkt_size:        max USB packet size in bytes (valid values are
 *                           512 and 1024)
 */
struct gsi_device_scratch {
	bool mhi_base_chan_idx_valid;
	uint8_t mhi_base_chan_idx;
	bool max_usb_pkt_size_valid;
	uint16_t max_usb_pkt_size;
};
621
/**
 * gsi_chan_info - information about channel occupancy, filled by
 * gsi_query_channel_info
 *
 * @wp: channel write pointer (physical address)
 * @rp: channel read pointer (physical address)
 * @evt_valid: is evt* info valid?
 * @evt_wp: event ring write pointer (physical address)
 * @evt_rp: event ring read pointer (physical address)
 */
struct gsi_chan_info {
	uint64_t wp;
	uint64_t rp;
	bool evt_valid;
	uint64_t evt_wp;
	uint64_t evt_rp;
};
638
639#ifdef CONFIG_GSI
640/**
641 * gsi_register_device - Peripheral should call this function to
642 * register itself with GSI before invoking any other APIs
643 *
644 * @props: Peripheral properties
645 * @dev_hdl: Handle populated by GSI, opaque to client
646 *
647 * @Return -GSI_STATUS_AGAIN if request should be re-tried later
648 * other error codes for failure
649 */
650int gsi_register_device(struct gsi_per_props *props, unsigned long *dev_hdl);
651
652/**
653 * gsi_complete_clk_grant - Peripheral should call this function to
654 * grant the clock resource requested by GSI previously that could not
655 * be granted synchronously. GSI will release the clock resource using
656 * the rel_clk_cb when appropriate
657 *
658 * @dev_hdl: Client handle previously obtained from
659 * gsi_register_device
660 *
661 * @Return gsi_status
662 */
663int gsi_complete_clk_grant(unsigned long dev_hdl);
664
665/**
666 * gsi_write_device_scratch - Peripheral should call this function to
667 * write to the EE scratch area
668 *
669 * @dev_hdl: Client handle previously obtained from
670 * gsi_register_device
671 * @val: Value to write
672 *
673 * @Return gsi_status
674 */
675int gsi_write_device_scratch(unsigned long dev_hdl,
676 struct gsi_device_scratch *val);
677
678/**
679 * gsi_deregister_device - Peripheral should call this function to
680 * de-register itself with GSI
681 *
682 * @dev_hdl: Client handle previously obtained from
683 * gsi_register_device
684 * @force: When set to true, cleanup is performed even if there
685 * are in use resources like channels, event rings, etc.
686 * this would be used after GSI reset to recover from some
687 * fatal error
688 * When set to false, there must not exist any allocated
689 * channels and event rings.
690 *
691 * @Return gsi_status
692 */
693int gsi_deregister_device(unsigned long dev_hdl, bool force);
694
695/**
696 * gsi_alloc_evt_ring - Peripheral should call this function to
697 * allocate an event ring
698 *
699 * @props: Event ring properties
700 * @dev_hdl: Client handle previously obtained from
701 * gsi_register_device
702 * @evt_ring_hdl: Handle populated by GSI, opaque to client
703 *
704 * This function can sleep
705 *
706 * @Return gsi_status
707 */
708int gsi_alloc_evt_ring(struct gsi_evt_ring_props *props, unsigned long dev_hdl,
709 unsigned long *evt_ring_hdl);
710
711/**
712 * gsi_write_evt_ring_scratch - Peripheral should call this function to
713 * write to the scratch area of the event ring context
714 *
715 * @evt_ring_hdl: Client handle previously obtained from
716 * gsi_alloc_evt_ring
717 * @val: Value to write
718 *
719 * @Return gsi_status
720 */
721int gsi_write_evt_ring_scratch(unsigned long evt_ring_hdl,
722 union __packed gsi_evt_scratch val);
723
724/**
725 * gsi_dealloc_evt_ring - Peripheral should call this function to
726 * de-allocate an event ring. There should not exist any active
727 * channels using this event ring
728 *
729 * @evt_ring_hdl: Client handle previously obtained from
730 * gsi_alloc_evt_ring
731 *
732 * This function can sleep
733 *
734 * @Return gsi_status
735 */
736int gsi_dealloc_evt_ring(unsigned long evt_ring_hdl);
737
738/**
739 * gsi_query_evt_ring_db_addr - Peripheral should call this function to
740 * query the physical addresses of the event ring doorbell registers
741 *
742 * @evt_ring_hdl: Client handle previously obtained from
743 * gsi_alloc_evt_ring
744 * @db_addr_wp_lsb: Physical address of doorbell register where the 32
745 * LSBs of the doorbell value should be written
746 * @db_addr_wp_msb: Physical address of doorbell register where the 32
747 * MSBs of the doorbell value should be written
748 *
749 * @Return gsi_status
750 */
751int gsi_query_evt_ring_db_addr(unsigned long evt_ring_hdl,
752 uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb);
753
754/**
Ghanim Fodia4fc49b2017-06-20 10:35:20 +0300755 * gsi_ring_evt_ring_db - Peripheral should call this function for
756 * ringing the event ring doorbell with given value
757 *
758 * @evt_ring_hdl: Client handle previously obtained from
759 * gsi_alloc_evt_ring
760 * @value: The value to be used for ringing the doorbell
761 *
762 * @Return gsi_status
763 */
764int gsi_ring_evt_ring_db(unsigned long evt_ring_hdl, uint64_t value);
765
766/**
Amir Levycdccd632016-10-30 09:36:41 +0200767 * gsi_reset_evt_ring - Peripheral should call this function to
768 * reset an event ring to recover from error state
769 *
770 * @evt_ring_hdl: Client handle previously obtained from
771 * gsi_alloc_evt_ring
772 *
773 * This function can sleep
774 *
775 * @Return gsi_status
776 */
777int gsi_reset_evt_ring(unsigned long evt_ring_hdl);
778
779/**
780 * gsi_get_evt_ring_cfg - This function returns the current config
781 * of the specified event ring
782 *
783 * @evt_ring_hdl: Client handle previously obtained from
784 * gsi_alloc_evt_ring
785 * @props: where to copy properties to
786 * @scr: where to copy scratch info to
787 *
788 * @Return gsi_status
789 */
790int gsi_get_evt_ring_cfg(unsigned long evt_ring_hdl,
791 struct gsi_evt_ring_props *props, union gsi_evt_scratch *scr);
792
793/**
794 * gsi_set_evt_ring_cfg - This function applies the supplied config
795 * to the specified event ring.
796 *
797 * exclusive property of the event ring cannot be changed after
798 * gsi_alloc_evt_ring
799 *
800 * @evt_ring_hdl: Client handle previously obtained from
801 * gsi_alloc_evt_ring
802 * @props: the properties to apply
803 * @scr: the scratch info to apply
804 *
805 * @Return gsi_status
806 */
807int gsi_set_evt_ring_cfg(unsigned long evt_ring_hdl,
808 struct gsi_evt_ring_props *props, union gsi_evt_scratch *scr);
809
810/**
811 * gsi_alloc_channel - Peripheral should call this function to
812 * allocate a channel
813 *
814 * @props: Channel properties
815 * @dev_hdl: Client handle previously obtained from
816 * gsi_register_device
817 * @chan_hdl: Handle populated by GSI, opaque to client
818 *
819 * This function can sleep
820 *
821 * @Return gsi_status
822 */
823int gsi_alloc_channel(struct gsi_chan_props *props, unsigned long dev_hdl,
824 unsigned long *chan_hdl);
825
826/**
827 * gsi_write_channel_scratch - Peripheral should call this function to
828 * write to the scratch area of the channel context
829 *
830 * @chan_hdl: Client handle previously obtained from
831 * gsi_alloc_channel
832 * @val: Value to write
833 *
834 * @Return gsi_status
835 */
836int gsi_write_channel_scratch(unsigned long chan_hdl,
837 union __packed gsi_channel_scratch val);
838
839/**
840 * gsi_start_channel - Peripheral should call this function to
841 * start a channel i.e put into running state
842 *
843 * @chan_hdl: Client handle previously obtained from
844 * gsi_alloc_channel
845 *
846 * This function can sleep
847 *
848 * @Return gsi_status
849 */
850int gsi_start_channel(unsigned long chan_hdl);
851
852/**
853 * gsi_stop_channel - Peripheral should call this function to
854 * stop a channel. Stop will happen on a packet boundary
855 *
856 * @chan_hdl: Client handle previously obtained from
857 * gsi_alloc_channel
858 *
859 * This function can sleep
860 *
861 * @Return -GSI_STATUS_AGAIN if client should call stop/stop_db again
862 * other error codes for failure
863 */
864int gsi_stop_channel(unsigned long chan_hdl);
865
866/**
867 * gsi_reset_channel - Peripheral should call this function to
868 * reset a channel to recover from error state
869 *
870 * @chan_hdl: Client handle previously obtained from
871 * gsi_alloc_channel
872 *
873 * This function can sleep
874 *
875 * @Return gsi_status
876 */
877int gsi_reset_channel(unsigned long chan_hdl);
878
879/**
880 * gsi_dealloc_channel - Peripheral should call this function to
881 * de-allocate a channel
882 *
883 * @chan_hdl: Client handle previously obtained from
884 * gsi_alloc_channel
885 *
886 * This function can sleep
887 *
888 * @Return gsi_status
889 */
890int gsi_dealloc_channel(unsigned long chan_hdl);
891
892/**
893 * gsi_stop_db_channel - Peripheral should call this function to
894 * stop a channel when all transfer elements till the doorbell
895 * have been processed
896 *
897 * @chan_hdl: Client handle previously obtained from
898 * gsi_alloc_channel
899 *
900 * This function can sleep
901 *
902 * @Return -GSI_STATUS_AGAIN if client should call stop/stop_db again
903 * other error codes for failure
904 */
905int gsi_stop_db_channel(unsigned long chan_hdl);
906
907/**
908 * gsi_query_channel_db_addr - Peripheral should call this function to
909 * query the physical addresses of the channel doorbell registers
910 *
911 * @chan_hdl: Client handle previously obtained from
912 * gsi_alloc_channel
913 * @db_addr_wp_lsb: Physical address of doorbell register where the 32
914 * LSBs of the doorbell value should be written
915 * @db_addr_wp_msb: Physical address of doorbell register where the 32
916 * MSBs of the doorbell value should be written
917 *
918 * @Return gsi_status
919 */
920int gsi_query_channel_db_addr(unsigned long chan_hdl,
921 uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb);
922
923/**
924 * gsi_query_channel_info - Peripheral can call this function to query the
925 * channel and associated event ring (if any) status.
926 *
927 * @chan_hdl: Client handle previously obtained from
928 * gsi_alloc_channel
929 * @info: Where to read the values into
930 *
931 * @Return gsi_status
932 */
933int gsi_query_channel_info(unsigned long chan_hdl,
934 struct gsi_chan_info *info);
935
936/**
937 * gsi_is_channel_empty - Peripheral can call this function to query if
938 * the channel is empty. This is only applicable to GPI. "Empty" means
939 * GSI has consumed all descriptors for a TO_GSI channel and SW has
940 * processed all completed descriptors for a FROM_GSI channel.
941 *
942 * @chan_hdl: Client handle previously obtained from gsi_alloc_channel
943 * @is_empty: set by GSI based on channel emptiness
944 *
945 * @Return gsi_status
946 */
947int gsi_is_channel_empty(unsigned long chan_hdl, bool *is_empty);
948
949/**
950 * gsi_get_channel_cfg - This function returns the current config
951 * of the specified channel
952 *
953 * @chan_hdl: Client handle previously obtained from
954 * gsi_alloc_channel
955 * @props: where to copy properties to
956 * @scr: where to copy scratch info to
957 *
958 * @Return gsi_status
959 */
960int gsi_get_channel_cfg(unsigned long chan_hdl, struct gsi_chan_props *props,
961 union gsi_channel_scratch *scr);
962
/**
 * gsi_set_channel_cfg - This function applies the supplied config
 * to the specified channel
 *
 * Note: ch_id and evt_ring_hdl of the channel cannot be changed after
 * gsi_alloc_channel
 *
 * @chan_hdl: Client handle previously obtained from
 * gsi_alloc_channel
 * @props: the properties to apply
 * @scr: the scratch info to apply
 *
 * @Return gsi_status
 */
int gsi_set_channel_cfg(unsigned long chan_hdl, struct gsi_chan_props *props,
		union gsi_channel_scratch *scr);
979
/**
 * gsi_poll_channel - Peripheral should call this function to query for
 * completed transfer descriptors.
 *
 * @chan_hdl: Client handle previously obtained from
 * gsi_alloc_channel
 * @notify: [out] Information about the completed transfer if any
 *
 * @Return gsi_status (GSI_STATUS_POLL_EMPTY is returned if no transfers
 * completed)
 */
int gsi_poll_channel(unsigned long chan_hdl,
		struct gsi_chan_xfer_notify *notify);
993
/**
 * gsi_config_channel_mode - Peripheral should call this function
 * to configure the channel mode.
 *
 * @chan_hdl: Client handle previously obtained from
 * gsi_alloc_channel
 * @mode: Mode to move the channel into (see enum gsi_chan_mode)
 *
 * @Return gsi_status
 */
int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode);
1005
/**
 * gsi_queue_xfer - Peripheral should call this function
 * to queue transfers on the given channel
 *
 * @chan_hdl: Client handle previously obtained from
 * gsi_alloc_channel
 * @num_xfers: Number of transfers in the array @xfer
 * @xfer: Array of num_xfers transfer descriptors
 * @ring_db: If true, tell HW about these queued xfers
 * If false, do not notify HW at this time
 * (gsi_start_xfer may be used later to notify HW)
 *
 * @Return gsi_status
 */
int gsi_queue_xfer(unsigned long chan_hdl, uint16_t num_xfers,
		struct gsi_xfer_elem *xfer, bool ring_db);
1021
/**
 * gsi_start_xfer - Peripheral should call this function to
 * inform HW about queued xfers (e.g. transfers queued earlier via
 * gsi_queue_xfer with ring_db == false)
 *
 * @chan_hdl: Client handle previously obtained from
 * gsi_alloc_channel
 *
 * @Return gsi_status
 */
int gsi_start_xfer(unsigned long chan_hdl);
1032
/**
 * gsi_configure_regs - Peripheral should call this function
 * to configure the GSI registers before/after the FW is
 * loaded but before it is enabled (see gsi_enable_fw).
 *
 * @gsi_base_addr: Base address of GSI register space
 * @gsi_size: Mapping size of the GSI register space
 * @per_base_addr: Base address of the peripheral using GSI
 *
 * @Return gsi_status
 */
int gsi_configure_regs(phys_addr_t gsi_base_addr, u32 gsi_size,
		phys_addr_t per_base_addr);
1046
/**
 * gsi_enable_fw - Peripheral should call this function
 * to enable the GSI FW after the FW has been loaded to the SRAM.
 *
 * @gsi_base_addr: Base address of GSI register space
 * @gsi_size: Mapping size of the GSI register space
 * @ver: GSI core version
 *
 * @Return gsi_status
 */
int gsi_enable_fw(phys_addr_t gsi_base_addr, u32 gsi_size, enum gsi_ver ver);
Amir Levycdccd632016-10-30 09:36:41 +02001058
/**
 * gsi_get_inst_ram_offset_and_size - Peripheral should call this function
 * to get instruction RAM base address offset and size. Peripheral typically
 * uses this info to load GSI FW into the IRAM.
 *
 * @base_offset: [OUT] - IRAM base offset address
 * @size: [OUT] - IRAM size
 *
 * @Return none
 */
void gsi_get_inst_ram_offset_and_size(unsigned long *base_offset,
		unsigned long *size);
1071
/**
 * gsi_halt_channel_ee - Peripheral should call this function
 * to stop another EE's channel. This is usually used in SSR cleanup.
 *
 * @chan_idx: Virtual channel index
 * @ee: EE identifier
 * @code: [out] response code for the operation
 *
 * @Return gsi_status
 */
int gsi_halt_channel_ee(unsigned int chan_idx, unsigned int ee, int *code);
1083
Amir Levycdccd632016-10-30 09:36:41 +02001084/*
1085 * Here is a typical sequence of calls
1086 *
1087 * gsi_register_device
1088 *
1089 * gsi_write_device_scratch (if the protocol needs this)
1090 *
1091 * gsi_alloc_evt_ring (for as many event rings as needed)
1092 * gsi_write_evt_ring_scratch
1093 *
1094 * gsi_alloc_channel (for as many channels as needed; channels can have
1095 * no event ring, an exclusive event ring or a shared event ring)
1096 * gsi_write_channel_scratch
1097 * gsi_start_channel
1098 * gsi_queue_xfer/gsi_start_xfer
 * gsi_config_channel_mode/gsi_poll_channel (if client wants to poll on
1100 * xfer completions)
1101 * gsi_stop_db_channel/gsi_stop_channel
1102 *
1103 * gsi_dealloc_channel
1104 *
1105 * gsi_dealloc_evt_ring
1106 *
1107 * gsi_deregister_device
1108 *
1109 */
1110#else
/*
 * Stub implementations for configurations where GSI support is compiled
 * out. Each stub returns -GSI_STATUS_UNSUPPORTED_OP so callers can detect
 * that the operation is unavailable.
 */
static inline int gsi_register_device(struct gsi_per_props *props,
		unsigned long *dev_hdl)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}

static inline int gsi_complete_clk_grant(unsigned long dev_hdl)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}

static inline int gsi_write_device_scratch(unsigned long dev_hdl,
		struct gsi_device_scratch *val)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}

static inline int gsi_deregister_device(unsigned long dev_hdl, bool force)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}

static inline int gsi_alloc_evt_ring(struct gsi_evt_ring_props *props,
		unsigned long dev_hdl,
		unsigned long *evt_ring_hdl)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}

static inline int gsi_write_evt_ring_scratch(unsigned long evt_ring_hdl,
		union __packed gsi_evt_scratch val)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}

static inline int gsi_dealloc_evt_ring(unsigned long evt_ring_hdl)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}

static inline int gsi_query_evt_ring_db_addr(unsigned long evt_ring_hdl,
		uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}

static inline int gsi_ring_evt_ring_db(unsigned long evt_ring_hdl,
		uint64_t value)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}

static inline int gsi_reset_evt_ring(unsigned long evt_ring_hdl)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}

static inline int gsi_alloc_channel(struct gsi_chan_props *props,
		unsigned long dev_hdl,
		unsigned long *chan_hdl)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}

static inline int gsi_write_channel_scratch(unsigned long chan_hdl,
		union __packed gsi_channel_scratch val)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}

static inline int gsi_start_channel(unsigned long chan_hdl)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}

static inline int gsi_stop_channel(unsigned long chan_hdl)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}

static inline int gsi_reset_channel(unsigned long chan_hdl)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}

static inline int gsi_dealloc_channel(unsigned long chan_hdl)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}

static inline int gsi_stop_db_channel(unsigned long chan_hdl)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}

static inline int gsi_query_channel_db_addr(unsigned long chan_hdl,
		uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}

static inline int gsi_query_channel_info(unsigned long chan_hdl,
		struct gsi_chan_info *info)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}

static inline int gsi_is_channel_empty(unsigned long chan_hdl, bool *is_empty)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}

static inline int gsi_poll_channel(unsigned long chan_hdl,
		struct gsi_chan_xfer_notify *notify)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}

static inline int gsi_config_channel_mode(unsigned long chan_hdl,
		enum gsi_chan_mode mode)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}

static inline int gsi_queue_xfer(unsigned long chan_hdl, uint16_t num_xfers,
		struct gsi_xfer_elem *xfer, bool ring_db)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}

static inline int gsi_start_xfer(unsigned long chan_hdl)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}

static inline int gsi_get_channel_cfg(unsigned long chan_hdl,
		struct gsi_chan_props *props,
		union gsi_channel_scratch *scr)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}

static inline int gsi_set_channel_cfg(unsigned long chan_hdl,
		struct gsi_chan_props *props,
		union gsi_channel_scratch *scr)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}

static inline int gsi_get_evt_ring_cfg(unsigned long evt_ring_hdl,
		struct gsi_evt_ring_props *props, union gsi_evt_scratch *scr)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}

static inline int gsi_set_evt_ring_cfg(unsigned long evt_ring_hdl,
		struct gsi_evt_ring_props *props, union gsi_evt_scratch *scr)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}

static inline int gsi_configure_regs(phys_addr_t gsi_base_addr, u32 gsi_size,
		phys_addr_t per_base_addr)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
Ghanim Fodi37b64952017-01-24 15:42:30 +02001277
Amir Levycdccd632016-10-30 09:36:41 +02001278static inline int gsi_enable_fw(phys_addr_t gsi_base_addr, u32 gsi_size)
1279{
1280 return -GSI_STATUS_UNSUPPORTED_OP;
1281}
Ghanim Fodi37b64952017-01-24 15:42:30 +02001282
/* Stubs for the GSI-disabled configuration: no IRAM info is available and
 * halting another EE's channel is reported as unsupported.
 */
static inline void gsi_get_inst_ram_offset_and_size(unsigned long *base_offset,
		unsigned long *size)
{
}

static inline int gsi_halt_channel_ee(unsigned int chan_idx, unsigned int ee,
		int *code)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
Amir Levycdccd632016-10-30 09:36:41 +02001293#endif
1294#endif