blob: 6e0b439fcdc8a3ebd8e1f9a019b960d3c3552798 [file] [log] [blame]
Ghanim Fodi37b64952017-01-24 15:42:30 +02001/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
Amir Levycdccd632016-10-30 09:36:41 +02002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12#ifndef MSM_GSI_H
13#define MSM_GSI_H
14#include <linux/types.h>
15
/**
 * enum gsi_ver - GSI core hardware version
 *
 * GSI_VER_ERR is an invalid/unset value; GSI_VER_MAX is a sentinel
 * usable for range checking.
 */
enum gsi_ver {
	GSI_VER_ERR = 0,
	GSI_VER_1_0 = 1,
	GSI_VER_1_2 = 2,
	GSI_VER_1_3 = 3,
	GSI_VER_2_0 = 4,
	GSI_VER_MAX,
};

/* Status/error codes returned by the GSI APIs declared in this header */
enum gsi_status {
	GSI_STATUS_SUCCESS = 0,
	GSI_STATUS_ERROR = 1,
	GSI_STATUS_RING_INSUFFICIENT_SPACE = 2,
	GSI_STATUS_RING_EMPTY = 3,
	GSI_STATUS_RES_ALLOC_FAILURE = 4,
	GSI_STATUS_BAD_STATE = 5,
	GSI_STATUS_INVALID_PARAMS = 6,
	GSI_STATUS_UNSUPPORTED_OP = 7,
	GSI_STATUS_NODEV = 8,
	GSI_STATUS_POLL_EMPTY = 9,
	GSI_STATUS_EVT_RING_INCOMPATIBLE = 10,
	GSI_STATUS_TIMED_OUT = 11,
	GSI_STATUS_AGAIN = 12,
};

/*
 * Peripheral-level notification types delivered through the notify_cb
 * registered in struct gsi_per_props (see struct gsi_per_notify).
 */
enum gsi_per_evt {
	GSI_PER_EVT_GLOB_ERROR,
	GSI_PER_EVT_GLOB_GP1,
	GSI_PER_EVT_GLOB_GP2,
	GSI_PER_EVT_GLOB_GP3,
	GSI_PER_EVT_GENERAL_BREAK_POINT,
	GSI_PER_EVT_GENERAL_BUS_ERROR,
	GSI_PER_EVT_GENERAL_CMD_FIFO_OVERFLOW,
	GSI_PER_EVT_GENERAL_MCS_STACK_OVERFLOW,
};
51
/**
 * gsi_per_notify - Peripheral callback info
 *
 * @user_data: cookie supplied in gsi_register_device
 * @evt_id:    type of notification
 * @err_desc:  error related information
 *
 */
struct gsi_per_notify {
	void *user_data;
	enum gsi_per_evt evt_id;
	union {
		uint16_t err_desc;
	} data;
};

/* How the GSI core interrupts the host: MSI write or wired IRQ */
enum gsi_intr_type {
	GSI_INTR_MSI = 0x0,
	GSI_INTR_IRQ = 0x1
};
72
73
/**
 * gsi_per_props - Peripheral related properties
 *
 * @ver:        GSI core version
 * @ee:         EE where this driver and peripheral driver runs
 * @intr:       control interrupt type
 * @intvec:     write data for MSI write
 * @msi_addr:   MSI address
 * @irq:        IRQ number
 * @phys_addr:  physical address of GSI block
 * @size:       register size of GSI block
 * @mhi_er_id_limits_valid: valid flag for mhi_er_id_limits
 * @mhi_er_id_limits: MHI event ring start and end ids
 * @notify_cb:  general notification callback
 * @req_clk_cb: callback to request peripheral clock
 *              granted should be set to true if request is completed
 *              synchronously, false otherwise (peripheral needs
 *              to call gsi_complete_clk_grant later when request is
 *              completed)
 *              if this callback is not provided, then GSI will assume
 *              peripheral is clocked at all times
 * @rel_clk_cb: callback to release peripheral clock
 * @user_data:  cookie used for notifications
 *
 * All the callbacks are in interrupt context
 *
 */
struct gsi_per_props {
	enum gsi_ver ver;
	unsigned int ee;
	enum gsi_intr_type intr;
	uint32_t intvec;
	uint64_t msi_addr;
	unsigned int irq;
	phys_addr_t phys_addr;
	unsigned long size;
	bool mhi_er_id_limits_valid;
	uint32_t mhi_er_id_limits[2];
	void (*notify_cb)(struct gsi_per_notify *notify);
	void (*req_clk_cb)(void *user_data, bool *granted);
	int (*rel_clk_cb)(void *user_data);
	void *user_data;
};
117
/* Event ring error types reported through the event ring's err_cb */
enum gsi_evt_err {
	GSI_EVT_OUT_OF_BUFFERS_ERR = 0x0,
	GSI_EVT_OUT_OF_RESOURCES_ERR = 0x1,
	GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR = 0x2,
	GSI_EVT_EVT_RING_EMPTY_ERR = 0x3,
};

/**
 * gsi_evt_err_notify - event ring error callback info
 *
 * @user_data: cookie supplied in gsi_alloc_evt_ring
 * @evt_id:    type of error
 * @err_desc:  more info about the error
 *
 */
struct gsi_evt_err_notify {
	void *user_data;
	enum gsi_evt_err evt_id;
	uint16_t err_desc;
};

/* Interface type of the channel associated with an event ring */
enum gsi_evt_chtype {
	GSI_EVT_CHTYPE_MHI_EV = 0x0,
	GSI_EVT_CHTYPE_XHCI_EV = 0x1,
	GSI_EVT_CHTYPE_GPI_EV = 0x2,
	GSI_EVT_CHTYPE_XDCI_EV = 0x3
};

/* Size in bytes of a single event ring element */
enum gsi_evt_ring_elem_size {
	GSI_EVT_RING_RE_SIZE_4B = 4,
	GSI_EVT_RING_RE_SIZE_16B = 16,
};
150
/**
 * gsi_evt_ring_props - Event ring related properties
 *
 * @intf:            interface type (of the associated channel)
 * @intr:            interrupt type
 * @re_size:         size of event ring element
 * @ring_len:        length of ring in bytes (must be integral multiple of
 *                   re_size)
 * @ring_base_addr:  physical base address of ring. Address must be aligned to
 *                   ring_len rounded to power of two
 * @ring_base_vaddr: virtual base address of ring (set to NULL when not
 *                   applicable)
 * @int_modt:        cycles base interrupt moderation (32KHz clock)
 * @int_modc:        interrupt moderation packet counter
 * @intvec:          write data for MSI write
 * @msi_addr:        MSI address
 * @rp_update_addr:  physical address to which event read pointer should be
 *                   written on every event generation. must be set to 0 when
 *                   no update is desired
 * @exclusive:       if true, only one GSI channel can be associated with this
 *                   event ring. if false, the event ring can be shared among
 *                   multiple GSI channels but in that case no polling
 *                   (GSI_CHAN_MODE_POLL) is supported on any of those channels
 * @err_cb:          error notification callback
 * @user_data:       cookie used for error notifications
 * @evchid_valid:    is evchid valid?
 * @evchid:          the event ID that is being specifically requested (this is
 *                   relevant for MHI where doorbell routing requires ERs to be
 *                   physically contiguous)
 */
struct gsi_evt_ring_props {
	enum gsi_evt_chtype intf;
	enum gsi_intr_type intr;
	enum gsi_evt_ring_elem_size re_size;
	uint16_t ring_len;
	uint64_t ring_base_addr;
	void *ring_base_vaddr;
	uint16_t int_modt;
	uint8_t int_modc;
	uint32_t intvec;
	uint64_t msi_addr;
	uint64_t rp_update_addr;
	bool exclusive;
	void (*err_cb)(struct gsi_evt_err_notify *notify);
	void *user_data;
	bool evchid_valid;
	uint8_t evchid;
};
199
/* Completion delivery mode for a channel: interrupt callback or polling */
enum gsi_chan_mode {
	GSI_CHAN_MODE_CALLBACK = 0x0,
	GSI_CHAN_MODE_POLL = 0x1,
};

/* Protocol carried over the channel */
enum gsi_chan_prot {
	GSI_CHAN_PROT_MHI = 0x0,
	GSI_CHAN_PROT_XHCI = 0x1,
	GSI_CHAN_PROT_GPI = 0x2,
	GSI_CHAN_PROT_XDCI = 0x3
};

/* Data direction relative to the GSI core */
enum gsi_chan_dir {
	GSI_CHAN_DIR_FROM_GSI = 0x0,
	GSI_CHAN_DIR_TO_GSI = 0x1
};

/* Limit on the number of pre-fetch segments for a channel */
enum gsi_max_prefetch {
	GSI_ONE_PREFETCH_SEG = 0x0,
	GSI_TWO_PREFETCH_SEG = 0x1
};

/* Completion event type reported for a ring element (TRE) via xfer_cb */
enum gsi_chan_evt {
	GSI_CHAN_EVT_INVALID = 0x0,
	GSI_CHAN_EVT_SUCCESS = 0x1,
	GSI_CHAN_EVT_EOT = 0x2,
	GSI_CHAN_EVT_OVERFLOW = 0x3,
	GSI_CHAN_EVT_EOB = 0x4,
	GSI_CHAN_EVT_OOB = 0x5,
	GSI_CHAN_EVT_DB_MODE = 0x6,
	GSI_CHAN_EVT_UNDEFINED = 0x10,
	GSI_CHAN_EVT_RE_ERROR = 0x11,
};
233
/**
 * gsi_chan_xfer_notify - Channel callback info
 *
 * @chan_user_data: cookie supplied in gsi_alloc_channel
 * @xfer_user_data: cookie of the gsi_xfer_elem that caused the
 *                  event to be generated
 * @evt_id:         type of event triggered by the associated TRE
 *                  (corresponding to xfer_user_data)
 * @bytes_xfered:   number of bytes transferred by the associated TRE
 *                  (corresponding to xfer_user_data)
 *
 */
struct gsi_chan_xfer_notify {
	void *chan_user_data;
	void *xfer_user_data;
	enum gsi_chan_evt evt_id;
	uint16_t bytes_xfered;
};

/* Channel error types reported through the channel's err_cb */
enum gsi_chan_err {
	GSI_CHAN_INVALID_TRE_ERR = 0x0,
	GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR = 0x1,
	GSI_CHAN_OUT_OF_BUFFERS_ERR = 0x2,
	GSI_CHAN_OUT_OF_RESOURCES_ERR = 0x3,
	GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR = 0x4,
	GSI_CHAN_HWO_1_ERR = 0x5
};

/**
 * gsi_chan_err_notify - Channel general callback info
 *
 * @chan_user_data: cookie supplied in gsi_alloc_channel
 * @evt_id:         type of error
 * @err_desc:       more info about the error
 *
 */
struct gsi_chan_err_notify {
	void *chan_user_data;
	enum gsi_chan_err evt_id;
	uint16_t err_desc;
};

/* Size in bytes of a single channel ring element */
enum gsi_chan_ring_elem_size {
	GSI_CHAN_RE_SIZE_4B = 4,
	GSI_CHAN_RE_SIZE_16B = 16,
	GSI_CHAN_RE_SIZE_32B = 32,
};

/* Doorbell handling mode: written directly to RE engine, or to DB engine */
enum gsi_chan_use_db_eng {
	GSI_CHAN_DIRECT_MODE = 0x0,
	GSI_CHAN_DB_MODE = 0x1,
};
286
/**
 * gsi_chan_props - Channel related properties
 *
 * @prot:            interface type
 * @dir:             channel direction
 * @ch_id:           virtual channel ID
 * @evt_ring_hdl:    handle of associated event ring. set to ~0 if no
 *                   event ring associated
 * @re_size:         size of channel ring element
 * @ring_len:        length of ring in bytes (must be integral multiple of
 *                   re_size)
 * @max_re_expected: maximal number of ring elements expected to be queued.
 *                   used for data path statistics gathering. if 0 provided
 *                   ring_len / re_size will be used.
 * @ring_base_addr:  physical base address of ring. Address must be aligned to
 *                   ring_len rounded to power of two
 * @ring_base_vaddr: virtual base address of ring (set to NULL when not
 *                   applicable)
 * @use_db_eng:      0 => direct mode (doorbells are written directly to RE
 *                   engine)
 *                   1 => DB mode (doorbells are written to DB engine)
 * @max_prefetch:    limit number of pre-fetch segments for channel
 * @low_weight:      low channel weight (priority of channel for RE engine
 *                   round robin algorithm); must be >= 1
 * @xfer_cb:         transfer notification callback, this callback happens
 *                   on event boundaries
 *
 *                   e.g. 1
 *
 *                   out TD with 3 REs
 *
 *                   RE1: EOT=0, EOB=0, CHAIN=1;
 *                   RE2: EOT=0, EOB=0, CHAIN=1;
 *                   RE3: EOT=1, EOB=0, CHAIN=0;
 *
 *                   the callback will be triggered for RE3 using the
 *                   xfer_user_data of that RE
 *
 *                   e.g. 2
 *
 *                   in REs
 *
 *                   RE1: EOT=1, EOB=0, CHAIN=0;
 *                   RE2: EOT=1, EOB=0, CHAIN=0;
 *                   RE3: EOT=1, EOB=0, CHAIN=0;
 *
 *                   received packet consumes all of RE1, RE2 and part of RE3
 *                   for EOT condition. there will be three callbacks in below
 *                   order
 *
 *                   callback for RE1 using GSI_CHAN_EVT_OVERFLOW
 *                   callback for RE2 using GSI_CHAN_EVT_OVERFLOW
 *                   callback for RE3 using GSI_CHAN_EVT_EOT
 *
 * @err_cb:          error notification callback
 * @chan_user_data:  cookie used for notifications
 *
 * All the callbacks are in interrupt context
 *
 */
struct gsi_chan_props {
	enum gsi_chan_prot prot;
	enum gsi_chan_dir dir;
	uint8_t ch_id;
	unsigned long evt_ring_hdl;	/* ~0 when no event ring associated */
	enum gsi_chan_ring_elem_size re_size;
	uint16_t ring_len;
	uint16_t max_re_expected;
	uint64_t ring_base_addr;
	void *ring_base_vaddr;
	enum gsi_chan_use_db_eng use_db_eng;
	enum gsi_max_prefetch max_prefetch;
	uint8_t low_weight;
	void (*xfer_cb)(struct gsi_chan_xfer_notify *notify);
	void (*err_cb)(struct gsi_chan_err_notify *notify);
	void *chan_user_data;
};
364
/* Per-element transfer flags, OR-able into gsi_xfer_elem.flags */
enum gsi_xfer_flag {
	GSI_XFER_FLAG_CHAIN = 0x1,
	GSI_XFER_FLAG_EOB = 0x100,
	GSI_XFER_FLAG_EOT = 0x200,
	GSI_XFER_FLAG_BEI = 0x400
};

/* Type of a transfer ring element (see gsi_xfer_elem.type) */
enum gsi_xfer_elem_type {
	GSI_XFER_ELEM_DATA,
	GSI_XFER_ELEM_IMME_CMD,
	GSI_XFER_ELEM_NOP,
};
377
/**
 * gsi_xfer_elem - Metadata about a single transfer
 *
 * @addr: physical address of buffer
 * @len:  size of buffer for GSI_XFER_ELEM_DATA:
 *        for outbound transfers this is the number of bytes to
 *        transfer.
 *        for inbound transfers, this is the maximum number of
 *        bytes the host expects from device in this transfer
 *
 *        immediate command opcode for GSI_XFER_ELEM_IMME_CMD
 * @flags: transfer flags, OR of all the applicable flags
 *
 *        GSI_XFER_FLAG_BEI: Block event interrupt
 *        1: Event generated by this ring element must not assert
 *        an interrupt to the host
 *        0: Event generated by this ring element must assert an
 *        interrupt to the host
 *
 *        GSI_XFER_FLAG_EOT: Interrupt on end of transfer
 *        1: If an EOT condition is encountered when processing
 *        this ring element, an event is generated by the device
 *        with its completion code set to EOT.
 *        0: If an EOT condition is encountered for this ring
 *        element, a completion event is not generated by the
 *        device, unless IEOB is 1
 *
 *        GSI_XFER_FLAG_EOB: Interrupt on end of block
 *        1: Device notifies host after processing this ring element
 *        by sending a completion event
 *        0: Completion event is not required after processing this
 *        ring element
 *
 *        GSI_XFER_FLAG_CHAIN: Chain bit that identifies the ring
 *        elements in a TD
 *
 * @type: transfer type
 *
 *        GSI_XFER_ELEM_DATA: for all data transfers
 *        GSI_XFER_ELEM_IMME_CMD: for IPA immediate commands
 *        GSI_XFER_ELEM_NOP: for event generation only
 *
 * @xfer_user_data: cookie used in xfer_cb
 *
 */
struct gsi_xfer_elem {
	uint64_t addr;
	uint16_t len;
	uint16_t flags;
	enum gsi_xfer_elem_type type;
	void *xfer_user_data;
};
430
/**
 * gsi_gpi_channel_scratch - GPI protocol SW config area of
 * channel scratch
 *
 * @max_outstanding_tre: Used for the prefetch management sequence by the
 *                       sequencer. Defines the maximum number of allowed
 *                       outstanding TREs in IPA/GSI (in Bytes). RE engine
 *                       prefetch will be limited by this configuration. It
 *                       is suggested to configure this value to IPA_IF
 *                       channel TLV queue size times element size. To disable
 *                       the feature in doorbell mode (DB Mode=1), maximum
 *                       outstanding TREs should be set to 64KB
 *                       (or any value larger or equal to ring length . RLEN)
 * @outstanding_threshold: Used for the prefetch management sequence by the
 *                       sequencer. Defines the threshold (in Bytes) as to when
 *                       to update the channel doorbell. Should be smaller than
 *                       the maximum outstanding TREs value. It is suggested to
 *                       configure this value to 2 * element size.
 */
struct __packed gsi_gpi_channel_scratch {
	uint64_t resvd1;
	uint32_t resvd2:16;
	uint32_t max_outstanding_tre:16;
	uint32_t resvd3:16;
	uint32_t outstanding_threshold:16;
};
457
/**
 * gsi_mhi_channel_scratch - MHI protocol SW config area of
 * channel scratch
 *
 * @mhi_host_wp_addr:    Valid only when UL/DL Sync En is asserted. Defines
 *                       address in host from which channel write pointer
 *                       should be read in polling mode
 * @assert_bit40:        1: bit #41 in address should be asserted upon
 *                       IPA_IF.ProcessDescriptor routine (for MHI over PCIe
 *                       transfers)
 *                       0: bit #41 in address should be deasserted upon
 *                       IPA_IF.ProcessDescriptor routine (for non-MHI over
 *                       PCIe transfers)
 * @polling_configuration: Uplink channels: Defines timer to poll on MHI
 *                       context. Range: 1 to 31 milliseconds.
 *                       Downlink channel: Defines transfer ring buffer
 *                       availability threshold to poll on MHI context in
 *                       multiple of 8. Range: 0 to 31, meaning 0 to 258 ring
 *                       elements. E.g., value of 2 indicates 16 ring elements.
 *                       Valid only when Burst Mode Enabled is set to 1
 * @burst_mode_enabled:  0: Burst mode is disabled for this channel
 *                       1: Burst mode is enabled for this channel
 * @polling_mode:        0: the channel is not in polling mode, meaning the
 *                       host should ring DBs.
 *                       1: the channel is in polling mode, meaning the host
 *                       should not ring DBs until notified of DB mode/OOB mode
 * @oob_mod_threshold:   Defines OOB moderation threshold. Units are in 8
 *                       ring elements.
 * @max_outstanding_tre: Used for the prefetch management sequence by the
 *                       sequencer. Defines the maximum number of allowed
 *                       outstanding TREs in IPA/GSI (in Bytes). RE engine
 *                       prefetch will be limited by this configuration. It
 *                       is suggested to configure this value to IPA_IF
 *                       channel TLV queue size times element size.
 *                       To disable the feature in doorbell mode (DB Mode=1),
 *                       maximum outstanding TREs should be set to 64KB
 *                       (or any value larger or equal to ring length . RLEN)
 * @outstanding_threshold: Used for the prefetch management sequence by the
 *                       sequencer. Defines the threshold (in Bytes) as to when
 *                       to update the channel doorbell. Should be smaller than
 *                       the maximum outstanding TREs value. It is suggested to
 *                       configure this value to min(TLV_FIFO_SIZE/2,8) *
 *                       element size.
 */
struct __packed gsi_mhi_channel_scratch {
	uint64_t mhi_host_wp_addr;
	uint32_t rsvd1:1;
	uint32_t assert_bit40:1;
	uint32_t polling_configuration:5;
	uint32_t burst_mode_enabled:1;
	uint32_t polling_mode:1;
	uint32_t oob_mod_threshold:5;
	uint32_t resvd2:2;
	uint32_t max_outstanding_tre:16;
	uint32_t resvd3:16;
	uint32_t outstanding_threshold:16;
};
515
/**
 * gsi_xdci_channel_scratch - xDCI protocol SW config area of
 * channel scratch
 *
 * @const_buffer_size:   TRB buffer size in KB (similar to IPA aggregation
 *                       configuration). Must be aligned to Max USB Packet Size
 * @xferrscidx: Transfer Resource Index (XferRscIdx). The hardware-assigned
 *                       transfer resource index for the transfer, which was
 *                       returned in response to the Start Transfer command.
 *                       This field is used for "Update Transfer" command
 * @last_trb_addr:       Address (LSB - based on alignment restrictions) of
 *                       last TRB in queue. Used to identify rollover case
 * @depcmd_low_addr:     Used to generate "Update Transfer" command
 * @max_outstanding_tre: Used for the prefetch management sequence by the
 *                       sequencer. Defines the maximum number of allowed
 *                       outstanding TREs in IPA/GSI (in Bytes). RE engine
 *                       prefetch will be limited by this configuration. It
 *                       is suggested to configure this value to IPA_IF
 *                       channel TLV queue size times element size.
 *                       To disable the feature in doorbell mode (DB Mode=1),
 *                       maximum outstanding TREs should be set to 64KB
 *                       (or any value larger or equal to ring length . RLEN)
 * @depcmd_hi_addr:      Used to generate "Update Transfer" command
 * @outstanding_threshold: Used for the prefetch management sequence by the
 *                       sequencer. Defines the threshold (in Bytes) as to when
 *                       to update the channel doorbell. Should be smaller than
 *                       the maximum outstanding TREs value. It is suggested to
 *                       configure this value to 2 * element size. for MBIM the
 *                       suggested configuration is the element size.
 */
struct __packed gsi_xdci_channel_scratch {
	uint32_t last_trb_addr:16;
	uint32_t resvd1:4;
	uint32_t xferrscidx:7;
	uint32_t const_buffer_size:5;
	uint32_t depcmd_low_addr;
	uint32_t depcmd_hi_addr:8;
	uint32_t resvd2:8;
	uint32_t max_outstanding_tre:16;
	uint32_t resvd3:16;
	uint32_t outstanding_threshold:16;
};
558
/**
 * gsi_channel_scratch - channel scratch SW config area
 *
 * Overlays the protocol-specific layouts above over four raw 32-bit words.
 */
union __packed gsi_channel_scratch {
	struct __packed gsi_gpi_channel_scratch gpi;
	struct __packed gsi_mhi_channel_scratch mhi;
	struct __packed gsi_xdci_channel_scratch xdci;
	struct __packed {
		uint32_t word1;
		uint32_t word2;
		uint32_t word3;
		uint32_t word4;
	} data;
};

/**
 * gsi_mhi_evt_scratch - MHI protocol SW config area of
 * event scratch
 */
struct __packed gsi_mhi_evt_scratch {
	uint32_t resvd1;
	uint32_t resvd2;
};

/**
 * gsi_xdci_evt_scratch - xDCI protocol SW config area of
 * event scratch
 *
 */
struct __packed gsi_xdci_evt_scratch {
	uint32_t gevntcount_low_addr;
	uint32_t gevntcount_hi_addr:8;
	uint32_t resvd1:24;
};

/**
 * gsi_evt_scratch - event scratch SW config area
 *
 * Overlays the protocol-specific layouts above over two raw 32-bit words.
 */
union __packed gsi_evt_scratch {
	struct __packed gsi_mhi_evt_scratch mhi;
	struct __packed gsi_xdci_evt_scratch xdci;
	struct __packed {
		uint32_t word1;
		uint32_t word2;
	} data;
};
607
/**
 * gsi_device_scratch - EE scratch config parameters
 *
 * @mhi_base_chan_idx_valid: is mhi_base_chan_idx valid?
 * @mhi_base_chan_idx:       base index of IPA MHI channel indexes.
 *                           IPA MHI channel index = GSI channel ID +
 *                           MHI base channel index
 * @max_usb_pkt_size_valid:  is max_usb_pkt_size valid?
 * @max_usb_pkt_size:        max USB packet size in bytes (valid values are
 *                           512 and 1024)
 */
struct gsi_device_scratch {
	bool mhi_base_chan_idx_valid;
	uint8_t mhi_base_chan_idx;
	bool max_usb_pkt_size_valid;
	uint16_t max_usb_pkt_size;
};

/**
 * gsi_chan_info - information about channel occupancy
 *
 * @wp:        channel write pointer (physical address)
 * @rp:        channel read pointer (physical address)
 * @evt_valid: is evt* info valid?
 * @evt_wp:    event ring write pointer (physical address)
 * @evt_rp:    event ring read pointer (physical address)
 */
struct gsi_chan_info {
	uint64_t wp;
	uint64_t rp;
	bool evt_valid;
	uint64_t evt_wp;
	uint64_t evt_rp;
};
642
643#ifdef CONFIG_GSI
644/**
645 * gsi_register_device - Peripheral should call this function to
646 * register itself with GSI before invoking any other APIs
647 *
648 * @props: Peripheral properties
649 * @dev_hdl: Handle populated by GSI, opaque to client
650 *
651 * @Return -GSI_STATUS_AGAIN if request should be re-tried later
652 * other error codes for failure
653 */
654int gsi_register_device(struct gsi_per_props *props, unsigned long *dev_hdl);
655
656/**
657 * gsi_complete_clk_grant - Peripheral should call this function to
658 * grant the clock resource requested by GSI previously that could not
659 * be granted synchronously. GSI will release the clock resource using
660 * the rel_clk_cb when appropriate
661 *
662 * @dev_hdl: Client handle previously obtained from
663 * gsi_register_device
664 *
665 * @Return gsi_status
666 */
667int gsi_complete_clk_grant(unsigned long dev_hdl);
668
669/**
670 * gsi_write_device_scratch - Peripheral should call this function to
671 * write to the EE scratch area
672 *
673 * @dev_hdl: Client handle previously obtained from
674 * gsi_register_device
675 * @val: Value to write
676 *
677 * @Return gsi_status
678 */
679int gsi_write_device_scratch(unsigned long dev_hdl,
680 struct gsi_device_scratch *val);
681
682/**
683 * gsi_deregister_device - Peripheral should call this function to
684 * de-register itself with GSI
685 *
686 * @dev_hdl: Client handle previously obtained from
687 * gsi_register_device
688 * @force: When set to true, cleanup is performed even if there
689 * are in use resources like channels, event rings, etc.
690 * this would be used after GSI reset to recover from some
691 * fatal error
692 * When set to false, there must not exist any allocated
693 * channels and event rings.
694 *
695 * @Return gsi_status
696 */
697int gsi_deregister_device(unsigned long dev_hdl, bool force);
698
699/**
700 * gsi_alloc_evt_ring - Peripheral should call this function to
701 * allocate an event ring
702 *
703 * @props: Event ring properties
704 * @dev_hdl: Client handle previously obtained from
705 * gsi_register_device
706 * @evt_ring_hdl: Handle populated by GSI, opaque to client
707 *
708 * This function can sleep
709 *
710 * @Return gsi_status
711 */
712int gsi_alloc_evt_ring(struct gsi_evt_ring_props *props, unsigned long dev_hdl,
713 unsigned long *evt_ring_hdl);
714
715/**
716 * gsi_write_evt_ring_scratch - Peripheral should call this function to
717 * write to the scratch area of the event ring context
718 *
719 * @evt_ring_hdl: Client handle previously obtained from
720 * gsi_alloc_evt_ring
721 * @val: Value to write
722 *
723 * @Return gsi_status
724 */
725int gsi_write_evt_ring_scratch(unsigned long evt_ring_hdl,
726 union __packed gsi_evt_scratch val);
727
728/**
729 * gsi_dealloc_evt_ring - Peripheral should call this function to
730 * de-allocate an event ring. There should not exist any active
731 * channels using this event ring
732 *
733 * @evt_ring_hdl: Client handle previously obtained from
734 * gsi_alloc_evt_ring
735 *
736 * This function can sleep
737 *
738 * @Return gsi_status
739 */
740int gsi_dealloc_evt_ring(unsigned long evt_ring_hdl);
741
742/**
743 * gsi_query_evt_ring_db_addr - Peripheral should call this function to
744 * query the physical addresses of the event ring doorbell registers
745 *
746 * @evt_ring_hdl: Client handle previously obtained from
747 * gsi_alloc_evt_ring
748 * @db_addr_wp_lsb: Physical address of doorbell register where the 32
749 * LSBs of the doorbell value should be written
750 * @db_addr_wp_msb: Physical address of doorbell register where the 32
751 * MSBs of the doorbell value should be written
752 *
753 * @Return gsi_status
754 */
755int gsi_query_evt_ring_db_addr(unsigned long evt_ring_hdl,
756 uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb);
757
758/**
Ghanim Fodia4fc49b2017-06-20 10:35:20 +0300759 * gsi_ring_evt_ring_db - Peripheral should call this function for
760 * ringing the event ring doorbell with given value
761 *
762 * @evt_ring_hdl: Client handle previously obtained from
763 * gsi_alloc_evt_ring
764 * @value: The value to be used for ringing the doorbell
765 *
766 * @Return gsi_status
767 */
768int gsi_ring_evt_ring_db(unsigned long evt_ring_hdl, uint64_t value);
769
770/**
Amir Levycdccd632016-10-30 09:36:41 +0200771 * gsi_reset_evt_ring - Peripheral should call this function to
772 * reset an event ring to recover from error state
773 *
774 * @evt_ring_hdl: Client handle previously obtained from
775 * gsi_alloc_evt_ring
776 *
777 * This function can sleep
778 *
779 * @Return gsi_status
780 */
781int gsi_reset_evt_ring(unsigned long evt_ring_hdl);
782
783/**
784 * gsi_get_evt_ring_cfg - This function returns the current config
785 * of the specified event ring
786 *
787 * @evt_ring_hdl: Client handle previously obtained from
788 * gsi_alloc_evt_ring
789 * @props: where to copy properties to
790 * @scr: where to copy scratch info to
791 *
792 * @Return gsi_status
793 */
794int gsi_get_evt_ring_cfg(unsigned long evt_ring_hdl,
795 struct gsi_evt_ring_props *props, union gsi_evt_scratch *scr);
796
797/**
798 * gsi_set_evt_ring_cfg - This function applies the supplied config
799 * to the specified event ring.
800 *
801 * exclusive property of the event ring cannot be changed after
802 * gsi_alloc_evt_ring
803 *
804 * @evt_ring_hdl: Client handle previously obtained from
805 * gsi_alloc_evt_ring
806 * @props: the properties to apply
807 * @scr: the scratch info to apply
808 *
809 * @Return gsi_status
810 */
811int gsi_set_evt_ring_cfg(unsigned long evt_ring_hdl,
812 struct gsi_evt_ring_props *props, union gsi_evt_scratch *scr);
813
814/**
815 * gsi_alloc_channel - Peripheral should call this function to
816 * allocate a channel
817 *
818 * @props: Channel properties
819 * @dev_hdl: Client handle previously obtained from
820 * gsi_register_device
821 * @chan_hdl: Handle populated by GSI, opaque to client
822 *
823 * This function can sleep
824 *
825 * @Return gsi_status
826 */
827int gsi_alloc_channel(struct gsi_chan_props *props, unsigned long dev_hdl,
828 unsigned long *chan_hdl);
829
830/**
831 * gsi_write_channel_scratch - Peripheral should call this function to
832 * write to the scratch area of the channel context
833 *
834 * @chan_hdl: Client handle previously obtained from
835 * gsi_alloc_channel
836 * @val: Value to write
837 *
838 * @Return gsi_status
839 */
840int gsi_write_channel_scratch(unsigned long chan_hdl,
841 union __packed gsi_channel_scratch val);
842
843/**
844 * gsi_start_channel - Peripheral should call this function to
845 * start a channel i.e put into running state
846 *
847 * @chan_hdl: Client handle previously obtained from
848 * gsi_alloc_channel
849 *
850 * This function can sleep
851 *
852 * @Return gsi_status
853 */
854int gsi_start_channel(unsigned long chan_hdl);
855
856/**
857 * gsi_stop_channel - Peripheral should call this function to
858 * stop a channel. Stop will happen on a packet boundary
859 *
860 * @chan_hdl: Client handle previously obtained from
861 * gsi_alloc_channel
862 *
863 * This function can sleep
864 *
865 * @Return -GSI_STATUS_AGAIN if client should call stop/stop_db again
866 * other error codes for failure
867 */
868int gsi_stop_channel(unsigned long chan_hdl);
869
870/**
871 * gsi_reset_channel - Peripheral should call this function to
872 * reset a channel to recover from error state
873 *
874 * @chan_hdl: Client handle previously obtained from
875 * gsi_alloc_channel
876 *
877 * This function can sleep
878 *
879 * @Return gsi_status
880 */
881int gsi_reset_channel(unsigned long chan_hdl);
882
883/**
884 * gsi_dealloc_channel - Peripheral should call this function to
885 * de-allocate a channel
886 *
887 * @chan_hdl: Client handle previously obtained from
888 * gsi_alloc_channel
889 *
890 * This function can sleep
891 *
892 * @Return gsi_status
893 */
894int gsi_dealloc_channel(unsigned long chan_hdl);
895
896/**
897 * gsi_stop_db_channel - Peripheral should call this function to
898 * stop a channel when all transfer elements till the doorbell
899 * have been processed
900 *
901 * @chan_hdl: Client handle previously obtained from
902 * gsi_alloc_channel
903 *
904 * This function can sleep
905 *
906 * @Return -GSI_STATUS_AGAIN if client should call stop/stop_db again
907 * other error codes for failure
908 */
909int gsi_stop_db_channel(unsigned long chan_hdl);
910
911/**
912 * gsi_query_channel_db_addr - Peripheral should call this function to
913 * query the physical addresses of the channel doorbell registers
914 *
915 * @chan_hdl: Client handle previously obtained from
916 * gsi_alloc_channel
917 * @db_addr_wp_lsb: Physical address of doorbell register where the 32
918 * LSBs of the doorbell value should be written
919 * @db_addr_wp_msb: Physical address of doorbell register where the 32
920 * MSBs of the doorbell value should be written
921 *
922 * @Return gsi_status
923 */
924int gsi_query_channel_db_addr(unsigned long chan_hdl,
925 uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb);
926
927/**
928 * gsi_query_channel_info - Peripheral can call this function to query the
929 * channel and associated event ring (if any) status.
930 *
931 * @chan_hdl: Client handle previously obtained from
932 * gsi_alloc_channel
933 * @info: Where to read the values into
934 *
935 * @Return gsi_status
936 */
937int gsi_query_channel_info(unsigned long chan_hdl,
938 struct gsi_chan_info *info);
939
940/**
941 * gsi_is_channel_empty - Peripheral can call this function to query if
942 * the channel is empty. This is only applicable to GPI. "Empty" means
943 * GSI has consumed all descriptors for a TO_GSI channel and SW has
944 * processed all completed descriptors for a FROM_GSI channel.
945 *
946 * @chan_hdl: Client handle previously obtained from gsi_alloc_channel
947 * @is_empty: set by GSI based on channel emptiness
948 *
949 * @Return gsi_status
950 */
951int gsi_is_channel_empty(unsigned long chan_hdl, bool *is_empty);
952
953/**
954 * gsi_get_channel_cfg - This function returns the current config
955 * of the specified channel
956 *
957 * @chan_hdl: Client handle previously obtained from
958 * gsi_alloc_channel
959 * @props: where to copy properties to
960 * @scr: where to copy scratch info to
961 *
962 * @Return gsi_status
963 */
964int gsi_get_channel_cfg(unsigned long chan_hdl, struct gsi_chan_props *props,
965 union gsi_channel_scratch *scr);
966
967/**
968 * gsi_set_channel_cfg - This function applies the supplied config
969 * to the specified channel
970 *
971 * ch_id and evt_ring_hdl of the channel cannot be changed after
972 * gsi_alloc_channel
973 *
974 * @chan_hdl: Client handle previously obtained from
975 * gsi_alloc_channel
976 * @props: the properties to apply
977 * @scr: the scratch info to apply
978 *
979 * @Return gsi_status
980 */
981int gsi_set_channel_cfg(unsigned long chan_hdl, struct gsi_chan_props *props,
982 union gsi_channel_scratch *scr);
983
984/**
985 * gsi_poll_channel - Peripheral should call this function to query for
986 * completed transfer descriptors.
987 *
988 * @chan_hdl: Client handle previously obtained from
989 * gsi_alloc_channel
990 * @notify: Information about the completed transfer if any
991 *
992 * @Return gsi_status (GSI_STATUS_POLL_EMPTY is returned if no transfers
993 * completed)
994 */
995int gsi_poll_channel(unsigned long chan_hdl,
996 struct gsi_chan_xfer_notify *notify);
997
998/**
999 * gsi_config_channel_mode - Peripheral should call this function
1000 * to configure the channel mode.
1001 *
1002 * @chan_hdl: Client handle previously obtained from
1003 * gsi_alloc_channel
1004 * @mode: Mode to move the channel into
1005 *
1006 * @Return gsi_status
1007 */
1008int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode);
1009
1010/**
1011 * gsi_queue_xfer - Peripheral should call this function
1012 * to queue transfers on the given channel
1013 *
1014 * @chan_hdl: Client handle previously obtained from
1015 * gsi_alloc_channel
1016 * @num_xfers: Number of transfer in the array @ xfer
1017 * @xfer: Array of num_xfers transfer descriptors
1018 * @ring_db: If true, tell HW about these queued xfers
1019 * If false, do not notify HW at this time
1020 *
1021 * @Return gsi_status
1022 */
1023int gsi_queue_xfer(unsigned long chan_hdl, uint16_t num_xfers,
1024 struct gsi_xfer_elem *xfer, bool ring_db);
1025
1026/**
1027 * gsi_start_xfer - Peripheral should call this function to
1028 * inform HW about queued xfers
1029 *
1030 * @chan_hdl: Client handle previously obtained from
1031 * gsi_alloc_channel
1032 *
1033 * @Return gsi_status
1034 */
1035int gsi_start_xfer(unsigned long chan_hdl);
1036
1037/**
1038 * gsi_configure_regs - Peripheral should call this function
1039 * to configure the GSI registers before/after the FW is
1040 * loaded but before it is enabled.
1041 *
1042 * @gsi_base_addr: Base address of GSI register space
1043 * @gsi_size: Mapping size of the GSI register space
1044 * @per_base_addr: Base address of the peripheral using GSI
1045 *
1046 * @Return gsi_status
1047 */
1048int gsi_configure_regs(phys_addr_t gsi_base_addr, u32 gsi_size,
1049 phys_addr_t per_base_addr);
1050
1051/**
1052 * gsi_enable_fw - Peripheral should call this function
1053 * to enable the GSI FW after the FW has been loaded to the SRAM.
1054 *
1055 * @gsi_base_addr: Base address of GSI register space
1056 * @gsi_size: Mapping size of the GSI register space
Amir Levy85dcd172016-12-06 17:47:39 +02001057 * @ver: GSI core version
 *
1059 * @Return gsi_status
1060 */
Amir Levy85dcd172016-12-06 17:47:39 +02001061int gsi_enable_fw(phys_addr_t gsi_base_addr, u32 gsi_size, enum gsi_ver ver);
Amir Levycdccd632016-10-30 09:36:41 +02001062
Ghanim Fodi37b64952017-01-24 15:42:30 +02001063/**
1064 * gsi_get_inst_ram_offset_and_size - Peripheral should call this function
1065 * to get instruction RAM base address offset and size. Peripheral typically
1066 * uses this info to load GSI FW into the IRAM.
1067 *
1068 * @base_offset:[OUT] - IRAM base offset address
1069 * @size: [OUT] - IRAM size
 *
1071 * @Return none
1072 */
1073void gsi_get_inst_ram_offset_and_size(unsigned long *base_offset,
1074 unsigned long *size);
1075
Skylar Changc9939cf2017-02-21 09:46:46 -08001076/**
1077 * gsi_halt_channel_ee - Peripheral should call this function
 * to stop other EE's channel. This is usually used in SSR cleanup
1079 *
1080 * @chan_idx: Virtual channel index
1081 * @ee: EE
1082 * @code: [out] response code for operation
 *
1084 * @Return gsi_status
1085 */
1086int gsi_halt_channel_ee(unsigned int chan_idx, unsigned int ee, int *code);
1087
Amir Levycdccd632016-10-30 09:36:41 +02001088/*
1089 * Here is a typical sequence of calls
1090 *
1091 * gsi_register_device
1092 *
1093 * gsi_write_device_scratch (if the protocol needs this)
1094 *
1095 * gsi_alloc_evt_ring (for as many event rings as needed)
1096 * gsi_write_evt_ring_scratch
1097 *
1098 * gsi_alloc_channel (for as many channels as needed; channels can have
1099 * no event ring, an exclusive event ring or a shared event ring)
1100 * gsi_write_channel_scratch
1101 * gsi_start_channel
1102 * gsi_queue_xfer/gsi_start_xfer
1103 * gsi_config_channel_mode/gsi_poll_channel (if clients wants to poll on
1104 * xfer completions)
1105 * gsi_stop_db_channel/gsi_stop_channel
1106 *
1107 * gsi_dealloc_channel
1108 *
1109 * gsi_dealloc_evt_ring
1110 *
1111 * gsi_deregister_device
1112 *
1113 */
1114#else
/*
 * Stub API for builds without GSI support: each call is a no-op that
 * fails with -GSI_STATUS_UNSUPPORTED_OP (matching the real API's
 * negative gsi_status error convention).
 */
static inline int gsi_register_device(struct gsi_per_props *props,
		unsigned long *dev_hdl)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}

static inline int gsi_complete_clk_grant(unsigned long dev_hdl)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}

static inline int gsi_write_device_scratch(unsigned long dev_hdl,
		struct gsi_device_scratch *val)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}

static inline int gsi_deregister_device(unsigned long dev_hdl, bool force)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}

static inline int gsi_alloc_evt_ring(struct gsi_evt_ring_props *props,
		unsigned long dev_hdl,
		unsigned long *evt_ring_hdl)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}

static inline int gsi_write_evt_ring_scratch(unsigned long evt_ring_hdl,
		union __packed gsi_evt_scratch val)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}

static inline int gsi_dealloc_evt_ring(unsigned long evt_ring_hdl)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}

static inline int gsi_query_evt_ring_db_addr(unsigned long evt_ring_hdl,
		uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}

static inline int gsi_ring_evt_ring_db(unsigned long evt_ring_hdl,
		uint64_t value)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
1166
Amir Levycdccd632016-10-30 09:36:41 +02001167static inline int gsi_reset_evt_ring(unsigned long evt_ring_hdl)
1168{
1169 return -GSI_STATUS_UNSUPPORTED_OP;
1170}
1171
1172static inline int gsi_alloc_channel(struct gsi_chan_props *props,
1173 unsigned long dev_hdl,
1174 unsigned long *chan_hdl)
1175{
1176 return -GSI_STATUS_UNSUPPORTED_OP;
1177}
1178
1179static inline int gsi_write_channel_scratch(unsigned long chan_hdl,
1180 union __packed gsi_channel_scratch val)
1181{
1182 return -GSI_STATUS_UNSUPPORTED_OP;
1183}
1184
1185static inline int gsi_start_channel(unsigned long chan_hdl)
1186{
1187 return -GSI_STATUS_UNSUPPORTED_OP;
1188}
1189
1190static inline int gsi_stop_channel(unsigned long chan_hdl)
1191{
1192 return -GSI_STATUS_UNSUPPORTED_OP;
1193}
1194
1195static inline int gsi_reset_channel(unsigned long chan_hdl)
1196{
1197 return -GSI_STATUS_UNSUPPORTED_OP;
1198}
1199
1200static inline int gsi_dealloc_channel(unsigned long chan_hdl)
1201{
1202 return -GSI_STATUS_UNSUPPORTED_OP;
1203}
1204
1205static inline int gsi_stop_db_channel(unsigned long chan_hdl)
1206{
1207 return -GSI_STATUS_UNSUPPORTED_OP;
1208}
1209
1210static inline int gsi_query_channel_db_addr(unsigned long chan_hdl,
1211 uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb)
1212{
1213 return -GSI_STATUS_UNSUPPORTED_OP;
1214}
1215
/* No-op channel query/transfer stubs for builds without GSI support. */
static inline int gsi_query_channel_info(unsigned long chan_hdl,
		struct gsi_chan_info *info)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}

static inline int gsi_is_channel_empty(unsigned long chan_hdl, bool *is_empty)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}

static inline int gsi_poll_channel(unsigned long chan_hdl,
		struct gsi_chan_xfer_notify *notify)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}

static inline int gsi_config_channel_mode(unsigned long chan_hdl,
		enum gsi_chan_mode mode)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}

static inline int gsi_queue_xfer(unsigned long chan_hdl, uint16_t num_xfers,
		struct gsi_xfer_elem *xfer, bool ring_db)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}

static inline int gsi_start_xfer(unsigned long chan_hdl)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}

static inline int gsi_get_channel_cfg(unsigned long chan_hdl,
		struct gsi_chan_props *props,
		union gsi_channel_scratch *scr)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}

static inline int gsi_set_channel_cfg(unsigned long chan_hdl,
		struct gsi_chan_props *props,
		union gsi_channel_scratch *scr)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
1263
/* No-op event-ring config / register setup stubs for builds without GSI. */
static inline int gsi_get_evt_ring_cfg(unsigned long evt_ring_hdl,
	struct gsi_evt_ring_props *props, union gsi_evt_scratch *scr)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}

static inline int gsi_set_evt_ring_cfg(unsigned long evt_ring_hdl,
	struct gsi_evt_ring_props *props, union gsi_evt_scratch *scr)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}

static inline int gsi_configure_regs(phys_addr_t gsi_base_addr, u32 gsi_size,
		phys_addr_t per_base_addr)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
Ghanim Fodi37b64952017-01-24 15:42:30 +02001281
Amir Levycdccd632016-10-30 09:36:41 +02001282static inline int gsi_enable_fw(phys_addr_t gsi_base_addr, u32 gsi_size)
1283{
1284 return -GSI_STATUS_UNSUPPORTED_OP;
1285}
Ghanim Fodi37b64952017-01-24 15:42:30 +02001286
/* No-op IRAM-info / halt-channel stubs for builds without GSI support.
 * Note gsi_get_inst_ram_offset_and_size returns void, so the out
 * parameters are deliberately left untouched here.
 */
static inline void gsi_get_inst_ram_offset_and_size(unsigned long *base_offset,
		unsigned long *size)
{
}

static inline int gsi_halt_channel_ee(unsigned int chan_idx, unsigned int ee,
		int *code)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
Amir Levycdccd632016-10-30 09:36:41 +02001297#endif
1298#endif