blob: 0c460a0b304b937c1f4474bfe88abaf966dd50d5 [file] [log] [blame]
Ghanim Fodi37b64952017-01-24 15:42:30 +02001/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
Amir Levycdccd632016-10-30 09:36:41 +02002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12#ifndef MSM_GSI_H
13#define MSM_GSI_H
14#include <linux/types.h>
15
/**
 * enum gsi_ver - GSI core hardware version
 *
 * Supplied by the peripheral in gsi_per_props.ver when registering
 * with gsi_register_device.
 */
enum gsi_ver {
	GSI_VER_ERR = 0,	/* invalid / not yet set */
	GSI_VER_1_0 = 1,
	GSI_VER_1_2 = 2,
	GSI_VER_1_3 = 3,
	GSI_VER_2_0 = 4,
	GSI_VER_MAX,		/* sentinel for range checks; keep last */
};
24
/**
 * enum gsi_status - return codes used by the gsi_* APIs
 *
 * GSI_STATUS_SUCCESS (0) indicates success; any non-zero value is an
 * error. GSI_STATUS_AGAIN means the caller should retry the operation
 * later (see gsi_register_device, gsi_stop_channel, gsi_stop_db_channel).
 */
enum gsi_status {
	GSI_STATUS_SUCCESS = 0,
	GSI_STATUS_ERROR = 1,
	GSI_STATUS_RING_INSUFFICIENT_SPACE = 2,
	GSI_STATUS_RING_EMPTY = 3,
	GSI_STATUS_RES_ALLOC_FAILURE = 4,
	GSI_STATUS_BAD_STATE = 5,
	GSI_STATUS_INVALID_PARAMS = 6,
	GSI_STATUS_UNSUPPORTED_OP = 7,
	GSI_STATUS_NODEV = 8,
	GSI_STATUS_POLL_EMPTY = 9,	/* gsi_poll_channel: no completed transfers */
	GSI_STATUS_EVT_RING_INCOMPATIBLE = 10,
	GSI_STATUS_TIMED_OUT = 11,
	GSI_STATUS_AGAIN = 12,		/* retry the request later */
};
40
/*
 * Device-level notification type, reported to the peripheral through
 * the notify_cb registered in gsi_per_props (gsi_per_notify.evt_id).
 */
enum gsi_per_evt {
	GSI_PER_EVT_GLOB_ERROR = 0,
	GSI_PER_EVT_GLOB_GP1 = 1,
	GSI_PER_EVT_GLOB_GP2 = 2,
	GSI_PER_EVT_GLOB_GP3 = 3,
	GSI_PER_EVT_GENERAL_BREAK_POINT = 4,
	GSI_PER_EVT_GENERAL_BUS_ERROR = 5,
	GSI_PER_EVT_GENERAL_CMD_FIFO_OVERFLOW = 6,
	GSI_PER_EVT_GENERAL_MCS_STACK_OVERFLOW = 7,
};
51
/**
 * gsi_per_notify - Peripheral callback info
 *
 * @user_data: cookie supplied in gsi_register_device
 * @evt_id: type of notification
 * @err_desc: error related information
 *
 */
struct gsi_per_notify {
	void *user_data;
	enum gsi_per_evt evt_id;
	union {
		/* valid for error-type notifications only */
		uint16_t err_desc;
	} data;
};
67
/* Type of the control interrupt GSI raises toward the peripheral. */
enum gsi_intr_type {
	GSI_INTR_MSI = 0,	/* MSI write: see gsi_per_props intvec/msi_addr */
	GSI_INTR_IRQ = 1	/* wired interrupt: see gsi_per_props.irq */
};
72
73
/**
 * gsi_per_props - Peripheral related properties
 *
 * @ver: GSI core version
 * @ee: EE where this driver and peripheral driver runs
 * @intr: control interrupt type
 * @intvec: write data for MSI write
 * @msi_addr: MSI address
 * @irq: IRQ number
 * @phys_addr: physical address of GSI block
 * @size: register size of GSI block
 * @notify_cb: general notification callback
 * @req_clk_cb: callback to request peripheral clock
 *		granted should be set to true if request is completed
 *		synchronously, false otherwise (peripheral needs
 *		to call gsi_complete_clk_grant later when request is
 *		completed)
 *		if this callback is not provided, then GSI will assume
 *		peripheral is clocked at all times
 * @rel_clk_cb: callback to release peripheral clock
 * @user_data: cookie used for notifications
 *
 * All the callbacks are in interrupt context
 *
 */
struct gsi_per_props {
	enum gsi_ver ver;
	unsigned int ee;
	enum gsi_intr_type intr;
	uint32_t intvec;
	uint64_t msi_addr;
	unsigned int irq;
	phys_addr_t phys_addr;
	unsigned long size;
	void (*notify_cb)(struct gsi_per_notify *notify);
	void (*req_clk_cb)(void *user_data, bool *granted);
	int (*rel_clk_cb)(void *user_data);
	void *user_data;
};
113
/* Event ring error type, reported via gsi_evt_err_notify.evt_id. */
enum gsi_evt_err {
	GSI_EVT_OUT_OF_BUFFERS_ERR = 0x0,
	GSI_EVT_OUT_OF_RESOURCES_ERR = 0x1,
	GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR = 0x2,
	GSI_EVT_EVT_RING_EMPTY_ERR = 0x3,
};
120
/**
 * gsi_evt_err_notify - event ring error callback info
 *
 * @user_data: cookie supplied in gsi_alloc_evt_ring
 * @evt_id: type of error
 * @err_desc: more info about the error
 *
 */
struct gsi_evt_err_notify {
	void *user_data;
	enum gsi_evt_err evt_id;
	uint16_t err_desc;
};
134
/* Protocol of the channel associated with the event ring (props.intf). */
enum gsi_evt_chtype {
	GSI_EVT_CHTYPE_MHI_EV = 0x0,
	GSI_EVT_CHTYPE_XHCI_EV = 0x1,
	GSI_EVT_CHTYPE_GPI_EV = 0x2,
	GSI_EVT_CHTYPE_XDCI_EV = 0x3
};
141
/* Size, in bytes, of a single event ring element. */
enum gsi_evt_ring_elem_size {
	GSI_EVT_RING_RE_SIZE_4B = 4,
	GSI_EVT_RING_RE_SIZE_16B = 16,
};
146
/**
 * gsi_evt_ring_props - Event ring related properties
 *
 * @intf: interface type (of the associated channel)
 * @intr: interrupt type
 * @re_size: size of event ring element
 * @ring_len: length of ring in bytes (must be integral multiple of
 *            re_size)
 * @ring_base_addr: physical base address of ring. Address must be aligned to
 *                  ring_len rounded to power of two
 * @ring_base_vaddr: virtual base address of ring (set to NULL when not
 *                   applicable)
 * @int_modt: cycles-based interrupt moderation (32KHz clock)
 * @int_modc: interrupt moderation packet counter
 * @intvec: write data for MSI write
 * @msi_addr: MSI address
 * @rp_update_addr: physical address to which event read pointer should be
 *                  written on every event generation. must be set to 0 when
 *                  no update is desired
 * @exclusive: if true, only one GSI channel can be associated with this
 *             event ring. if false, the event ring can be shared among
 *             multiple GSI channels but in that case no polling
 *             (GSI_CHAN_MODE_POLL) is supported on any of those channels
 * @err_cb: error notification callback
 * @user_data: cookie used for error notifications
 * @evchid_valid: is evchid valid?
 * @evchid: the event ID that is being specifically requested (this is
 *          relevant for MHI where doorbell routing requires ERs to be
 *          physically contiguous)
 */
struct gsi_evt_ring_props {
	enum gsi_evt_chtype intf;
	enum gsi_intr_type intr;
	enum gsi_evt_ring_elem_size re_size;
	uint16_t ring_len;
	uint64_t ring_base_addr;
	void *ring_base_vaddr;
	uint16_t int_modt;
	uint8_t int_modc;
	uint32_t intvec;
	uint64_t msi_addr;
	uint64_t rp_update_addr;
	bool exclusive;
	void (*err_cb)(struct gsi_evt_err_notify *notify);
	void *user_data;
	bool evchid_valid;
	uint8_t evchid;
};
195
/* Completion delivery mode of a channel (see gsi_config_channel_mode). */
enum gsi_chan_mode {
	GSI_CHAN_MODE_CALLBACK = 0x0,	/* completions delivered via xfer_cb */
	GSI_CHAN_MODE_POLL = 0x1,	/* client polls via gsi_poll_channel */
};
200
/* Protocol (interface type) carried over the channel. */
enum gsi_chan_prot {
	GSI_CHAN_PROT_MHI = 0x0,
	GSI_CHAN_PROT_XHCI = 0x1,
	GSI_CHAN_PROT_GPI = 0x2,
	GSI_CHAN_PROT_XDCI = 0x3
};
207
/* Transfer direction of the channel, relative to the GSI core. */
enum gsi_chan_dir {
	GSI_CHAN_DIR_FROM_GSI = 0x0,
	GSI_CHAN_DIR_TO_GSI = 0x1
};
212
/* Limit on pre-fetch segments for a channel (gsi_chan_props.max_prefetch). */
enum gsi_max_prefetch {
	GSI_ONE_PREFETCH_SEG = 0x0,
	GSI_TWO_PREFETCH_SEG = 0x1
};
217
/* Completion code reported in gsi_chan_xfer_notify.evt_id. */
enum gsi_chan_evt {
	GSI_CHAN_EVT_INVALID = 0x0,
	GSI_CHAN_EVT_SUCCESS = 0x1,
	GSI_CHAN_EVT_EOT = 0x2,
	GSI_CHAN_EVT_OVERFLOW = 0x3,
	GSI_CHAN_EVT_EOB = 0x4,
	GSI_CHAN_EVT_OOB = 0x5,
	GSI_CHAN_EVT_DB_MODE = 0x6,
	GSI_CHAN_EVT_UNDEFINED = 0x10,
	GSI_CHAN_EVT_RE_ERROR = 0x11,
};
229
/**
 * gsi_chan_xfer_notify - Channel callback info
 *
 * @chan_user_data: cookie supplied in gsi_alloc_channel
 * @xfer_user_data: cookie of the gsi_xfer_elem that caused the
 *                  event to be generated
 * @evt_id: type of event triggered by the associated TRE
 *          (corresponding to xfer_user_data)
 * @bytes_xfered: number of bytes transferred by the associated TRE
 *                (corresponding to xfer_user_data)
 *
 */
struct gsi_chan_xfer_notify {
	void *chan_user_data;
	void *xfer_user_data;
	enum gsi_chan_evt evt_id;
	uint16_t bytes_xfered;
};
248
/* Channel error type, reported via gsi_chan_err_notify.evt_id. */
enum gsi_chan_err {
	GSI_CHAN_INVALID_TRE_ERR = 0x0,
	GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR = 0x1,
	GSI_CHAN_OUT_OF_BUFFERS_ERR = 0x2,
	GSI_CHAN_OUT_OF_RESOURCES_ERR = 0x3,
	GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR = 0x4,
	GSI_CHAN_HWO_1_ERR = 0x5
};
257
/**
 * gsi_chan_err_notify - Channel general callback info
 *
 * @chan_user_data: cookie supplied in gsi_alloc_channel
 * @evt_id: type of error
 * @err_desc: more info about the error
 *
 */
struct gsi_chan_err_notify {
	void *chan_user_data;
	enum gsi_chan_err evt_id;
	uint16_t err_desc;
};
271
/* Size, in bytes, of a single channel (transfer) ring element. */
enum gsi_chan_ring_elem_size {
	GSI_CHAN_RE_SIZE_4B = 4,
	GSI_CHAN_RE_SIZE_16B = 16,
	GSI_CHAN_RE_SIZE_32B = 32,
};
277
/* Doorbell handling mode; see gsi_chan_props.use_db_eng. */
enum gsi_chan_use_db_eng {
	GSI_CHAN_DIRECT_MODE = 0x0,	/* doorbells written directly to RE engine */
	GSI_CHAN_DB_MODE = 0x1,		/* doorbells written to DB engine */
};
282
/**
 * gsi_chan_props - Channel related properties
 *
 * @prot: interface type
 * @dir: channel direction
 * @ch_id: virtual channel ID
 * @evt_ring_hdl: handle of associated event ring. set to ~0 if no
 *                event ring associated
 * @re_size: size of channel ring element
 * @ring_len: length of ring in bytes (must be integral multiple of
 *            re_size)
 * @max_re_expected: maximal number of ring elements expected to be queued.
 *                   used for data path statistics gathering. if 0 provided
 *                   ring_len / re_size will be used.
 * @ring_base_addr: physical base address of ring. Address must be aligned to
 *                  ring_len rounded to power of two
 * @ring_base_vaddr: virtual base address of ring (set to NULL when not
 *                   applicable)
 * @use_db_eng: 0 => direct mode (doorbells are written directly to RE
 *              engine)
 *              1 => DB mode (doorbells are written to DB engine)
 * @max_prefetch: limit number of pre-fetch segments for channel
 * @low_weight: low channel weight (priority of channel for RE engine
 *              round robin algorithm); must be >= 1
 * @xfer_cb: transfer notification callback, this callback happens
 *           on event boundaries
 *
 *           e.g. 1
 *
 *           out TD with 3 REs
 *
 *           RE1: EOT=0, EOB=0, CHAIN=1;
 *           RE2: EOT=0, EOB=0, CHAIN=1;
 *           RE3: EOT=1, EOB=0, CHAIN=0;
 *
 *           the callback will be triggered for RE3 using the
 *           xfer_user_data of that RE
 *
 *           e.g. 2
 *
 *           in REs
 *
 *           RE1: EOT=1, EOB=0, CHAIN=0;
 *           RE2: EOT=1, EOB=0, CHAIN=0;
 *           RE3: EOT=1, EOB=0, CHAIN=0;
 *
 *           received packet consumes all of RE1, RE2 and part of RE3
 *           for EOT condition. there will be three callbacks in below
 *           order
 *
 *           callback for RE1 using GSI_CHAN_EVT_OVERFLOW
 *           callback for RE2 using GSI_CHAN_EVT_OVERFLOW
 *           callback for RE3 using GSI_CHAN_EVT_EOT
 *
 * @err_cb: error notification callback
 * @chan_user_data: cookie used for notifications
 *
 * All the callbacks are in interrupt context
 *
 */
struct gsi_chan_props {
	enum gsi_chan_prot prot;
	enum gsi_chan_dir dir;
	uint8_t ch_id;
	unsigned long evt_ring_hdl;
	enum gsi_chan_ring_elem_size re_size;
	uint16_t ring_len;
	uint16_t max_re_expected;
	uint64_t ring_base_addr;
	void *ring_base_vaddr;
	enum gsi_chan_use_db_eng use_db_eng;
	enum gsi_max_prefetch max_prefetch;
	uint8_t low_weight;
	void (*xfer_cb)(struct gsi_chan_xfer_notify *notify);
	void (*err_cb)(struct gsi_chan_err_notify *notify);
	void *chan_user_data;
};
360
/*
 * Per-element flags for gsi_xfer_elem.flags (OR of the applicable bits).
 * See the field documentation on struct gsi_xfer_elem for the exact
 * semantics of each flag.
 */
enum gsi_xfer_flag {
	GSI_XFER_FLAG_CHAIN = (1 << 0),	/* element is part of a chained TD */
	GSI_XFER_FLAG_EOB = (1 << 8),	/* interrupt on end of block */
	GSI_XFER_FLAG_EOT = (1 << 9),	/* interrupt on end of transfer */
	GSI_XFER_FLAG_BEI = (1 << 10)	/* block event interrupt */
};
367
/* Kind of payload a ring element carries; see struct gsi_xfer_elem. */
enum gsi_xfer_elem_type {
	GSI_XFER_ELEM_DATA = 0,		/* data transfer */
	GSI_XFER_ELEM_IMME_CMD = 1,	/* IPA immediate command */
	GSI_XFER_ELEM_NOP = 2,		/* event generation only */
};
373
/**
 * gsi_xfer_elem - Metadata about a single transfer
 *
 * @addr: physical address of buffer
 * @len: size of buffer for GSI_XFER_ELEM_DATA:
 *       for outbound transfers this is the number of bytes to
 *       transfer.
 *       for inbound transfers, this is the maximum number of
 *       bytes the host expects from device in this transfer
 *
 *       immediate command opcode for GSI_XFER_ELEM_IMME_CMD
 * @flags: transfer flags, OR of all the applicable flags
 *
 *         GSI_XFER_FLAG_BEI: Block event interrupt
 *         1: Event generated by this ring element must not assert
 *         an interrupt to the host
 *         0: Event generated by this ring element must assert an
 *         interrupt to the host
 *
 *         GSI_XFER_FLAG_EOT: Interrupt on end of transfer
 *         1: If an EOT condition is encountered when processing
 *         this ring element, an event is generated by the device
 *         with its completion code set to EOT.
 *         0: If an EOT condition is encountered for this ring
 *         element, a completion event is not generated by the
 *         device, unless IEOB is 1
 *
 *         GSI_XFER_FLAG_EOB: Interrupt on end of block
 *         1: Device notifies host after processing this ring element
 *         by sending a completion event
 *         0: Completion event is not required after processing this
 *         ring element
 *
 *         GSI_XFER_FLAG_CHAIN: Chain bit that identifies the ring
 *         elements in a TD
 *
 * @type: transfer type
 *
 *        GSI_XFER_ELEM_DATA: for all data transfers
 *        GSI_XFER_ELEM_IMME_CMD: for IPA immediate commands
 *        GSI_XFER_ELEM_NOP: for event generation only
 *
 * @xfer_user_data: cookie used in xfer_cb
 *
 */
struct gsi_xfer_elem {
	uint64_t addr;
	uint16_t len;
	uint16_t flags;
	enum gsi_xfer_elem_type type;
	void *xfer_user_data;
};
426
/**
 * gsi_gpi_channel_scratch - GPI protocol SW config area of
 * channel scratch
 *
 * @max_outstanding_tre: Used for the prefetch management sequence by the
 *                       sequencer. Defines the maximum number of allowed
 *                       outstanding TREs in IPA/GSI (in Bytes). RE engine
 *                       prefetch will be limited by this configuration. It
 *                       is suggested to configure this value to IPA_IF
 *                       channel TLV queue size times element size. To disable
 *                       the feature in doorbell mode (DB Mode=1), maximum
 *                       outstanding TREs should be set to 64KB
 *                       (or any value larger than or equal to ring
 *                       length, RLEN)
 * @outstanding_threshold: Used for the prefetch management sequence by the
 *                         sequencer. Defines the threshold (in Bytes) as to
 *                         when to update the channel doorbell. Should be
 *                         smaller than the max_outstanding_tre value. It is
 *                         suggested to configure this value to 2 * element
 *                         size.
 */
struct __packed gsi_gpi_channel_scratch {
	uint64_t resvd1;
	uint32_t resvd2:16;
	uint32_t max_outstanding_tre:16;
	uint32_t resvd3:16;
	uint32_t outstanding_threshold:16;
};
453
/**
 * gsi_mhi_channel_scratch - MHI protocol SW config area of
 * channel scratch
 *
 * @mhi_host_wp_addr: Valid only when UL/DL Sync En is asserted. Defines
 *                    address in host from which channel write pointer
 *                    should be read in polling mode
 * @assert_bit40: 1: bit #41 in address should be asserted upon
 *                IPA_IF.ProcessDescriptor routine (for MHI over PCIe
 *                transfers)
 *                0: bit #41 in address should be deasserted upon
 *                IPA_IF.ProcessDescriptor routine (for non-MHI over
 *                PCIe transfers)
 * @polling_configuration: Uplink channels: Defines timer to poll on MHI
 *                         context. Range: 1 to 31 milliseconds.
 *                         Downlink channel: Defines transfer ring buffer
 *                         availability threshold to poll on MHI context in
 *                         multiple of 8. Range: 0 to 31, meaning 0 to 258
 *                         ring elements. E.g., value of 2 indicates 16 ring
 *                         elements. Valid only when Burst Mode Enabled is
 *                         set to 1
 * @burst_mode_enabled: 0: Burst mode is disabled for this channel
 *                      1: Burst mode is enabled for this channel
 * @polling_mode: 0: the channel is not in polling mode, meaning the
 *                host should ring DBs.
 *                1: the channel is in polling mode, meaning the host
 *                should not ring DBs until notified of DB mode/OOB mode
 * @oob_mod_threshold: Defines OOB moderation threshold. Units are in 8
 *                     ring elements.
 * @max_outstanding_tre: Used for the prefetch management sequence by the
 *                       sequencer. Defines the maximum number of allowed
 *                       outstanding TREs in IPA/GSI (in Bytes). RE engine
 *                       prefetch will be limited by this configuration. It
 *                       is suggested to configure this value to IPA_IF
 *                       channel TLV queue size times element size.
 *                       To disable the feature in doorbell mode (DB Mode=1),
 *                       maximum outstanding TREs should be set to 64KB
 *                       (or any value larger than or equal to ring
 *                       length, RLEN)
 * @outstanding_threshold: Used for the prefetch management sequence by the
 *                         sequencer. Defines the threshold (in Bytes) as to
 *                         when to update the channel doorbell. Should be
 *                         smaller than the max_outstanding_tre value. It is
 *                         suggested to configure this value to
 *                         min(TLV_FIFO_SIZE/2,8) * element size.
 */
struct __packed gsi_mhi_channel_scratch {
	uint64_t mhi_host_wp_addr;
	uint32_t rsvd1:1;
	uint32_t assert_bit40:1;
	uint32_t polling_configuration:5;
	uint32_t burst_mode_enabled:1;
	uint32_t polling_mode:1;
	uint32_t oob_mod_threshold:5;
	uint32_t resvd2:2;
	uint32_t max_outstanding_tre:16;
	uint32_t resvd3:16;
	uint32_t outstanding_threshold:16;
};
511
/**
 * gsi_xdci_channel_scratch - xDCI protocol SW config area of
 * channel scratch
 *
 * @const_buffer_size: TRB buffer size in KB (similar to IPA aggregation
 *                     configuration). Must be aligned to Max USB Packet Size
 * @xferrscidx: Transfer Resource Index (XferRscIdx). The hardware-assigned
 *              transfer resource index for the transfer, which was
 *              returned in response to the Start Transfer command.
 *              This field is used for "Update Transfer" command
 * @last_trb_addr: Address (LSB - based on alignment restrictions) of
 *                 last TRB in queue. Used to identify rollover case
 * @depcmd_low_addr: Used to generate "Update Transfer" command
 * @max_outstanding_tre: Used for the prefetch management sequence by the
 *                       sequencer. Defines the maximum number of allowed
 *                       outstanding TREs in IPA/GSI (in Bytes). RE engine
 *                       prefetch will be limited by this configuration. It
 *                       is suggested to configure this value to IPA_IF
 *                       channel TLV queue size times element size.
 *                       To disable the feature in doorbell mode (DB Mode=1),
 *                       maximum outstanding TREs should be set to 64KB
 *                       (or any value larger than or equal to ring
 *                       length, RLEN)
 * @depcmd_hi_addr: Used to generate "Update Transfer" command
 * @outstanding_threshold: Used for the prefetch management sequence by the
 *                         sequencer. Defines the threshold (in Bytes) as to
 *                         when to update the channel doorbell. Should be
 *                         smaller than the max_outstanding_tre value. It is
 *                         suggested to configure this value to 2 * element
 *                         size. for MBIM the suggested configuration is the
 *                         element size.
 */
struct __packed gsi_xdci_channel_scratch {
	uint32_t last_trb_addr:16;
	uint32_t resvd1:4;
	uint32_t xferrscidx:7;
	uint32_t const_buffer_size:5;
	uint32_t depcmd_low_addr;
	uint32_t depcmd_hi_addr:8;
	uint32_t resvd2:8;
	uint32_t max_outstanding_tre:16;
	uint32_t resvd3:16;
	uint32_t outstanding_threshold:16;
};
554
/**
 * gsi_channel_scratch - channel scratch SW config area
 *
 * Per-protocol views (gpi/mhi/xdci) overlay the same four scratch
 * words; @data gives raw word access.
 */
union __packed gsi_channel_scratch {
	struct __packed gsi_gpi_channel_scratch gpi;
	struct __packed gsi_mhi_channel_scratch mhi;
	struct __packed gsi_xdci_channel_scratch xdci;
	struct __packed {
		uint32_t word1;
		uint32_t word2;
		uint32_t word3;
		uint32_t word4;
	} data;
};
570
/**
 * gsi_mhi_evt_scratch - MHI protocol SW config area of
 * event scratch
 *
 * Both words are reserved for MHI.
 */
struct __packed gsi_mhi_evt_scratch {
	uint32_t resvd1;
	uint32_t resvd2;
};
579
/**
 * gsi_xdci_evt_scratch - xDCI protocol SW config area of
 * event scratch
 *
 * @gevntcount_low_addr: low 32 bits of the GEVNTCOUNT register address
 * @gevntcount_hi_addr: high 8 bits of the GEVNTCOUNT register address
 */
struct __packed gsi_xdci_evt_scratch {
	uint32_t gevntcount_low_addr;
	uint32_t gevntcount_hi_addr:8;
	uint32_t resvd1:24;
};
590
/**
 * gsi_evt_scratch - event scratch SW config area
 *
 * Per-protocol views (mhi/xdci) overlay the same two scratch words;
 * @data gives raw word access.
 */
union __packed gsi_evt_scratch {
	struct __packed gsi_mhi_evt_scratch mhi;
	struct __packed gsi_xdci_evt_scratch xdci;
	struct __packed {
		uint32_t word1;
		uint32_t word2;
	} data;
};
603
/**
 * gsi_device_scratch - EE scratch config parameters
 *
 * @mhi_base_chan_idx_valid: is mhi_base_chan_idx valid?
 * @mhi_base_chan_idx: base index of IPA MHI channel indexes.
 *                     IPA MHI channel index = GSI channel ID +
 *                     MHI base channel index
 * @max_usb_pkt_size_valid: is max_usb_pkt_size valid?
 * @max_usb_pkt_size: max USB packet size in bytes (valid values are
 *                    512 and 1024)
 */
struct gsi_device_scratch {
	bool mhi_base_chan_idx_valid;
	uint8_t mhi_base_chan_idx;
	bool max_usb_pkt_size_valid;
	uint16_t max_usb_pkt_size;
};
621
/**
 * gsi_chan_info - information about channel occupancy
 *
 * @wp: channel write pointer (physical address)
 * @rp: channel read pointer (physical address)
 * @evt_valid: is evt* info valid?
 * @evt_wp: event ring write pointer (physical address)
 * @evt_rp: event ring read pointer (physical address)
 */
struct gsi_chan_info {
	uint64_t wp;
	uint64_t rp;
	bool evt_valid;	/* false when no event ring is associated */
	uint64_t evt_wp;
	uint64_t evt_rp;
};
638
639#ifdef CONFIG_GSI
/**
 * gsi_register_device - Peripheral should call this function to
 * register itself with GSI before invoking any other APIs
 *
 * @props: Peripheral properties
 * @dev_hdl: Handle populated by GSI, opaque to client
 *
 * @Return GSI_STATUS_SUCCESS on success
 *	   -GSI_STATUS_AGAIN if request should be re-tried later
 *	   other error codes for failure
 */
int gsi_register_device(struct gsi_per_props *props, unsigned long *dev_hdl);

/**
 * gsi_complete_clk_grant - Peripheral should call this function to
 * grant the clock resource requested by GSI previously that could not
 * be granted synchronously. GSI will release the clock resource using
 * the rel_clk_cb when appropriate
 *
 * @dev_hdl:	   Client handle previously obtained from
 *		   gsi_register_device
 *
 * @Return gsi_status
 */
int gsi_complete_clk_grant(unsigned long dev_hdl);

/**
 * gsi_write_device_scratch - Peripheral should call this function to
 * write to the EE scratch area
 *
 * @dev_hdl:  Client handle previously obtained from
 *	      gsi_register_device
 * @val:      Value to write
 *
 * @Return gsi_status
 */
int gsi_write_device_scratch(unsigned long dev_hdl,
		struct gsi_device_scratch *val);

/**
 * gsi_deregister_device - Peripheral should call this function to
 * de-register itself with GSI
 *
 * @dev_hdl:  Client handle previously obtained from
 *	      gsi_register_device
 * @force:    When set to true, cleanup is performed even if there
 *	      are in use resources like channels, event rings, etc.
 *	      this would be used after GSI reset to recover from some
 *	      fatal error
 *	      When set to false, there must not exist any allocated
 *	      channels and event rings.
 *
 * @Return gsi_status
 */
int gsi_deregister_device(unsigned long dev_hdl, bool force);
694
/**
 * gsi_alloc_evt_ring - Peripheral should call this function to
 * allocate an event ring
 *
 * @props:	   Event ring properties
 * @dev_hdl:	   Client handle previously obtained from
 *		   gsi_register_device
 * @evt_ring_hdl:  Handle populated by GSI, opaque to client
 *
 * This function can sleep
 *
 * @Return gsi_status
 */
int gsi_alloc_evt_ring(struct gsi_evt_ring_props *props, unsigned long dev_hdl,
		unsigned long *evt_ring_hdl);

/**
 * gsi_write_evt_ring_scratch - Peripheral should call this function to
 * write to the scratch area of the event ring context
 *
 * @evt_ring_hdl:  Client handle previously obtained from
 *		   gsi_alloc_evt_ring
 * @val:           Value to write
 *
 * @Return gsi_status
 */
int gsi_write_evt_ring_scratch(unsigned long evt_ring_hdl,
		union __packed gsi_evt_scratch val);

/**
 * gsi_dealloc_evt_ring - Peripheral should call this function to
 * de-allocate an event ring. There should not exist any active
 * channels using this event ring
 *
 * @evt_ring_hdl:  Client handle previously obtained from
 *		   gsi_alloc_evt_ring
 *
 * This function can sleep
 *
 * @Return gsi_status
 */
int gsi_dealloc_evt_ring(unsigned long evt_ring_hdl);

/**
 * gsi_query_evt_ring_db_addr - Peripheral should call this function to
 * query the physical addresses of the event ring doorbell registers
 *
 * @evt_ring_hdl:    Client handle previously obtained from
 *		     gsi_alloc_evt_ring
 * @db_addr_wp_lsb:  Physical address of doorbell register where the 32
 *                   LSBs of the doorbell value should be written
 * @db_addr_wp_msb:  Physical address of doorbell register where the 32
 *                   MSBs of the doorbell value should be written
 *
 * @Return gsi_status
 */
int gsi_query_evt_ring_db_addr(unsigned long evt_ring_hdl,
		uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb);

/**
 * gsi_reset_evt_ring - Peripheral should call this function to
 * reset an event ring to recover from error state
 *
 * @evt_ring_hdl:  Client handle previously obtained from
 *		   gsi_alloc_evt_ring
 *
 * This function can sleep
 *
 * @Return gsi_status
 */
int gsi_reset_evt_ring(unsigned long evt_ring_hdl);

/**
 * gsi_get_evt_ring_cfg - This function returns the current config
 * of the specified event ring
 *
 * @evt_ring_hdl:  Client handle previously obtained from
 *		   gsi_alloc_evt_ring
 * @props:         where to copy properties to
 * @scr:           where to copy scratch info to
 *
 * @Return gsi_status
 */
int gsi_get_evt_ring_cfg(unsigned long evt_ring_hdl,
		struct gsi_evt_ring_props *props, union gsi_evt_scratch *scr);

/**
 * gsi_set_evt_ring_cfg - This function applies the supplied config
 * to the specified event ring.
 *
 * exclusive property of the event ring cannot be changed after
 * gsi_alloc_evt_ring
 *
 * @evt_ring_hdl:  Client handle previously obtained from
 *		   gsi_alloc_evt_ring
 * @props:         the properties to apply
 * @scr:           the scratch info to apply
 *
 * @Return gsi_status
 */
int gsi_set_evt_ring_cfg(unsigned long evt_ring_hdl,
		struct gsi_evt_ring_props *props, union gsi_evt_scratch *scr);
797
/**
 * gsi_alloc_channel - Peripheral should call this function to
 * allocate a channel
 *
 * @props:     Channel properties
 * @dev_hdl:   Client handle previously obtained from
 *             gsi_register_device
 * @chan_hdl:  Handle populated by GSI, opaque to client
 *
 * This function can sleep
 *
 * @Return gsi_status
 */
int gsi_alloc_channel(struct gsi_chan_props *props, unsigned long dev_hdl,
		unsigned long *chan_hdl);

/**
 * gsi_write_channel_scratch - Peripheral should call this function to
 * write to the scratch area of the channel context
 *
 * @chan_hdl:  Client handle previously obtained from
 *             gsi_alloc_channel
 * @val:       Value to write
 *
 * @Return gsi_status
 */
int gsi_write_channel_scratch(unsigned long chan_hdl,
		union __packed gsi_channel_scratch val);

/**
 * gsi_start_channel - Peripheral should call this function to
 * start a channel i.e put into running state
 *
 * @chan_hdl:  Client handle previously obtained from
 *             gsi_alloc_channel
 *
 * This function can sleep
 *
 * @Return gsi_status
 */
int gsi_start_channel(unsigned long chan_hdl);

/**
 * gsi_stop_channel - Peripheral should call this function to
 * stop a channel. Stop will happen on a packet boundary
 *
 * @chan_hdl:  Client handle previously obtained from
 *             gsi_alloc_channel
 *
 * This function can sleep
 *
 * @Return -GSI_STATUS_AGAIN if client should call stop/stop_db again
 *	   other error codes for failure
 */
int gsi_stop_channel(unsigned long chan_hdl);

/**
 * gsi_reset_channel - Peripheral should call this function to
 * reset a channel to recover from error state
 *
 * @chan_hdl:  Client handle previously obtained from
 *             gsi_alloc_channel
 *
 * This function can sleep
 *
 * @Return gsi_status
 */
int gsi_reset_channel(unsigned long chan_hdl);

/**
 * gsi_dealloc_channel - Peripheral should call this function to
 * de-allocate a channel
 *
 * @chan_hdl:  Client handle previously obtained from
 *             gsi_alloc_channel
 *
 * This function can sleep
 *
 * @Return gsi_status
 */
int gsi_dealloc_channel(unsigned long chan_hdl);

/**
 * gsi_stop_db_channel - Peripheral should call this function to
 * stop a channel when all transfer elements till the doorbell
 * have been processed
 *
 * @chan_hdl:  Client handle previously obtained from
 *             gsi_alloc_channel
 *
 * This function can sleep
 *
 * @Return -GSI_STATUS_AGAIN if client should call stop/stop_db again
 *	   other error codes for failure
 */
int gsi_stop_db_channel(unsigned long chan_hdl);
894
/**
 * gsi_query_channel_db_addr - Peripheral should call this function to
 * query the physical addresses of the channel doorbell registers
 *
 * @chan_hdl:        Client handle previously obtained from
 *	             gsi_alloc_channel
 * @db_addr_wp_lsb:  Physical address of doorbell register where the 32
 *                   LSBs of the doorbell value should be written
 * @db_addr_wp_msb:  Physical address of doorbell register where the 32
 *                   MSBs of the doorbell value should be written
 *
 * @Return gsi_status
 */
int gsi_query_channel_db_addr(unsigned long chan_hdl,
		uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb);

/**
 * gsi_query_channel_info - Peripheral can call this function to query the
 * channel and associated event ring (if any) status.
 *
 * @chan_hdl:  Client handle previously obtained from
 *             gsi_alloc_channel
 * @info:      Where to read the values into
 *
 * @Return gsi_status
 */
int gsi_query_channel_info(unsigned long chan_hdl,
		struct gsi_chan_info *info);

/**
 * gsi_is_channel_empty - Peripheral can call this function to query if
 * the channel is empty. This is only applicable to GPI. "Empty" means
 * GSI has consumed all descriptors for a TO_GSI channel and SW has
 * processed all completed descriptors for a FROM_GSI channel.
 *
 * @chan_hdl:  Client handle previously obtained from gsi_alloc_channel
 * @is_empty:  set by GSI based on channel emptiness
 *
 * @Return gsi_status
 */
int gsi_is_channel_empty(unsigned long chan_hdl, bool *is_empty);

/**
 * gsi_get_channel_cfg - This function returns the current config
 * of the specified channel
 *
 * @chan_hdl:  Client handle previously obtained from
 *             gsi_alloc_channel
 * @props:     where to copy properties to
 * @scr:       where to copy scratch info to
 *
 * @Return gsi_status
 */
int gsi_get_channel_cfg(unsigned long chan_hdl, struct gsi_chan_props *props,
		union gsi_channel_scratch *scr);

/**
 * gsi_set_channel_cfg - This function applies the supplied config
 * to the specified channel
 *
 * ch_id and evt_ring_hdl of the channel cannot be changed after
 * gsi_alloc_channel
 *
 * @chan_hdl:  Client handle previously obtained from
 *             gsi_alloc_channel
 * @props:     the properties to apply
 * @scr:       the scratch info to apply
 *
 * @Return gsi_status
 */
int gsi_set_channel_cfg(unsigned long chan_hdl, struct gsi_chan_props *props,
		union gsi_channel_scratch *scr);
967
/**
 * gsi_poll_channel - Peripheral should call this function to query for
 * completed transfer descriptors.
 *
 * @chan_hdl:  Client handle previously obtained from
 *             gsi_alloc_channel
 * @notify:    Information about the completed transfer if any
 *
 * @Return gsi_status (GSI_STATUS_POLL_EMPTY is returned if no transfers
 * completed)
 */
int gsi_poll_channel(unsigned long chan_hdl,
		struct gsi_chan_xfer_notify *notify);

/**
 * gsi_config_channel_mode - Peripheral should call this function
 * to configure the channel mode.
 *
 * @chan_hdl:  Client handle previously obtained from
 *             gsi_alloc_channel
 * @mode:      Mode to move the channel into (callback or poll)
 *
 * @Return gsi_status
 */
int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode);

/**
 * gsi_queue_xfer - Peripheral should call this function
 * to queue transfers on the given channel
 *
 * @chan_hdl:   Client handle previously obtained from
 *              gsi_alloc_channel
 * @num_xfers:  Number of transfer in the array @xfer
 * @xfer:       Array of num_xfers transfer descriptors
 * @ring_db:    If true, tell HW about these queued xfers
 *              If false, do not notify HW at this time
 *              (gsi_start_xfer can be used to notify HW later)
 *
 * @Return gsi_status
 */
int gsi_queue_xfer(unsigned long chan_hdl, uint16_t num_xfers,
		struct gsi_xfer_elem *xfer, bool ring_db);

/**
 * gsi_start_xfer - Peripheral should call this function to
 * inform HW about queued xfers
 *
 * @chan_hdl:  Client handle previously obtained from
 *             gsi_alloc_channel
 *
 * @Return gsi_status
 */
int gsi_start_xfer(unsigned long chan_hdl);

/**
 * gsi_configure_regs - Peripheral should call this function
 * to configure the GSI registers before/after the FW is
 * loaded but before it is enabled.
 *
 * @gsi_base_addr:  Base address of GSI register space
 * @gsi_size:       Mapping size of the GSI register space
 * @per_base_addr:  Base address of the peripheral using GSI
 *
 * @Return gsi_status
 */
int gsi_configure_regs(phys_addr_t gsi_base_addr, u32 gsi_size,
		phys_addr_t per_base_addr);
1034
1035/**
1036 * gsi_enable_fw - Peripheral should call this function
1037 * to enable the GSI FW after the FW has been loaded to the SRAM.
1038 *
1039 * @gsi_base_addr: Base address of GSI register space
1040 * @gsi_size: Mapping size of the GSI register space
Amir Levy85dcd172016-12-06 17:47:39 +02001041 * @ver: GSI core version
 *
1043 * @Return gsi_status
1044 */
Amir Levy85dcd172016-12-06 17:47:39 +02001045int gsi_enable_fw(phys_addr_t gsi_base_addr, u32 gsi_size, enum gsi_ver ver);
Amir Levycdccd632016-10-30 09:36:41 +02001046
Ghanim Fodi37b64952017-01-24 15:42:30 +02001047/**
1048 * gsi_get_inst_ram_offset_and_size - Peripheral should call this function
1049 * to get instruction RAM base address offset and size. Peripheral typically
1050 * uses this info to load GSI FW into the IRAM.
1051 *
1052 * @base_offset:[OUT] - IRAM base offset address
1053 * @size: [OUT] - IRAM size
 *
1055 * @Return none
1056 */
1057void gsi_get_inst_ram_offset_and_size(unsigned long *base_offset,
1058 unsigned long *size);
1059
Skylar Changc9939cf2017-02-21 09:46:46 -08001060/**
1061 * gsi_halt_channel_ee - Peripheral should call this function
1062 * to stop other EE's channel. This is usually used in SSR clean
1063 *
1064 * @chan_idx: Virtual channel index
1065 * @ee: EE
1066 * @code: [out] response code for operation
 *
1068 * @Return gsi_status
1069 */
1070int gsi_halt_channel_ee(unsigned int chan_idx, unsigned int ee, int *code);
1071
Amir Levycdccd632016-10-30 09:36:41 +02001072/*
1073 * Here is a typical sequence of calls
1074 *
1075 * gsi_register_device
1076 *
1077 * gsi_write_device_scratch (if the protocol needs this)
1078 *
1079 * gsi_alloc_evt_ring (for as many event rings as needed)
1080 * gsi_write_evt_ring_scratch
1081 *
1082 * gsi_alloc_channel (for as many channels as needed; channels can have
1083 * no event ring, an exclusive event ring or a shared event ring)
1084 * gsi_write_channel_scratch
1085 * gsi_start_channel
1086 * gsi_queue_xfer/gsi_start_xfer
1087 * gsi_config_channel_mode/gsi_poll_channel (if clients wants to poll on
1088 * xfer completions)
1089 * gsi_stop_db_channel/gsi_stop_channel
1090 *
1091 * gsi_dealloc_channel
1092 *
1093 * gsi_dealloc_evt_ring
1094 *
1095 * gsi_deregister_device
1096 *
1097 */
1098#else
/* Stub for the no-GSI build (#else branch): registration is unsupported. */
static inline int gsi_register_device(struct gsi_per_props *props,
		unsigned long *dev_hdl)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
1104
/* Stub for the no-GSI build: always reports the operation as unsupported. */
static inline int gsi_complete_clk_grant(unsigned long dev_hdl)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
1109
/* Stub for the no-GSI build: scratch write is unsupported. */
static inline int gsi_write_device_scratch(unsigned long dev_hdl,
		struct gsi_device_scratch *val)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
1115
/* Stub for the no-GSI build: deregistration is unsupported. */
static inline int gsi_deregister_device(unsigned long dev_hdl, bool force)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
1120
/* Stub for the no-GSI build: event-ring allocation is unsupported. */
static inline int gsi_alloc_evt_ring(struct gsi_evt_ring_props *props,
		unsigned long dev_hdl,
		unsigned long *evt_ring_hdl)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
1127
/* Stub for the no-GSI build: event-ring scratch write is unsupported. */
static inline int gsi_write_evt_ring_scratch(unsigned long evt_ring_hdl,
		union __packed gsi_evt_scratch val)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
1133
/* Stub for the no-GSI build: event-ring deallocation is unsupported. */
static inline int gsi_dealloc_evt_ring(unsigned long evt_ring_hdl)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
1138
/*
 * Stub for the no-GSI build: unsupported. Note the out-parameters
 * db_addr_wp_lsb/db_addr_wp_msb are left untouched.
 */
static inline int gsi_query_evt_ring_db_addr(unsigned long evt_ring_hdl,
		uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
1144
/* Stub for the no-GSI build: event-ring reset is unsupported. */
static inline int gsi_reset_evt_ring(unsigned long evt_ring_hdl)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
1149
/* Stub for the no-GSI build: channel allocation is unsupported. */
static inline int gsi_alloc_channel(struct gsi_chan_props *props,
		unsigned long dev_hdl,
		unsigned long *chan_hdl)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
1156
/* Stub for the no-GSI build: channel scratch write is unsupported. */
static inline int gsi_write_channel_scratch(unsigned long chan_hdl,
		union __packed gsi_channel_scratch val)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
1162
/* Stub for the no-GSI build: channel start is unsupported. */
static inline int gsi_start_channel(unsigned long chan_hdl)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
1167
/* Stub for the no-GSI build: channel stop is unsupported. */
static inline int gsi_stop_channel(unsigned long chan_hdl)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
1172
/* Stub for the no-GSI build: channel reset is unsupported. */
static inline int gsi_reset_channel(unsigned long chan_hdl)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
1177
/* Stub for the no-GSI build: channel deallocation is unsupported. */
static inline int gsi_dealloc_channel(unsigned long chan_hdl)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
1182
/* Stub for the no-GSI build: doorbell-stop is unsupported. */
static inline int gsi_stop_db_channel(unsigned long chan_hdl)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
1187
/*
 * Stub for the no-GSI build: unsupported. Note the out-parameters
 * db_addr_wp_lsb/db_addr_wp_msb are left untouched.
 */
static inline int gsi_query_channel_db_addr(unsigned long chan_hdl,
		uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
1193
/* Stub for the no-GSI build: unsupported; *info is not written. */
static inline int gsi_query_channel_info(unsigned long chan_hdl,
		struct gsi_chan_info *info)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
1199
/* Stub for the no-GSI build: unsupported; *is_empty is not written. */
static inline int gsi_is_channel_empty(unsigned long chan_hdl, bool *is_empty)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
1204
/* Stub for the no-GSI build: unsupported; *notify is not written. */
static inline int gsi_poll_channel(unsigned long chan_hdl,
		struct gsi_chan_xfer_notify *notify)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
1210
/* Stub for the no-GSI build: mode change is unsupported. */
static inline int gsi_config_channel_mode(unsigned long chan_hdl,
		enum gsi_chan_mode mode)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
1216
/* Stub for the no-GSI build: transfer queuing is unsupported. */
static inline int gsi_queue_xfer(unsigned long chan_hdl, uint16_t num_xfers,
		struct gsi_xfer_elem *xfer, bool ring_db)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
1222
/* Stub for the no-GSI build: transfer start is unsupported. */
static inline int gsi_start_xfer(unsigned long chan_hdl)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
1227
/* Stub for the no-GSI build: unsupported; *props and *scr are not written. */
static inline int gsi_get_channel_cfg(unsigned long chan_hdl,
		struct gsi_chan_props *props,
		union gsi_channel_scratch *scr)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
1234
/* Stub for the no-GSI build: channel reconfiguration is unsupported. */
static inline int gsi_set_channel_cfg(unsigned long chan_hdl,
		struct gsi_chan_props *props,
		union gsi_channel_scratch *scr)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
1241
/* Stub for the no-GSI build: unsupported; *props and *scr are not written. */
static inline int gsi_get_evt_ring_cfg(unsigned long evt_ring_hdl,
	struct gsi_evt_ring_props *props, union gsi_evt_scratch *scr)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
1247
/* Stub for the no-GSI build: event-ring reconfiguration is unsupported. */
static inline int gsi_set_evt_ring_cfg(unsigned long evt_ring_hdl,
	struct gsi_evt_ring_props *props, union gsi_evt_scratch *scr)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
1253
/* Stub for the no-GSI build: register configuration is unsupported. */
static inline int gsi_configure_regs(phys_addr_t gsi_base_addr, u32 gsi_size,
		phys_addr_t per_base_addr)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
Ghanim Fodi37b64952017-01-24 15:42:30 +02001259
Amir Levycdccd632016-10-30 09:36:41 +02001260static inline int gsi_enable_fw(phys_addr_t gsi_base_addr, u32 gsi_size)
1261{
1262 return -GSI_STATUS_UNSUPPORTED_OP;
1263}
Ghanim Fodi37b64952017-01-24 15:42:30 +02001264
/*
 * Stub for the no-GSI build: no-op.
 * NOTE(review): *base_offset and *size are left uninitialized here —
 * callers must not rely on them in this configuration.
 */
static inline void gsi_get_inst_ram_offset_and_size(unsigned long *base_offset,
		unsigned long *size)
{
}
Skylar Changc9939cf2017-02-21 09:46:46 -08001269
/* Stub for the no-GSI build: unsupported; *code is not written. */
static inline int gsi_halt_channel_ee(unsigned int chan_idx, unsigned int ee,
	int *code)
{
	return -GSI_STATUS_UNSUPPORTED_OP;
}
Amir Levycdccd632016-10-30 09:36:41 +02001275#endif
1276#endif