blob: 9c133a8733ee5f063a3d7bcf08bd7ad5e2e80669 [file] [log] [blame]
Skylar Chang4b98d922017-01-16 10:29:49 -08001/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
Amir Levycdccd632016-10-30 09:36:41 +02002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/of.h>
14#include <linux/interrupt.h>
15#include <linux/io.h>
16#include <linux/log2.h>
17#include <linux/module.h>
18#include <linux/msm_gsi.h>
19#include <linux/platform_device.h>
Skylar Chang1feb2a32016-10-17 10:01:36 -070020#include <linux/delay.h>
Amir Levycdccd632016-10-30 09:36:41 +020021#include "gsi.h"
22#include "gsi_reg.h"
23
24#define GSI_CMD_TIMEOUT (5*HZ)
Skylar Chang5ce9a6d2017-03-09 10:57:53 -080025#define GSI_STOP_CMD_TIMEOUT_MS 20
Amir Levycdccd632016-10-30 09:36:41 +020026#define GSI_MAX_CH_LOW_WEIGHT 15
27#define GSI_MHI_ER_START 10
28#define GSI_MHI_ER_END 16
29
Skylar Chang1feb2a32016-10-17 10:01:36 -070030#define GSI_RESET_WA_MIN_SLEEP 1000
31#define GSI_RESET_WA_MAX_SLEEP 2000
/* Device-tree match table: binds this platform driver to "qcom,msm_gsi" nodes. */
static const struct of_device_id msm_gsi_match[] = {
	{ .compatible = "qcom,msm_gsi", },
	{ },
};

/* Global driver context, shared by every API in this file; set up at probe. */
struct gsi_ctx *gsi_ctx;
38
39static void __gsi_config_type_irq(int ee, uint32_t mask, uint32_t val)
40{
41 uint32_t curr;
42
43 curr = gsi_readl(gsi_ctx->base +
44 GSI_EE_n_CNTXT_TYPE_IRQ_MSK_OFFS(ee));
45 gsi_writel((curr & ~mask) | (val & mask), gsi_ctx->base +
46 GSI_EE_n_CNTXT_TYPE_IRQ_MSK_OFFS(ee));
47}
48
49static void __gsi_config_ch_irq(int ee, uint32_t mask, uint32_t val)
50{
51 uint32_t curr;
52
53 curr = gsi_readl(gsi_ctx->base +
54 GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_OFFS(ee));
55 gsi_writel((curr & ~mask) | (val & mask), gsi_ctx->base +
56 GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_OFFS(ee));
57}
58
59static void __gsi_config_evt_irq(int ee, uint32_t mask, uint32_t val)
60{
61 uint32_t curr;
62
63 curr = gsi_readl(gsi_ctx->base +
64 GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS(ee));
65 gsi_writel((curr & ~mask) | (val & mask), gsi_ctx->base +
66 GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS(ee));
67}
68
69static void __gsi_config_ieob_irq(int ee, uint32_t mask, uint32_t val)
70{
71 uint32_t curr;
72
73 curr = gsi_readl(gsi_ctx->base +
74 GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(ee));
75 gsi_writel((curr & ~mask) | (val & mask), gsi_ctx->base +
76 GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(ee));
77}
78
79static void __gsi_config_glob_irq(int ee, uint32_t mask, uint32_t val)
80{
81 uint32_t curr;
82
83 curr = gsi_readl(gsi_ctx->base +
84 GSI_EE_n_CNTXT_GLOB_IRQ_EN_OFFS(ee));
85 gsi_writel((curr & ~mask) | (val & mask), gsi_ctx->base +
86 GSI_EE_n_CNTXT_GLOB_IRQ_EN_OFFS(ee));
87}
88
89static void __gsi_config_gen_irq(int ee, uint32_t mask, uint32_t val)
90{
91 uint32_t curr;
92
93 curr = gsi_readl(gsi_ctx->base +
94 GSI_EE_n_CNTXT_GSI_IRQ_EN_OFFS(ee));
95 gsi_writel((curr & ~mask) | (val & mask), gsi_ctx->base +
96 GSI_EE_n_CNTXT_GSI_IRQ_EN_OFFS(ee));
97}
98
99static void gsi_handle_ch_ctrl(int ee)
100{
101 uint32_t ch;
102 int i;
103 uint32_t val;
104 struct gsi_chan_ctx *ctx;
105
106 ch = gsi_readl(gsi_ctx->base +
107 GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_OFFS(ee));
Skylar Changcfd46c12016-11-18 10:56:51 -0800108 gsi_writel(ch, gsi_ctx->base +
109 GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_OFFS(ee));
Amir Levycdccd632016-10-30 09:36:41 +0200110 GSIDBG("ch %x\n", ch);
111 for (i = 0; i < 32; i++) {
112 if ((1 << i) & ch) {
Skylar Chang22ecb822016-10-21 10:15:04 -0700113 if (i >= gsi_ctx->max_ch || i >= GSI_CHAN_MAX) {
114 GSIERR("invalid channel %d\n", i);
115 break;
116 }
117
Amir Levycdccd632016-10-30 09:36:41 +0200118 ctx = &gsi_ctx->chan[i];
119 val = gsi_readl(gsi_ctx->base +
120 GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS(i, ee));
121 ctx->state = (val &
122 GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_BMSK) >>
123 GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_SHFT;
124 GSIDBG("ch %u state updated to %u\n", i, ctx->state);
125 complete(&ctx->compl);
Skylar Chang22ecb822016-10-21 10:15:04 -0700126 gsi_ctx->ch_dbg[i].cmd_completed++;
Amir Levycdccd632016-10-30 09:36:41 +0200127 }
128 }
Amir Levycdccd632016-10-30 09:36:41 +0200129}
130
131static void gsi_handle_ev_ctrl(int ee)
132{
133 uint32_t ch;
134 int i;
135 uint32_t val;
136 struct gsi_evt_ctx *ctx;
137
138 ch = gsi_readl(gsi_ctx->base +
139 GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_OFFS(ee));
Skylar Changcfd46c12016-11-18 10:56:51 -0800140 gsi_writel(ch, gsi_ctx->base +
141 GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_OFFS(ee));
Amir Levycdccd632016-10-30 09:36:41 +0200142 GSIDBG("ev %x\n", ch);
143 for (i = 0; i < 32; i++) {
144 if ((1 << i) & ch) {
Skylar Chang22ecb822016-10-21 10:15:04 -0700145 if (i >= gsi_ctx->max_ev || i >= GSI_EVT_RING_MAX) {
146 GSIERR("invalid event %d\n", i);
147 break;
148 }
149
Amir Levycdccd632016-10-30 09:36:41 +0200150 ctx = &gsi_ctx->evtr[i];
151 val = gsi_readl(gsi_ctx->base +
152 GSI_EE_n_EV_CH_k_CNTXT_0_OFFS(i, ee));
153 ctx->state = (val &
154 GSI_EE_n_EV_CH_k_CNTXT_0_CHSTATE_BMSK) >>
155 GSI_EE_n_EV_CH_k_CNTXT_0_CHSTATE_SHFT;
156 GSIDBG("evt %u state updated to %u\n", i, ctx->state);
157 complete(&ctx->compl);
158 }
159 }
Amir Levycdccd632016-10-30 09:36:41 +0200160}
161
162static void gsi_handle_glob_err(uint32_t err)
163{
164 struct gsi_log_err *log;
165 struct gsi_chan_ctx *ch;
166 struct gsi_evt_ctx *ev;
167 struct gsi_chan_err_notify chan_notify;
168 struct gsi_evt_err_notify evt_notify;
169 struct gsi_per_notify per_notify;
170 uint32_t val;
171
172 log = (struct gsi_log_err *)&err;
173 GSIERR("log err_type=%u ee=%u idx=%u\n", log->err_type, log->ee,
174 log->virt_idx);
175 GSIERR("code=%u arg1=%u arg2=%u arg3=%u\n", log->code, log->arg1,
176 log->arg2, log->arg3);
177 switch (log->err_type) {
178 case GSI_ERR_TYPE_GLOB:
179 per_notify.evt_id = GSI_PER_EVT_GLOB_ERROR;
180 per_notify.user_data = gsi_ctx->per.user_data;
181 per_notify.data.err_desc = err & 0xFFFF;
182 gsi_ctx->per.notify_cb(&per_notify);
183 break;
184 case GSI_ERR_TYPE_CHAN:
Amir Levy41644242016-11-03 15:38:09 +0200185 if (log->virt_idx >= gsi_ctx->max_ch) {
186 GSIERR("Unexpected ch %d\n", log->virt_idx);
187 WARN_ON(1);
188 return;
189 }
190
Amir Levycdccd632016-10-30 09:36:41 +0200191 ch = &gsi_ctx->chan[log->virt_idx];
192 chan_notify.chan_user_data = ch->props.chan_user_data;
193 chan_notify.err_desc = err & 0xFFFF;
194 if (log->code == GSI_INVALID_TRE_ERR) {
195 BUG_ON(log->ee != gsi_ctx->per.ee);
196 val = gsi_readl(gsi_ctx->base +
197 GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS(log->virt_idx,
198 gsi_ctx->per.ee));
199 ch->state = (val &
200 GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_BMSK) >>
201 GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_SHFT;
202 GSIDBG("ch %u state updated to %u\n", log->virt_idx,
203 ch->state);
204 ch->stats.invalid_tre_error++;
205 BUG_ON(ch->state != GSI_CHAN_STATE_ERROR);
206 chan_notify.evt_id = GSI_CHAN_INVALID_TRE_ERR;
207 } else if (log->code == GSI_OUT_OF_BUFFERS_ERR) {
208 BUG_ON(log->ee != gsi_ctx->per.ee);
209 chan_notify.evt_id = GSI_CHAN_OUT_OF_BUFFERS_ERR;
210 } else if (log->code == GSI_OUT_OF_RESOURCES_ERR) {
211 BUG_ON(log->ee != gsi_ctx->per.ee);
212 chan_notify.evt_id = GSI_CHAN_OUT_OF_RESOURCES_ERR;
213 complete(&ch->compl);
214 } else if (log->code == GSI_UNSUPPORTED_INTER_EE_OP_ERR) {
215 chan_notify.evt_id =
216 GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR;
217 } else if (log->code == GSI_NON_ALLOCATED_EVT_ACCESS_ERR) {
218 BUG_ON(log->ee != gsi_ctx->per.ee);
219 chan_notify.evt_id =
220 GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR;
221 } else if (log->code == GSI_HWO_1_ERR) {
222 BUG_ON(log->ee != gsi_ctx->per.ee);
223 chan_notify.evt_id = GSI_CHAN_HWO_1_ERR;
224 } else {
225 BUG();
226 }
227 if (ch->props.err_cb)
228 ch->props.err_cb(&chan_notify);
229 else
230 WARN_ON(1);
231 break;
232 case GSI_ERR_TYPE_EVT:
Amir Levy41644242016-11-03 15:38:09 +0200233 if (log->virt_idx >= gsi_ctx->max_ev) {
234 GSIERR("Unexpected ev %d\n", log->virt_idx);
235 WARN_ON(1);
236 return;
237 }
238
Amir Levycdccd632016-10-30 09:36:41 +0200239 ev = &gsi_ctx->evtr[log->virt_idx];
240 evt_notify.user_data = ev->props.user_data;
241 evt_notify.err_desc = err & 0xFFFF;
242 if (log->code == GSI_OUT_OF_BUFFERS_ERR) {
243 BUG_ON(log->ee != gsi_ctx->per.ee);
244 evt_notify.evt_id = GSI_EVT_OUT_OF_BUFFERS_ERR;
245 } else if (log->code == GSI_OUT_OF_RESOURCES_ERR) {
246 BUG_ON(log->ee != gsi_ctx->per.ee);
247 evt_notify.evt_id = GSI_EVT_OUT_OF_RESOURCES_ERR;
248 complete(&ev->compl);
249 } else if (log->code == GSI_UNSUPPORTED_INTER_EE_OP_ERR) {
250 evt_notify.evt_id = GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR;
251 } else if (log->code == GSI_EVT_RING_EMPTY_ERR) {
252 BUG_ON(log->ee != gsi_ctx->per.ee);
253 evt_notify.evt_id = GSI_EVT_EVT_RING_EMPTY_ERR;
254 } else {
255 BUG();
256 }
257 if (ev->props.err_cb)
258 ev->props.err_cb(&evt_notify);
259 else
260 WARN_ON(1);
261 break;
262 default:
263 WARN_ON(1);
264 }
265}
266
/* GP_INT1 signals completion of a generic EE command; wake the waiter. */
static void gsi_handle_gp_int1(void)
{
	complete(&gsi_ctx->gen_ee_cmd_compl);
}
271
Amir Levycdccd632016-10-30 09:36:41 +0200272static void gsi_handle_glob_ee(int ee)
273{
274 uint32_t val;
275 uint32_t err;
276 struct gsi_per_notify notify;
277 uint32_t clr = ~0;
278
279 val = gsi_readl(gsi_ctx->base +
280 GSI_EE_n_CNTXT_GLOB_IRQ_STTS_OFFS(ee));
281
282 notify.user_data = gsi_ctx->per.user_data;
283
284 if (val & GSI_EE_n_CNTXT_GLOB_IRQ_STTS_ERROR_INT_BMSK) {
285 err = gsi_readl(gsi_ctx->base +
286 GSI_EE_n_ERROR_LOG_OFFS(ee));
Amir Levy41644242016-11-03 15:38:09 +0200287 if (gsi_ctx->per.ver >= GSI_VER_1_2)
288 gsi_writel(0, gsi_ctx->base +
289 GSI_EE_n_ERROR_LOG_OFFS(ee));
Amir Levycdccd632016-10-30 09:36:41 +0200290 gsi_writel(clr, gsi_ctx->base +
291 GSI_EE_n_ERROR_LOG_CLR_OFFS(ee));
292 gsi_handle_glob_err(err);
293 }
294
295 if (val & GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT1_BMSK) {
Skylar Changc9939cf2017-02-21 09:46:46 -0800296 gsi_handle_gp_int1();
Amir Levycdccd632016-10-30 09:36:41 +0200297 }
298
299 if (val & GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT2_BMSK) {
300 notify.evt_id = GSI_PER_EVT_GLOB_GP2;
301 gsi_ctx->per.notify_cb(&notify);
302 }
303
304 if (val & GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT3_BMSK) {
305 notify.evt_id = GSI_PER_EVT_GLOB_GP3;
306 gsi_ctx->per.notify_cb(&notify);
307 }
308
309 gsi_writel(val, gsi_ctx->base +
310 GSI_EE_n_CNTXT_GLOB_IRQ_CLR_OFFS(ee));
311}
312
/* Advance the local write pointer by one element, wrapping at ring end. */
static void gsi_incr_ring_wp(struct gsi_ring_ctx *ctx)
{
	ctx->wp_local += ctx->elem_sz;
	if (ctx->wp_local == ctx->end)
		ctx->wp_local = ctx->base;
}
319
/* Advance the local read pointer by one element, wrapping at ring end. */
static void gsi_incr_ring_rp(struct gsi_ring_ctx *ctx)
{
	ctx->rp_local += ctx->elem_sz;
	if (ctx->rp_local == ctx->end)
		ctx->rp_local = ctx->base;
}
326
/*
 * Convert a ring-element physical address into its element index.
 * @addr must lie inside [ctx->base, ctx->end) — enforced by BUG_ON.
 * NOTE(review): return type is uint16_t while the computation is 32-bit;
 * rings are presumably never large enough to truncate — confirm.
 */
uint16_t gsi_find_idx_from_addr(struct gsi_ring_ctx *ctx, uint64_t addr)
{
	BUG_ON(addr < ctx->base || addr >= ctx->end);

	return (uint32_t)(addr - ctx->base)/ctx->elem_sz;
}
333
334static void gsi_process_chan(struct gsi_xfer_compl_evt *evt,
335 struct gsi_chan_xfer_notify *notify, bool callback)
336{
337 uint32_t ch_id;
338 struct gsi_chan_ctx *ch_ctx;
339 uint16_t rp_idx;
340 uint64_t rp;
341
342 ch_id = evt->chid;
Amir Levy41644242016-11-03 15:38:09 +0200343 if (ch_id >= gsi_ctx->max_ch) {
344 GSIERR("Unexpected ch %d\n", ch_id);
345 WARN_ON(1);
346 return;
347 }
348
Amir Levycdccd632016-10-30 09:36:41 +0200349 ch_ctx = &gsi_ctx->chan[ch_id];
350 BUG_ON(ch_ctx->props.prot != GSI_CHAN_PROT_GPI);
351 rp = evt->xfer_ptr;
352
353 while (ch_ctx->ring.rp_local != rp) {
354 gsi_incr_ring_rp(&ch_ctx->ring);
355 ch_ctx->stats.completed++;
356 }
357
358 /* the element at RP is also processed */
359 gsi_incr_ring_rp(&ch_ctx->ring);
360 ch_ctx->stats.completed++;
361
362 ch_ctx->ring.rp = ch_ctx->ring.rp_local;
363
364 rp_idx = gsi_find_idx_from_addr(&ch_ctx->ring, rp);
365 notify->xfer_user_data = ch_ctx->user_data[rp_idx];
366 notify->chan_user_data = ch_ctx->props.chan_user_data;
367 notify->evt_id = evt->code;
368 notify->bytes_xfered = evt->len;
Skylar Chang4b98d922017-01-16 10:29:49 -0800369 if (callback) {
370 if (atomic_read(&ch_ctx->poll_mode)) {
371 GSIERR("Calling client callback in polling mode\n");
372 WARN_ON(1);
373 }
Amir Levycdccd632016-10-30 09:36:41 +0200374 ch_ctx->props.xfer_cb(notify);
Skylar Chang4b98d922017-01-16 10:29:49 -0800375 }
Amir Levycdccd632016-10-30 09:36:41 +0200376}
377
378static void gsi_process_evt_re(struct gsi_evt_ctx *ctx,
379 struct gsi_chan_xfer_notify *notify, bool callback)
380{
381 struct gsi_xfer_compl_evt *evt;
382 uint16_t idx;
383
384 idx = gsi_find_idx_from_addr(&ctx->ring, ctx->ring.rp_local);
385 evt = (struct gsi_xfer_compl_evt *)(ctx->ring.base_va +
386 idx * ctx->ring.elem_sz);
387 gsi_process_chan(evt, notify, callback);
388 gsi_incr_ring_rp(&ctx->ring);
389 /* recycle this element */
390 gsi_incr_ring_wp(&ctx->ring);
391 ctx->stats.completed++;
392}
393
/*
 * Publish the event ring's local write pointer to HW via the doorbell
 * registers. The HW contract requires the MSB half to be written before
 * the LSB half; the LSB write is what actually rings the doorbell.
 */
static void gsi_ring_evt_doorbell(struct gsi_evt_ctx *ctx)
{
	uint32_t val;

	/* write order MUST be MSB followed by LSB */
	val = ((ctx->ring.wp_local >> 32) &
		GSI_EE_n_EV_CH_k_DOORBELL_1_WRITE_PTR_MSB_BMSK) <<
		GSI_EE_n_EV_CH_k_DOORBELL_1_WRITE_PTR_MSB_SHFT;
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_EV_CH_k_DOORBELL_1_OFFS(ctx->id,
			gsi_ctx->per.ee));

	val = (ctx->ring.wp_local &
		GSI_EE_n_EV_CH_k_DOORBELL_0_WRITE_PTR_LSB_BMSK) <<
		GSI_EE_n_EV_CH_k_DOORBELL_0_WRITE_PTR_LSB_SHFT;
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_EV_CH_k_DOORBELL_0_OFFS(ctx->id,
			gsi_ctx->per.ee));
}
413
/*
 * Publish the channel's local write pointer to HW via the doorbell
 * registers, committing the newly queued TREs. MSB half must be written
 * before the LSB half; the LSB write rings the doorbell.
 */
static void gsi_ring_chan_doorbell(struct gsi_chan_ctx *ctx)
{
	uint32_t val;

	/*
	 * allocate new events for this channel first
	 * before submitting the new TREs.
	 * for TO_GSI channels the event ring doorbell is rang as part of
	 * interrupt handling.
	 */
	if (ctx->evtr && ctx->props.dir == GSI_CHAN_DIR_FROM_GSI)
		gsi_ring_evt_doorbell(ctx->evtr);
	ctx->ring.wp = ctx->ring.wp_local;

	/* write order MUST be MSB followed by LSB */
	val = ((ctx->ring.wp_local >> 32) &
		GSI_EE_n_GSI_CH_k_DOORBELL_1_WRITE_PTR_MSB_BMSK) <<
		GSI_EE_n_GSI_CH_k_DOORBELL_1_WRITE_PTR_MSB_SHFT;
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_DOORBELL_1_OFFS(ctx->props.ch_id,
			gsi_ctx->per.ee));

	val = (ctx->ring.wp_local &
		GSI_EE_n_GSI_CH_k_DOORBELL_0_WRITE_PTR_LSB_BMSK) <<
		GSI_EE_n_GSI_CH_k_DOORBELL_0_WRITE_PTR_LSB_SHFT;
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_DOORBELL_0_OFFS(ctx->props.ch_id,
			gsi_ctx->per.ee));
}
443
/*
 * IEOB interrupt handler: for every unmasked event ring with a pending
 * interrupt, drain completion events from rp_local up to the HW read
 * pointer, invoking client callbacks, then ring the event doorbell to
 * recycle the consumed slots.
 *
 * MSI-driven rings are skipped (their events arrive via MSI, not IEOB).
 * If the client switched the channel to polling mode mid-drain, the loop
 * bails out so the remaining events are picked up by polling. After
 * ringing the doorbell the HW pointer is re-read (check_again) in case
 * more events landed while draining.
 */
static void gsi_handle_ieob(int ee)
{
	uint32_t ch;
	int i;
	uint64_t rp;
	struct gsi_evt_ctx *ctx;
	struct gsi_chan_xfer_notify notify;
	unsigned long flags;
	unsigned long cntr;
	uint32_t msk;

	ch = gsi_readl(gsi_ctx->base +
		GSI_EE_n_CNTXT_SRC_IEOB_IRQ_OFFS(ee));
	msk = gsi_readl(gsi_ctx->base +
		GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(ee));
	/* ack only the unmasked sources we are about to handle */
	gsi_writel(ch & msk, gsi_ctx->base +
		GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(ee));

	for (i = 0; i < 32; i++) {
		if ((1 << i) & ch & msk) {
			if (i >= gsi_ctx->max_ev || i >= GSI_EVT_RING_MAX) {
				GSIERR("invalid event %d\n", i);
				break;
			}
			ctx = &gsi_ctx->evtr[i];

			/*
			 * Don't handle MSI interrupts, only handle IEOB
			 * IRQs
			 */
			if (ctx->props.intr == GSI_INTR_MSI)
				continue;

			BUG_ON(ctx->props.intf != GSI_EVT_CHTYPE_GPI_EV);
			spin_lock_irqsave(&ctx->ring.slock, flags);
check_again:
			cntr = 0;
			/* HW exposes only the low 32 bits of RP; keep the
			 * high half from the cached ring pointer */
			rp = gsi_readl(gsi_ctx->base +
				GSI_EE_n_EV_CH_k_CNTXT_4_OFFS(i, ee));
			rp |= ctx->ring.rp & 0xFFFFFFFF00000000;

			ctx->ring.rp = rp;
			while (ctx->ring.rp_local != rp) {
				++cntr;
				/* client flipped to polling mode: stop
				 * calling back and let poll drain the rest */
				if (ctx->props.exclusive &&
					atomic_read(&ctx->chan->poll_mode)) {
					cntr = 0;
					break;
				}
				gsi_process_evt_re(ctx, &notify, true);
			}
			gsi_ring_evt_doorbell(ctx);
			if (cntr != 0)
				goto check_again;
			spin_unlock_irqrestore(&ctx->ring.slock, flags);
		}
	}
}
502
503static void gsi_handle_inter_ee_ch_ctrl(int ee)
504{
505 uint32_t ch;
506 int i;
507
508 ch = gsi_readl(gsi_ctx->base +
509 GSI_INTER_EE_n_SRC_GSI_CH_IRQ_OFFS(ee));
Skylar Changcfd46c12016-11-18 10:56:51 -0800510 gsi_writel(ch, gsi_ctx->base +
511 GSI_INTER_EE_n_SRC_GSI_CH_IRQ_CLR_OFFS(ee));
Amir Levycdccd632016-10-30 09:36:41 +0200512 for (i = 0; i < 32; i++) {
513 if ((1 << i) & ch) {
514 /* not currently expected */
515 GSIERR("ch %u was inter-EE changed\n", i);
516 }
517 }
Amir Levycdccd632016-10-30 09:36:41 +0200518}
519
520static void gsi_handle_inter_ee_ev_ctrl(int ee)
521{
522 uint32_t ch;
523 int i;
524
525 ch = gsi_readl(gsi_ctx->base +
526 GSI_INTER_EE_n_SRC_EV_CH_IRQ_OFFS(ee));
Skylar Changcfd46c12016-11-18 10:56:51 -0800527 gsi_writel(ch, gsi_ctx->base +
528 GSI_INTER_EE_n_SRC_EV_CH_IRQ_CLR_OFFS(ee));
Amir Levycdccd632016-10-30 09:36:41 +0200529 for (i = 0; i < 32; i++) {
530 if ((1 << i) & ch) {
531 /* not currently expected */
532 GSIERR("evt %u was inter-EE changed\n", i);
533 }
534 }
Amir Levycdccd632016-10-30 09:36:41 +0200535}
536
537static void gsi_handle_general(int ee)
538{
539 uint32_t val;
540 struct gsi_per_notify notify;
541
542 val = gsi_readl(gsi_ctx->base +
543 GSI_EE_n_CNTXT_GSI_IRQ_STTS_OFFS(ee));
544
545 notify.user_data = gsi_ctx->per.user_data;
546
547 if (val & GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_MCS_STACK_OVRFLOW_BMSK)
548 notify.evt_id = GSI_PER_EVT_GENERAL_MCS_STACK_OVERFLOW;
549
550 if (val & GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_CMD_FIFO_OVRFLOW_BMSK)
551 notify.evt_id = GSI_PER_EVT_GENERAL_CMD_FIFO_OVERFLOW;
552
553 if (val & GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_BUS_ERROR_BMSK)
554 notify.evt_id = GSI_PER_EVT_GENERAL_BUS_ERROR;
555
556 if (val & GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_BREAK_POINT_BMSK)
557 notify.evt_id = GSI_PER_EVT_GENERAL_BREAK_POINT;
558
559 if (gsi_ctx->per.notify_cb)
560 gsi_ctx->per.notify_cb(&notify);
561
562 gsi_writel(val, gsi_ctx->base +
563 GSI_EE_n_CNTXT_GSI_IRQ_CLR_OFFS(ee));
564}
565
566#define GSI_ISR_MAX_ITER 50
567
568static void gsi_handle_irq(void)
569{
570 uint32_t type;
571 int ee = gsi_ctx->per.ee;
572 unsigned long cnt = 0;
573
574 while (1) {
575 type = gsi_readl(gsi_ctx->base +
576 GSI_EE_n_CNTXT_TYPE_IRQ_OFFS(ee));
577
578 if (!type)
579 break;
580
Skylar Chang4d6a8fe2017-03-16 16:55:57 -0700581 GSIDBG_LOW("type %x\n", type);
Amir Levycdccd632016-10-30 09:36:41 +0200582
583 if (type & GSI_EE_n_CNTXT_TYPE_IRQ_CH_CTRL_BMSK)
584 gsi_handle_ch_ctrl(ee);
585
586 if (type & GSI_EE_n_CNTXT_TYPE_IRQ_EV_CTRL_BMSK)
587 gsi_handle_ev_ctrl(ee);
588
589 if (type & GSI_EE_n_CNTXT_TYPE_IRQ_GLOB_EE_BMSK)
590 gsi_handle_glob_ee(ee);
591
592 if (type & GSI_EE_n_CNTXT_TYPE_IRQ_IEOB_BMSK)
593 gsi_handle_ieob(ee);
594
595 if (type & GSI_EE_n_CNTXT_TYPE_IRQ_INTER_EE_CH_CTRL_BMSK)
596 gsi_handle_inter_ee_ch_ctrl(ee);
597
598 if (type & GSI_EE_n_CNTXT_TYPE_IRQ_INTER_EE_EV_CTRL_BMSK)
599 gsi_handle_inter_ee_ev_ctrl(ee);
600
601 if (type & GSI_EE_n_CNTXT_TYPE_IRQ_GENERAL_BMSK)
602 gsi_handle_general(ee);
603
604 if (++cnt > GSI_ISR_MAX_ITER)
605 BUG();
606 }
607}
608
609static irqreturn_t gsi_isr(int irq, void *ctxt)
610{
611 BUG_ON(ctxt != gsi_ctx);
612
613 if (gsi_ctx->per.req_clk_cb) {
614 bool granted = false;
615
616 gsi_ctx->per.req_clk_cb(gsi_ctx->per.user_data, &granted);
617 if (granted) {
618 gsi_handle_irq();
619 gsi_ctx->per.rel_clk_cb(gsi_ctx->per.user_data);
620 }
621 } else {
622 gsi_handle_irq();
623 }
624
625 return IRQ_HANDLED;
626}
627
Amir Levy41644242016-11-03 15:38:09 +0200628static uint32_t gsi_get_max_channels(enum gsi_ver ver)
629{
630 uint32_t reg;
631
632 switch (ver) {
633 case GSI_VER_1_0:
634 reg = gsi_readl(gsi_ctx->base +
635 GSI_V1_0_EE_n_GSI_HW_PARAM_OFFS(gsi_ctx->per.ee));
636 reg = (reg & GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_CH_NUM_BMSK) >>
637 GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_CH_NUM_SHFT;
638 break;
639 case GSI_VER_1_2:
640 reg = gsi_readl(gsi_ctx->base +
641 GSI_V1_2_EE_n_GSI_HW_PARAM_0_OFFS(gsi_ctx->per.ee));
642 reg = (reg & GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_BMSK) >>
643 GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_SHFT;
644 break;
645 case GSI_VER_1_3:
646 reg = gsi_readl(gsi_ctx->base +
647 GSI_V1_3_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
648 reg = (reg &
649 GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK) >>
650 GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT;
651 break;
Michael Adisumarta8522e212017-05-15 11:59:42 -0700652 case GSI_VER_2_0:
653 reg = gsi_readl(gsi_ctx->base +
654 GSI_V2_0_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
655 reg = (reg &
656 GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK) >>
657 GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT;
658 break;
Amir Levy41644242016-11-03 15:38:09 +0200659 default:
660 GSIERR("bad gsi version %d\n", ver);
661 WARN_ON(1);
662 reg = 0;
663 }
664
665 GSIDBG("max channels %d\n", reg);
666
667 return reg;
668}
669
670static uint32_t gsi_get_max_event_rings(enum gsi_ver ver)
671{
672 uint32_t reg;
673
674 switch (ver) {
675 case GSI_VER_1_0:
676 reg = gsi_readl(gsi_ctx->base +
677 GSI_V1_0_EE_n_GSI_HW_PARAM_OFFS(gsi_ctx->per.ee));
678 reg = (reg & GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_EV_CH_NUM_BMSK) >>
679 GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_EV_CH_NUM_SHFT;
680 break;
681 case GSI_VER_1_2:
682 reg = gsi_readl(gsi_ctx->base +
683 GSI_V1_2_EE_n_GSI_HW_PARAM_0_OFFS(gsi_ctx->per.ee));
684 reg = (reg & GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_BMSK) >>
685 GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_SHFT;
686 break;
687 case GSI_VER_1_3:
688 reg = gsi_readl(gsi_ctx->base +
689 GSI_V1_3_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
690 reg = (reg &
691 GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK) >>
692 GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT;
693 break;
Michael Adisumarta8522e212017-05-15 11:59:42 -0700694 case GSI_VER_2_0:
695 reg = gsi_readl(gsi_ctx->base +
696 GSI_V2_0_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
697 reg = (reg &
698 GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK) >>
699 GSI_V2_0_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT;
700 break;
Amir Levy41644242016-11-03 15:38:09 +0200701 default:
702 GSIERR("bad gsi version %d\n", ver);
703 WARN_ON(1);
704 reg = 0;
705 }
706
707 GSIDBG("max event rings %d\n", reg);
708
709 return reg;
710}
Amir Levycdccd632016-10-30 09:36:41 +0200711int gsi_complete_clk_grant(unsigned long dev_hdl)
712{
713 unsigned long flags;
714
715 if (!gsi_ctx) {
716 pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
717 return -GSI_STATUS_NODEV;
718 }
719
720 if (!gsi_ctx->per_registered) {
721 GSIERR("no client registered\n");
722 return -GSI_STATUS_INVALID_PARAMS;
723 }
724
725 if (dev_hdl != (uintptr_t)gsi_ctx) {
726 GSIERR("bad params dev_hdl=0x%lx gsi_ctx=0x%p\n", dev_hdl,
727 gsi_ctx);
728 return -GSI_STATUS_INVALID_PARAMS;
729 }
730
731 spin_lock_irqsave(&gsi_ctx->slock, flags);
732 gsi_handle_irq();
733 gsi_ctx->per.rel_clk_cb(gsi_ctx->per.user_data);
734 spin_unlock_irqrestore(&gsi_ctx->slock, flags);
735
736 return GSI_STATUS_SUCCESS;
737}
738EXPORT_SYMBOL(gsi_complete_clk_grant);
739
740int gsi_register_device(struct gsi_per_props *props, unsigned long *dev_hdl)
741{
742 int res;
743 uint32_t val;
744
745 if (!gsi_ctx) {
746 pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
747 return -GSI_STATUS_NODEV;
748 }
749
750 if (!props || !dev_hdl) {
751 GSIERR("bad params props=%p dev_hdl=%p\n", props, dev_hdl);
752 return -GSI_STATUS_INVALID_PARAMS;
753 }
754
Amir Levy41644242016-11-03 15:38:09 +0200755 if (props->ver <= GSI_VER_ERR || props->ver >= GSI_VER_MAX) {
756 GSIERR("bad params gsi_ver=%d\n", props->ver);
757 return -GSI_STATUS_INVALID_PARAMS;
758 }
759
Amir Levycdccd632016-10-30 09:36:41 +0200760 if (!props->notify_cb) {
761 GSIERR("notify callback must be provided\n");
762 return -GSI_STATUS_INVALID_PARAMS;
763 }
764
765 if (props->req_clk_cb && !props->rel_clk_cb) {
766 GSIERR("rel callback must be provided\n");
767 return -GSI_STATUS_INVALID_PARAMS;
768 }
769
770 if (gsi_ctx->per_registered) {
771 GSIERR("per already registered\n");
772 return -GSI_STATUS_UNSUPPORTED_OP;
773 }
774
775 spin_lock_init(&gsi_ctx->slock);
776 if (props->intr == GSI_INTR_IRQ) {
777 if (!props->irq) {
778 GSIERR("bad irq specified %u\n", props->irq);
779 return -GSI_STATUS_INVALID_PARAMS;
780 }
781
782 res = devm_request_irq(gsi_ctx->dev, props->irq,
783 (irq_handler_t) gsi_isr,
784 props->req_clk_cb ? IRQF_TRIGGER_RISING :
785 IRQF_TRIGGER_HIGH,
786 "gsi",
787 gsi_ctx);
788 if (res) {
789 GSIERR("failed to register isr for %u\n", props->irq);
790 return -GSI_STATUS_ERROR;
791 }
792
793 res = enable_irq_wake(props->irq);
794 if (res)
795 GSIERR("failed to enable wake irq %u\n", props->irq);
796 else
797 GSIERR("GSI irq is wake enabled %u\n", props->irq);
798
799 } else {
800 GSIERR("do not support interrupt type %u\n", props->intr);
801 return -GSI_STATUS_UNSUPPORTED_OP;
802 }
803
804 gsi_ctx->base = devm_ioremap_nocache(gsi_ctx->dev, props->phys_addr,
805 props->size);
806 if (!gsi_ctx->base) {
807 GSIERR("failed to remap GSI HW\n");
808 devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
809 return -GSI_STATUS_RES_ALLOC_FAILURE;
810 }
811
812 gsi_ctx->per = *props;
813 gsi_ctx->per_registered = true;
814 mutex_init(&gsi_ctx->mlock);
815 atomic_set(&gsi_ctx->num_chan, 0);
816 atomic_set(&gsi_ctx->num_evt_ring, 0);
Amir Levy41644242016-11-03 15:38:09 +0200817 gsi_ctx->max_ch = gsi_get_max_channels(gsi_ctx->per.ver);
818 if (gsi_ctx->max_ch == 0) {
819 devm_iounmap(gsi_ctx->dev, gsi_ctx->base);
820 devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
821 GSIERR("failed to get max channels\n");
822 return -GSI_STATUS_ERROR;
823 }
824 gsi_ctx->max_ev = gsi_get_max_event_rings(gsi_ctx->per.ver);
825 if (gsi_ctx->max_ev == 0) {
826 devm_iounmap(gsi_ctx->dev, gsi_ctx->base);
827 devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
828 GSIERR("failed to get max event rings\n");
829 return -GSI_STATUS_ERROR;
830 }
831
832 /* bitmap is max events excludes reserved events */
833 gsi_ctx->evt_bmap = ~((1 << gsi_ctx->max_ev) - 1);
834 gsi_ctx->evt_bmap |= ((1 << (GSI_MHI_ER_END + 1)) - 1) ^
835 ((1 << GSI_MHI_ER_START) - 1);
Amir Levycdccd632016-10-30 09:36:41 +0200836
837 /*
838 * enable all interrupts but GSI_BREAK_POINT.
839 * Inter EE commands / interrupt are no supported.
840 */
841 __gsi_config_type_irq(props->ee, ~0, ~0);
842 __gsi_config_ch_irq(props->ee, ~0, ~0);
843 __gsi_config_evt_irq(props->ee, ~0, ~0);
844 __gsi_config_ieob_irq(props->ee, ~0, ~0);
845 __gsi_config_glob_irq(props->ee, ~0, ~0);
846 __gsi_config_gen_irq(props->ee, ~0,
847 ~GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_BREAK_POINT_BMSK);
848
849 gsi_writel(props->intr, gsi_ctx->base +
850 GSI_EE_n_CNTXT_INTSET_OFFS(gsi_ctx->per.ee));
851
852 val = gsi_readl(gsi_ctx->base +
853 GSI_EE_n_GSI_STATUS_OFFS(gsi_ctx->per.ee));
854 if (val & GSI_EE_n_GSI_STATUS_ENABLED_BMSK)
855 gsi_ctx->enabled = true;
856 else
857 GSIERR("Manager EE has not enabled GSI, GSI un-usable\n");
858
Amir Levy41644242016-11-03 15:38:09 +0200859 if (gsi_ctx->per.ver >= GSI_VER_1_2)
860 gsi_writel(0, gsi_ctx->base +
861 GSI_EE_n_ERROR_LOG_OFFS(gsi_ctx->per.ee));
862
Amir Levycdccd632016-10-30 09:36:41 +0200863 *dev_hdl = (uintptr_t)gsi_ctx;
864
865 return GSI_STATUS_SUCCESS;
866}
867EXPORT_SYMBOL(gsi_register_device);
868
869int gsi_write_device_scratch(unsigned long dev_hdl,
870 struct gsi_device_scratch *val)
871{
872 if (!gsi_ctx) {
873 pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
874 return -GSI_STATUS_NODEV;
875 }
876
877 if (!gsi_ctx->per_registered) {
878 GSIERR("no client registered\n");
879 return -GSI_STATUS_INVALID_PARAMS;
880 }
881
882 if (dev_hdl != (uintptr_t)gsi_ctx) {
883 GSIERR("bad params dev_hdl=0x%lx gsi_ctx=0x%p\n", dev_hdl,
884 gsi_ctx);
885 return -GSI_STATUS_INVALID_PARAMS;
886 }
887
888 if (val->max_usb_pkt_size_valid &&
889 val->max_usb_pkt_size != 1024 &&
890 val->max_usb_pkt_size != 512) {
891 GSIERR("bad USB max pkt size dev_hdl=0x%lx sz=%u\n", dev_hdl,
892 val->max_usb_pkt_size);
893 return -GSI_STATUS_INVALID_PARAMS;
894 }
895
896 mutex_lock(&gsi_ctx->mlock);
897 if (val->mhi_base_chan_idx_valid)
898 gsi_ctx->scratch.word0.s.mhi_base_chan_idx =
899 val->mhi_base_chan_idx;
900 if (val->max_usb_pkt_size_valid)
901 gsi_ctx->scratch.word0.s.max_usb_pkt_size =
902 (val->max_usb_pkt_size == 1024) ? 1 : 0;
903 gsi_writel(gsi_ctx->scratch.word0.val,
904 gsi_ctx->base +
905 GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee));
906 mutex_unlock(&gsi_ctx->mlock);
907
908 return GSI_STATUS_SUCCESS;
909}
910EXPORT_SYMBOL(gsi_write_device_scratch);
911
912int gsi_deregister_device(unsigned long dev_hdl, bool force)
913{
914 if (!gsi_ctx) {
915 pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
916 return -GSI_STATUS_NODEV;
917 }
918
919 if (!gsi_ctx->per_registered) {
920 GSIERR("no client registered\n");
921 return -GSI_STATUS_INVALID_PARAMS;
922 }
923
924 if (dev_hdl != (uintptr_t)gsi_ctx) {
925 GSIERR("bad params dev_hdl=0x%lx gsi_ctx=0x%p\n", dev_hdl,
926 gsi_ctx);
927 return -GSI_STATUS_INVALID_PARAMS;
928 }
929
930 if (!force && atomic_read(&gsi_ctx->num_chan)) {
931 GSIERR("%u channels are allocated\n",
932 atomic_read(&gsi_ctx->num_chan));
933 return -GSI_STATUS_UNSUPPORTED_OP;
934 }
935
936 if (!force && atomic_read(&gsi_ctx->num_evt_ring)) {
937 GSIERR("%u evt rings are allocated\n",
938 atomic_read(&gsi_ctx->num_evt_ring));
939 return -GSI_STATUS_UNSUPPORTED_OP;
940 }
941
942 /* disable all interrupts */
943 __gsi_config_type_irq(gsi_ctx->per.ee, ~0, 0);
944 __gsi_config_ch_irq(gsi_ctx->per.ee, ~0, 0);
945 __gsi_config_evt_irq(gsi_ctx->per.ee, ~0, 0);
946 __gsi_config_ieob_irq(gsi_ctx->per.ee, ~0, 0);
947 __gsi_config_glob_irq(gsi_ctx->per.ee, ~0, 0);
948 __gsi_config_gen_irq(gsi_ctx->per.ee, ~0, 0);
949
950 devm_free_irq(gsi_ctx->dev, gsi_ctx->per.irq, gsi_ctx);
951 devm_iounmap(gsi_ctx->dev, gsi_ctx->base);
952 memset(gsi_ctx, 0, sizeof(*gsi_ctx));
953
954 return GSI_STATUS_SUCCESS;
955}
956EXPORT_SYMBOL(gsi_deregister_device);
957
/*
 * Program the full event-ring context (CNTXT_0..13) for ring @evt_id in
 * EE @ee from @props: ring type/interrupt mode/element size, ring length
 * and base address, interrupt moderation, MSI vector and address, and the
 * read-pointer update address.
 */
static void gsi_program_evt_ring_ctx(struct gsi_evt_ring_props *props,
		uint8_t evt_id, unsigned int ee)
{
	uint32_t val;

	GSIDBG("intf=%u intr=%u re=%u\n", props->intf, props->intr,
			props->re_size);

	/* CNTXT_0: channel type, interrupt type, ring element size */
	val = (((props->intf << GSI_EE_n_EV_CH_k_CNTXT_0_CHTYPE_SHFT) &
			GSI_EE_n_EV_CH_k_CNTXT_0_CHTYPE_BMSK) |
		((props->intr << GSI_EE_n_EV_CH_k_CNTXT_0_INTYPE_SHFT) &
			GSI_EE_n_EV_CH_k_CNTXT_0_INTYPE_BMSK) |
		((props->re_size << GSI_EE_n_EV_CH_k_CNTXT_0_ELEMENT_SIZE_SHFT)
			& GSI_EE_n_EV_CH_k_CNTXT_0_ELEMENT_SIZE_BMSK));

	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_EV_CH_k_CNTXT_0_OFFS(evt_id, ee));

	/* CNTXT_1: ring length in bytes */
	val = (props->ring_len & GSI_EE_n_EV_CH_k_CNTXT_1_R_LENGTH_BMSK) <<
		GSI_EE_n_EV_CH_k_CNTXT_1_R_LENGTH_SHFT;
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_EV_CH_k_CNTXT_1_OFFS(evt_id, ee));

	/* CNTXT_2/3: 64-bit ring base address, low then high half */
	val = (props->ring_base_addr &
		GSI_EE_n_EV_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_BMSK) <<
		GSI_EE_n_EV_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_SHFT;
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_EV_CH_k_CNTXT_2_OFFS(evt_id, ee));

	val = ((props->ring_base_addr >> 32) &
		GSI_EE_n_EV_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_BMSK) <<
		GSI_EE_n_EV_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_SHFT;
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_EV_CH_k_CNTXT_3_OFFS(evt_id, ee));

	/* CNTXT_8: interrupt moderation timer and counter */
	val = (((props->int_modt << GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODT_SHFT) &
		GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODT_BMSK) |
		((props->int_modc << GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODC_SHFT) &
		GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODC_BMSK));
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_EV_CH_k_CNTXT_8_OFFS(evt_id, ee));

	/* CNTXT_9: interrupt vector (MSI data) */
	val = (props->intvec & GSI_EE_n_EV_CH_k_CNTXT_9_INTVEC_BMSK) <<
		GSI_EE_n_EV_CH_k_CNTXT_9_INTVEC_SHFT;
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_EV_CH_k_CNTXT_9_OFFS(evt_id, ee));

	/* CNTXT_10/11: 64-bit MSI address, low then high half */
	val = (props->msi_addr & GSI_EE_n_EV_CH_k_CNTXT_10_MSI_ADDR_LSB_BMSK) <<
		GSI_EE_n_EV_CH_k_CNTXT_10_MSI_ADDR_LSB_SHFT;
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_EV_CH_k_CNTXT_10_OFFS(evt_id, ee));

	val = ((props->msi_addr >> 32) &
		GSI_EE_n_EV_CH_k_CNTXT_11_MSI_ADDR_MSB_BMSK) <<
		GSI_EE_n_EV_CH_k_CNTXT_11_MSI_ADDR_MSB_SHFT;
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_EV_CH_k_CNTXT_11_OFFS(evt_id, ee));

	/* CNTXT_12/13: 64-bit RP-update address, low then high half */
	val = (props->rp_update_addr &
		GSI_EE_n_EV_CH_k_CNTXT_12_RP_UPDATE_ADDR_LSB_BMSK) <<
		GSI_EE_n_EV_CH_k_CNTXT_12_RP_UPDATE_ADDR_LSB_SHFT;
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_EV_CH_k_CNTXT_12_OFFS(evt_id, ee));

	val = ((props->rp_update_addr >> 32) &
		GSI_EE_n_EV_CH_k_CNTXT_13_RP_UPDATE_ADDR_MSB_BMSK) <<
		GSI_EE_n_EV_CH_k_CNTXT_13_RP_UPDATE_ADDR_MSB_SHFT;
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_EV_CH_k_CNTXT_13_OFFS(evt_id, ee));
}
1028
1029static void gsi_init_evt_ring(struct gsi_evt_ring_props *props,
1030 struct gsi_ring_ctx *ctx)
1031{
1032 ctx->base_va = (uintptr_t)props->ring_base_vaddr;
1033 ctx->base = props->ring_base_addr;
1034 ctx->wp = ctx->base;
1035 ctx->rp = ctx->base;
1036 ctx->wp_local = ctx->base;
1037 ctx->rp_local = ctx->base;
1038 ctx->len = props->ring_len;
1039 ctx->elem_sz = props->re_size;
1040 ctx->max_num_elem = ctx->len / ctx->elem_sz - 1;
1041 ctx->end = ctx->base + (ctx->max_num_elem + 1) * ctx->elem_sz;
1042}
1043
1044static void gsi_prime_evt_ring(struct gsi_evt_ctx *ctx)
1045{
1046 unsigned long flags;
1047
1048 spin_lock_irqsave(&ctx->ring.slock, flags);
1049 memset((void *)ctx->ring.base_va, 0, ctx->ring.len);
1050 ctx->ring.wp_local = ctx->ring.base +
1051 ctx->ring.max_num_elem * ctx->ring.elem_sz;
1052 gsi_ring_evt_doorbell(ctx);
1053 spin_unlock_irqrestore(&ctx->ring.slock, flags);
1054}
1055
1056static int gsi_validate_evt_ring_props(struct gsi_evt_ring_props *props)
1057{
1058 uint64_t ra;
1059
1060 if ((props->re_size == GSI_EVT_RING_RE_SIZE_4B &&
1061 props->ring_len % 4) ||
1062 (props->re_size == GSI_EVT_RING_RE_SIZE_16B &&
1063 props->ring_len % 16)) {
1064 GSIERR("bad params ring_len %u not a multiple of RE size %u\n",
1065 props->ring_len, props->re_size);
1066 return -GSI_STATUS_INVALID_PARAMS;
1067 }
1068
1069 ra = props->ring_base_addr;
1070 do_div(ra, roundup_pow_of_two(props->ring_len));
1071
1072 if (props->ring_base_addr != ra * roundup_pow_of_two(props->ring_len)) {
1073 GSIERR("bad params ring base not aligned 0x%llx align 0x%lx\n",
1074 props->ring_base_addr,
1075 roundup_pow_of_two(props->ring_len));
1076 return -GSI_STATUS_INVALID_PARAMS;
1077 }
1078
1079 if (props->intf == GSI_EVT_CHTYPE_GPI_EV &&
1080 !props->ring_base_vaddr) {
1081 GSIERR("protocol %u requires ring base VA\n", props->intf);
1082 return -GSI_STATUS_INVALID_PARAMS;
1083 }
1084
1085 if (props->intf == GSI_EVT_CHTYPE_MHI_EV &&
1086 (!props->evchid_valid ||
1087 props->evchid > GSI_MHI_ER_END ||
1088 props->evchid < GSI_MHI_ER_START)) {
1089 GSIERR("MHI requires evchid valid=%d val=%u\n",
1090 props->evchid_valid, props->evchid);
1091 return -GSI_STATUS_INVALID_PARAMS;
1092 }
1093
1094 if (props->intf != GSI_EVT_CHTYPE_MHI_EV &&
1095 props->evchid_valid) {
1096 GSIERR("protocol %u cannot specify evchid\n", props->intf);
1097 return -GSI_STATUS_INVALID_PARAMS;
1098 }
1099
1100 if (!props->err_cb) {
1101 GSIERR("err callback must be provided\n");
1102 return -GSI_STATUS_INVALID_PARAMS;
1103 }
1104
1105 return GSI_STATUS_SUCCESS;
1106}
1107
1108int gsi_alloc_evt_ring(struct gsi_evt_ring_props *props, unsigned long dev_hdl,
1109 unsigned long *evt_ring_hdl)
1110{
1111 unsigned long evt_id;
1112 enum gsi_evt_ch_cmd_opcode op = GSI_EVT_ALLOCATE;
1113 uint32_t val;
1114 struct gsi_evt_ctx *ctx;
1115 int res;
1116 int ee = gsi_ctx->per.ee;
1117 unsigned long flags;
1118
1119 if (!gsi_ctx) {
1120 pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
1121 return -GSI_STATUS_NODEV;
1122 }
1123
1124 if (!props || !evt_ring_hdl || dev_hdl != (uintptr_t)gsi_ctx) {
1125 GSIERR("bad params props=%p dev_hdl=0x%lx evt_ring_hdl=%p\n",
1126 props, dev_hdl, evt_ring_hdl);
1127 return -GSI_STATUS_INVALID_PARAMS;
1128 }
1129
1130 if (gsi_validate_evt_ring_props(props)) {
1131 GSIERR("invalid params\n");
1132 return -GSI_STATUS_INVALID_PARAMS;
1133 }
1134
1135 if (!props->evchid_valid) {
1136 mutex_lock(&gsi_ctx->mlock);
1137 evt_id = find_first_zero_bit(&gsi_ctx->evt_bmap,
1138 sizeof(unsigned long) * BITS_PER_BYTE);
1139 if (evt_id == sizeof(unsigned long) * BITS_PER_BYTE) {
1140 GSIERR("failed to alloc event ID\n");
1141 mutex_unlock(&gsi_ctx->mlock);
1142 return -GSI_STATUS_RES_ALLOC_FAILURE;
1143 }
1144 set_bit(evt_id, &gsi_ctx->evt_bmap);
1145 mutex_unlock(&gsi_ctx->mlock);
1146 } else {
1147 evt_id = props->evchid;
1148 }
1149 GSIDBG("Using %lu as virt evt id\n", evt_id);
1150
1151 ctx = &gsi_ctx->evtr[evt_id];
1152 memset(ctx, 0, sizeof(*ctx));
1153 mutex_init(&ctx->mlock);
1154 init_completion(&ctx->compl);
1155 atomic_set(&ctx->chan_ref_cnt, 0);
1156 ctx->props = *props;
1157
1158 mutex_lock(&gsi_ctx->mlock);
1159 val = (((evt_id << GSI_EE_n_EV_CH_CMD_CHID_SHFT) &
1160 GSI_EE_n_EV_CH_CMD_CHID_BMSK) |
1161 ((op << GSI_EE_n_EV_CH_CMD_OPCODE_SHFT) &
1162 GSI_EE_n_EV_CH_CMD_OPCODE_BMSK));
1163 gsi_writel(val, gsi_ctx->base +
1164 GSI_EE_n_EV_CH_CMD_OFFS(ee));
1165 res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
1166 if (res == 0) {
1167 GSIERR("evt_id=%lu timed out\n", evt_id);
1168 if (!props->evchid_valid)
1169 clear_bit(evt_id, &gsi_ctx->evt_bmap);
1170 mutex_unlock(&gsi_ctx->mlock);
1171 return -GSI_STATUS_TIMED_OUT;
1172 }
1173
1174 if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
1175 GSIERR("evt_id=%lu allocation failed state=%u\n",
1176 evt_id, ctx->state);
1177 if (!props->evchid_valid)
1178 clear_bit(evt_id, &gsi_ctx->evt_bmap);
1179 mutex_unlock(&gsi_ctx->mlock);
1180 return -GSI_STATUS_RES_ALLOC_FAILURE;
1181 }
1182
1183 gsi_program_evt_ring_ctx(props, evt_id, gsi_ctx->per.ee);
1184
1185 spin_lock_init(&ctx->ring.slock);
1186 gsi_init_evt_ring(props, &ctx->ring);
1187
1188 ctx->id = evt_id;
1189 *evt_ring_hdl = evt_id;
1190 atomic_inc(&gsi_ctx->num_evt_ring);
1191 if (props->intf == GSI_EVT_CHTYPE_GPI_EV)
1192 gsi_prime_evt_ring(ctx);
1193 mutex_unlock(&gsi_ctx->mlock);
1194
1195 spin_lock_irqsave(&gsi_ctx->slock, flags);
1196 gsi_writel(1 << evt_id, gsi_ctx->base +
1197 GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(ee));
Amir Levy2c951a92017-03-09 09:09:33 +02001198
1199 /* enable ieob interrupts for GPI, enable MSI interrupts */
1200 if ((props->intf != GSI_EVT_CHTYPE_GPI_EV) &&
1201 (props->intr != GSI_INTR_MSI))
Amir Levycdccd632016-10-30 09:36:41 +02001202 __gsi_config_ieob_irq(gsi_ctx->per.ee, 1 << evt_id, 0);
1203 else
1204 __gsi_config_ieob_irq(gsi_ctx->per.ee, 1 << ctx->id, ~0);
1205 spin_unlock_irqrestore(&gsi_ctx->slock, flags);
1206
1207 return GSI_STATUS_SUCCESS;
1208}
1209EXPORT_SYMBOL(gsi_alloc_evt_ring);
1210
1211static void __gsi_write_evt_ring_scratch(unsigned long evt_ring_hdl,
1212 union __packed gsi_evt_scratch val)
1213{
1214 gsi_writel(val.data.word1, gsi_ctx->base +
1215 GSI_EE_n_EV_CH_k_SCRATCH_0_OFFS(evt_ring_hdl,
1216 gsi_ctx->per.ee));
1217 gsi_writel(val.data.word2, gsi_ctx->base +
1218 GSI_EE_n_EV_CH_k_SCRATCH_1_OFFS(evt_ring_hdl,
1219 gsi_ctx->per.ee));
1220}
1221
1222int gsi_write_evt_ring_scratch(unsigned long evt_ring_hdl,
1223 union __packed gsi_evt_scratch val)
1224{
1225 struct gsi_evt_ctx *ctx;
1226
1227 if (!gsi_ctx) {
1228 pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
1229 return -GSI_STATUS_NODEV;
1230 }
1231
Amir Levy41644242016-11-03 15:38:09 +02001232 if (evt_ring_hdl >= gsi_ctx->max_ev) {
Amir Levycdccd632016-10-30 09:36:41 +02001233 GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
1234 return -GSI_STATUS_INVALID_PARAMS;
1235 }
1236
1237 ctx = &gsi_ctx->evtr[evt_ring_hdl];
1238
1239 if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
1240 GSIERR("bad state %d\n",
1241 gsi_ctx->evtr[evt_ring_hdl].state);
1242 return -GSI_STATUS_UNSUPPORTED_OP;
1243 }
1244
1245 mutex_lock(&ctx->mlock);
1246 ctx->scratch = val;
1247 __gsi_write_evt_ring_scratch(evt_ring_hdl, val);
1248 mutex_unlock(&ctx->mlock);
1249
1250 return GSI_STATUS_SUCCESS;
1251}
1252EXPORT_SYMBOL(gsi_write_evt_ring_scratch);
1253
/*
 * gsi_dealloc_evt_ring() - Tear down an event ring previously allocated
 * with gsi_alloc_evt_ring().
 *
 * Fails if any channel still references the ring or the ring is not in
 * ALLOCATED state. Issues the GSI_EVT_DE_ALLOC command, waits for its
 * completion and returns a dynamically assigned virtual id to the bitmap.
 *
 * Return: GSI_STATUS_SUCCESS or a negative GSI status code.
 */
int gsi_dealloc_evt_ring(unsigned long evt_ring_hdl)
{
	uint32_t val;
	enum gsi_evt_ch_cmd_opcode op = GSI_EVT_DE_ALLOC;
	struct gsi_evt_ctx *ctx;
	int res;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (evt_ring_hdl >= gsi_ctx->max_ev) {
		GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->evtr[evt_ring_hdl];

	/* ring must be unreferenced by all channels before de-allocation */
	if (atomic_read(&ctx->chan_ref_cnt)) {
		GSIERR("%d channels still using this event ring\n",
			atomic_read(&ctx->chan_ref_cnt));
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	/* TODO: add check for ERROR state */
	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&gsi_ctx->mlock);
	reinit_completion(&ctx->compl);
	/* build and issue the EV_CH de-allocate command, then wait for the
	 * completion interrupt to update ctx->state
	 */
	val = (((evt_ring_hdl << GSI_EE_n_EV_CH_CMD_CHID_SHFT) &
			GSI_EE_n_EV_CH_CMD_CHID_BMSK) |
		((op << GSI_EE_n_EV_CH_CMD_OPCODE_SHFT) &
			GSI_EE_n_EV_CH_CMD_OPCODE_BMSK));
	gsi_writel(val, gsi_ctx->base +
			GSI_EE_n_EV_CH_CMD_OFFS(gsi_ctx->per.ee));
	res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
	if (res == 0) {
		GSIERR("evt_id=%lu timed out\n", evt_ring_hdl);
		mutex_unlock(&gsi_ctx->mlock);
		return -GSI_STATUS_TIMED_OUT;
	}

	if (ctx->state != GSI_EVT_RING_STATE_NOT_ALLOCATED) {
		GSIERR("evt_id=%lu unexpected state=%u\n", evt_ring_hdl,
				ctx->state);
		/* HW left the ring in an unexpected state - treated as fatal */
		BUG();
	}
	mutex_unlock(&gsi_ctx->mlock);

	if (!ctx->props.evchid_valid) {
		/* return the dynamically allocated virtual id to the bitmap */
		mutex_lock(&gsi_ctx->mlock);
		clear_bit(evt_ring_hdl, &gsi_ctx->evt_bmap);
		mutex_unlock(&gsi_ctx->mlock);
	}
	atomic_dec(&gsi_ctx->num_evt_ring);

	return GSI_STATUS_SUCCESS;
}
1316EXPORT_SYMBOL(gsi_dealloc_evt_ring);
1317
1318int gsi_query_evt_ring_db_addr(unsigned long evt_ring_hdl,
1319 uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb)
1320{
1321 struct gsi_evt_ctx *ctx;
1322
1323 if (!gsi_ctx) {
1324 pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
1325 return -GSI_STATUS_NODEV;
1326 }
1327
1328 if (!db_addr_wp_msb || !db_addr_wp_lsb) {
1329 GSIERR("bad params msb=%p lsb=%p\n", db_addr_wp_msb,
1330 db_addr_wp_lsb);
1331 return -GSI_STATUS_INVALID_PARAMS;
1332 }
1333
Amir Levy41644242016-11-03 15:38:09 +02001334 if (evt_ring_hdl >= gsi_ctx->max_ev) {
Amir Levycdccd632016-10-30 09:36:41 +02001335 GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
1336 return -GSI_STATUS_INVALID_PARAMS;
1337 }
1338
1339 ctx = &gsi_ctx->evtr[evt_ring_hdl];
1340
1341 if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
1342 GSIERR("bad state %d\n",
1343 gsi_ctx->evtr[evt_ring_hdl].state);
1344 return -GSI_STATUS_UNSUPPORTED_OP;
1345 }
1346
1347 *db_addr_wp_lsb = gsi_ctx->per.phys_addr +
1348 GSI_EE_n_EV_CH_k_DOORBELL_0_OFFS(evt_ring_hdl, gsi_ctx->per.ee);
1349 *db_addr_wp_msb = gsi_ctx->per.phys_addr +
1350 GSI_EE_n_EV_CH_k_DOORBELL_1_OFFS(evt_ring_hdl, gsi_ctx->per.ee);
1351
1352 return GSI_STATUS_SUCCESS;
1353}
1354EXPORT_SYMBOL(gsi_query_evt_ring_db_addr);
1355
Ghanim Fodia4fc49b2017-06-20 10:35:20 +03001356int gsi_ring_evt_ring_db(unsigned long evt_ring_hdl, uint64_t value)
1357{
1358 struct gsi_evt_ctx *ctx;
1359
1360 if (!gsi_ctx) {
1361 pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
1362 return -GSI_STATUS_NODEV;
1363 }
1364
1365 if (evt_ring_hdl >= gsi_ctx->max_ev) {
1366 GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
1367 return -GSI_STATUS_INVALID_PARAMS;
1368 }
1369
1370 ctx = &gsi_ctx->evtr[evt_ring_hdl];
1371
1372 if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
1373 GSIERR("bad state %d\n",
1374 gsi_ctx->evtr[evt_ring_hdl].state);
1375 return -GSI_STATUS_UNSUPPORTED_OP;
1376 }
1377
1378 ctx->ring.wp_local = value;
1379 gsi_ring_evt_doorbell(ctx);
1380
1381 return GSI_STATUS_SUCCESS;
1382}
1383EXPORT_SYMBOL(gsi_ring_evt_ring_db);
1384
/*
 * gsi_reset_evt_ring() - Reset an ALLOCATED event ring and reprogram it.
 *
 * Issues the GSI_EVT_RESET command, waits for its completion, then
 * reprograms the HW ring context, reinitializes the SW ring state,
 * restores the cached scratch and (for GPI) re-primes the ring.
 *
 * Return: GSI_STATUS_SUCCESS or a negative GSI status code.
 */
int gsi_reset_evt_ring(unsigned long evt_ring_hdl)
{
	uint32_t val;
	enum gsi_evt_ch_cmd_opcode op = GSI_EVT_RESET;
	struct gsi_evt_ctx *ctx;
	int res;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (evt_ring_hdl >= gsi_ctx->max_ev) {
		GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->evtr[evt_ring_hdl];

	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&gsi_ctx->mlock);
	reinit_completion(&ctx->compl);
	/* build and issue the EV_CH reset command, then wait for the
	 * completion interrupt to update ctx->state
	 */
	val = (((evt_ring_hdl << GSI_EE_n_EV_CH_CMD_CHID_SHFT) &
			GSI_EE_n_EV_CH_CMD_CHID_BMSK) |
		((op << GSI_EE_n_EV_CH_CMD_OPCODE_SHFT) &
			GSI_EE_n_EV_CH_CMD_OPCODE_BMSK));
	gsi_writel(val, gsi_ctx->base +
			GSI_EE_n_EV_CH_CMD_OFFS(gsi_ctx->per.ee));
	res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
	if (res == 0) {
		GSIERR("evt_id=%lu timed out\n", evt_ring_hdl);
		mutex_unlock(&gsi_ctx->mlock);
		return -GSI_STATUS_TIMED_OUT;
	}

	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
		GSIERR("evt_id=%lu unexpected state=%u\n", evt_ring_hdl,
				ctx->state);
		/* reset must leave the ring in ALLOCATED state - fatal */
		BUG();
	}

	/* reprogram HW context and reinitialize SW ring bookkeeping */
	gsi_program_evt_ring_ctx(&ctx->props, evt_ring_hdl, gsi_ctx->per.ee);
	gsi_init_evt_ring(&ctx->props, &ctx->ring);

	/* restore scratch */
	__gsi_write_evt_ring_scratch(evt_ring_hdl, ctx->scratch);

	if (ctx->props.intf == GSI_EVT_CHTYPE_GPI_EV)
		gsi_prime_evt_ring(ctx);
	mutex_unlock(&gsi_ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
1442EXPORT_SYMBOL(gsi_reset_evt_ring);
1443
1444int gsi_get_evt_ring_cfg(unsigned long evt_ring_hdl,
1445 struct gsi_evt_ring_props *props, union gsi_evt_scratch *scr)
1446{
1447 struct gsi_evt_ctx *ctx;
1448
1449 if (!gsi_ctx) {
1450 pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
1451 return -GSI_STATUS_NODEV;
1452 }
1453
1454 if (!props || !scr) {
1455 GSIERR("bad params props=%p scr=%p\n", props, scr);
1456 return -GSI_STATUS_INVALID_PARAMS;
1457 }
1458
Amir Levy41644242016-11-03 15:38:09 +02001459 if (evt_ring_hdl >= gsi_ctx->max_ev) {
Amir Levycdccd632016-10-30 09:36:41 +02001460 GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
1461 return -GSI_STATUS_INVALID_PARAMS;
1462 }
1463
1464 ctx = &gsi_ctx->evtr[evt_ring_hdl];
1465
1466 if (ctx->state == GSI_EVT_RING_STATE_NOT_ALLOCATED) {
1467 GSIERR("bad state %d\n", ctx->state);
1468 return -GSI_STATUS_UNSUPPORTED_OP;
1469 }
1470
1471 mutex_lock(&ctx->mlock);
1472 *props = ctx->props;
1473 *scr = ctx->scratch;
1474 mutex_unlock(&ctx->mlock);
1475
1476 return GSI_STATUS_SUCCESS;
1477}
1478EXPORT_SYMBOL(gsi_get_evt_ring_cfg);
1479
1480int gsi_set_evt_ring_cfg(unsigned long evt_ring_hdl,
1481 struct gsi_evt_ring_props *props, union gsi_evt_scratch *scr)
1482{
1483 struct gsi_evt_ctx *ctx;
1484
1485 if (!gsi_ctx) {
1486 pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
1487 return -GSI_STATUS_NODEV;
1488 }
1489
1490 if (!props || gsi_validate_evt_ring_props(props)) {
1491 GSIERR("bad params props=%p\n", props);
1492 return -GSI_STATUS_INVALID_PARAMS;
1493 }
1494
Amir Levy41644242016-11-03 15:38:09 +02001495 if (evt_ring_hdl >= gsi_ctx->max_ev) {
Amir Levycdccd632016-10-30 09:36:41 +02001496 GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
1497 return -GSI_STATUS_INVALID_PARAMS;
1498 }
1499
1500 ctx = &gsi_ctx->evtr[evt_ring_hdl];
1501
1502 if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
1503 GSIERR("bad state %d\n", ctx->state);
1504 return -GSI_STATUS_UNSUPPORTED_OP;
1505 }
1506
1507 if (ctx->props.exclusive != props->exclusive) {
1508 GSIERR("changing immutable fields not supported\n");
1509 return -GSI_STATUS_UNSUPPORTED_OP;
1510 }
1511
1512 mutex_lock(&ctx->mlock);
1513 ctx->props = *props;
1514 if (scr)
1515 ctx->scratch = *scr;
1516 mutex_unlock(&ctx->mlock);
1517
1518 return gsi_reset_evt_ring(evt_ring_hdl);
1519}
1520EXPORT_SYMBOL(gsi_set_evt_ring_cfg);
1521
/*
 * gsi_program_chan_ctx() - Program the HW channel context registers
 * (GSI_CH_k_CNTXT_0..3 and QOS) for channel @props->ch_id on EE @ee,
 * binding the channel to event ring index @erindex.
 */
static void gsi_program_chan_ctx(struct gsi_chan_props *props, unsigned int ee,
		uint8_t erindex)
{
	uint32_t val;

	/* CNTXT_0: protocol, direction, event ring index, element size */
	val = (((props->prot << GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_SHFT)
		& GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_BMSK) |
		((props->dir << GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_DIR_SHFT) &
		GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_DIR_BMSK) |
		((erindex << GSI_EE_n_GSI_CH_k_CNTXT_0_ERINDEX_SHFT) &
		GSI_EE_n_GSI_CH_k_CNTXT_0_ERINDEX_BMSK) |
		((props->re_size << GSI_EE_n_GSI_CH_k_CNTXT_0_ELEMENT_SIZE_SHFT)
		& GSI_EE_n_GSI_CH_k_CNTXT_0_ELEMENT_SIZE_BMSK));
	gsi_writel(val, gsi_ctx->base +
			GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS(props->ch_id, ee));

	/* CNTXT_1: ring length in bytes */
	val = (props->ring_len & GSI_EE_n_GSI_CH_k_CNTXT_1_R_LENGTH_BMSK) <<
		GSI_EE_n_GSI_CH_k_CNTXT_1_R_LENGTH_SHFT;
	gsi_writel(val, gsi_ctx->base +
			GSI_EE_n_GSI_CH_k_CNTXT_1_OFFS(props->ch_id, ee));

	/* CNTXT_2/3: 64-bit ring base address, LSBs then MSBs */
	val = (props->ring_base_addr &
			GSI_EE_n_GSI_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_BMSK) <<
		GSI_EE_n_GSI_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_SHFT;
	gsi_writel(val, gsi_ctx->base +
			GSI_EE_n_GSI_CH_k_CNTXT_2_OFFS(props->ch_id, ee));

	val = ((props->ring_base_addr >> 32) &
		GSI_EE_n_GSI_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_BMSK) <<
		GSI_EE_n_GSI_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_SHFT;
	gsi_writel(val, gsi_ctx->base +
			GSI_EE_n_GSI_CH_k_CNTXT_3_OFFS(props->ch_id, ee));

	/* QOS: WRR weight, max prefetch and doorbell-engine selection */
	val = (((props->low_weight << GSI_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_SHFT) &
		GSI_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_BMSK) |
		((props->max_prefetch <<
			GSI_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_SHFT) &
			GSI_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_BMSK) |
		((props->use_db_eng << GSI_EE_n_GSI_CH_k_QOS_USE_DB_ENG_SHFT) &
		GSI_EE_n_GSI_CH_k_QOS_USE_DB_ENG_BMSK));
	gsi_writel(val, gsi_ctx->base +
			GSI_EE_n_GSI_CH_k_QOS_OFFS(props->ch_id, ee));
}
1565
1566static void gsi_init_chan_ring(struct gsi_chan_props *props,
1567 struct gsi_ring_ctx *ctx)
1568{
1569 ctx->base_va = (uintptr_t)props->ring_base_vaddr;
1570 ctx->base = props->ring_base_addr;
1571 ctx->wp = ctx->base;
1572 ctx->rp = ctx->base;
1573 ctx->wp_local = ctx->base;
1574 ctx->rp_local = ctx->base;
1575 ctx->len = props->ring_len;
1576 ctx->elem_sz = props->re_size;
1577 ctx->max_num_elem = ctx->len / ctx->elem_sz - 1;
1578 ctx->end = ctx->base + (ctx->max_num_elem + 1) *
1579 ctx->elem_sz;
1580}
1581
1582static int gsi_validate_channel_props(struct gsi_chan_props *props)
1583{
1584 uint64_t ra;
Skylar Chang8568eb92017-03-21 17:18:51 -07001585 uint64_t last;
Amir Levycdccd632016-10-30 09:36:41 +02001586
Amir Levy41644242016-11-03 15:38:09 +02001587 if (props->ch_id >= gsi_ctx->max_ch) {
Amir Levycdccd632016-10-30 09:36:41 +02001588 GSIERR("ch_id %u invalid\n", props->ch_id);
1589 return -GSI_STATUS_INVALID_PARAMS;
1590 }
1591
1592 if ((props->re_size == GSI_CHAN_RE_SIZE_4B &&
1593 props->ring_len % 4) ||
1594 (props->re_size == GSI_CHAN_RE_SIZE_16B &&
1595 props->ring_len % 16) ||
1596 (props->re_size == GSI_CHAN_RE_SIZE_32B &&
1597 props->ring_len % 32)) {
1598 GSIERR("bad params ring_len %u not a multiple of re size %u\n",
1599 props->ring_len, props->re_size);
1600 return -GSI_STATUS_INVALID_PARAMS;
1601 }
1602
1603 ra = props->ring_base_addr;
1604 do_div(ra, roundup_pow_of_two(props->ring_len));
1605
1606 if (props->ring_base_addr != ra * roundup_pow_of_two(props->ring_len)) {
1607 GSIERR("bad params ring base not aligned 0x%llx align 0x%lx\n",
1608 props->ring_base_addr,
1609 roundup_pow_of_two(props->ring_len));
1610 return -GSI_STATUS_INVALID_PARAMS;
1611 }
1612
Skylar Chang8568eb92017-03-21 17:18:51 -07001613 last = props->ring_base_addr + props->ring_len - props->re_size;
1614
1615 /* MSB should stay same within the ring */
1616 if ((props->ring_base_addr & 0xFFFFFFFF00000000ULL) !=
1617 (last & 0xFFFFFFFF00000000ULL)) {
1618 GSIERR("MSB is not fixed on ring base 0x%llx size 0x%x\n",
1619 props->ring_base_addr,
1620 props->ring_len);
1621 return -GSI_STATUS_INVALID_PARAMS;
1622 }
1623
Amir Levycdccd632016-10-30 09:36:41 +02001624 if (props->prot == GSI_CHAN_PROT_GPI &&
1625 !props->ring_base_vaddr) {
1626 GSIERR("protocol %u requires ring base VA\n", props->prot);
1627 return -GSI_STATUS_INVALID_PARAMS;
1628 }
1629
1630 if (props->low_weight > GSI_MAX_CH_LOW_WEIGHT) {
1631 GSIERR("invalid channel low weight %u\n", props->low_weight);
1632 return -GSI_STATUS_INVALID_PARAMS;
1633 }
1634
1635 if (props->prot == GSI_CHAN_PROT_GPI && !props->xfer_cb) {
1636 GSIERR("xfer callback must be provided\n");
1637 return -GSI_STATUS_INVALID_PARAMS;
1638 }
1639
1640 if (!props->err_cb) {
1641 GSIERR("err callback must be provided\n");
1642 return -GSI_STATUS_INVALID_PARAMS;
1643 }
1644
1645 return GSI_STATUS_SUCCESS;
1646}
1647
1648int gsi_alloc_channel(struct gsi_chan_props *props, unsigned long dev_hdl,
1649 unsigned long *chan_hdl)
1650{
1651 struct gsi_chan_ctx *ctx;
1652 uint32_t val;
1653 int res;
1654 int ee = gsi_ctx->per.ee;
1655 enum gsi_ch_cmd_opcode op = GSI_CH_ALLOCATE;
1656 uint8_t erindex;
1657 void **user_data;
1658
1659 if (!gsi_ctx) {
1660 pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
1661 return -GSI_STATUS_NODEV;
1662 }
1663
1664 if (!props || !chan_hdl || dev_hdl != (uintptr_t)gsi_ctx) {
1665 GSIERR("bad params props=%p dev_hdl=0x%lx chan_hdl=%p\n",
1666 props, dev_hdl, chan_hdl);
1667 return -GSI_STATUS_INVALID_PARAMS;
1668 }
1669
1670 if (gsi_validate_channel_props(props)) {
1671 GSIERR("bad params\n");
1672 return -GSI_STATUS_INVALID_PARAMS;
1673 }
1674
Skylar Changa35aeb92017-01-24 10:11:33 -08001675 if (props->evt_ring_hdl != ~0) {
1676 if (props->evt_ring_hdl >= GSI_EVT_RING_MAX) {
1677 GSIERR("invalid evt ring=%lu\n", props->evt_ring_hdl);
1678 return -GSI_STATUS_INVALID_PARAMS;
1679 }
Amir Levycdccd632016-10-30 09:36:41 +02001680
Skylar Changa35aeb92017-01-24 10:11:33 -08001681 if (atomic_read(
1682 &gsi_ctx->evtr[props->evt_ring_hdl].chan_ref_cnt) &&
1683 gsi_ctx->evtr[props->evt_ring_hdl].props.exclusive) {
1684 GSIERR("evt ring=%lu exclusively used by chan_hdl=%p\n",
1685 props->evt_ring_hdl, chan_hdl);
1686 return -GSI_STATUS_UNSUPPORTED_OP;
1687 }
1688 }
Amir Levycdccd632016-10-30 09:36:41 +02001689
1690 ctx = &gsi_ctx->chan[props->ch_id];
1691 if (ctx->allocated) {
1692 GSIERR("chan %d already allocated\n", props->ch_id);
1693 return -GSI_STATUS_NODEV;
1694 }
1695
1696 memset(ctx, 0, sizeof(*ctx));
1697 user_data = devm_kzalloc(gsi_ctx->dev,
1698 (props->ring_len / props->re_size) * sizeof(void *),
1699 GFP_KERNEL);
1700 if (user_data == NULL) {
1701 GSIERR("%s:%d gsi context not allocated\n", __func__, __LINE__);
1702 return -GSI_STATUS_RES_ALLOC_FAILURE;
1703 }
1704
1705 mutex_init(&ctx->mlock);
1706 init_completion(&ctx->compl);
1707 atomic_set(&ctx->poll_mode, GSI_CHAN_MODE_CALLBACK);
1708 ctx->props = *props;
1709
1710 mutex_lock(&gsi_ctx->mlock);
Skylar Chang22ecb822016-10-21 10:15:04 -07001711 gsi_ctx->ch_dbg[props->ch_id].ch_allocate++;
Amir Levycdccd632016-10-30 09:36:41 +02001712 val = (((props->ch_id << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) &
1713 GSI_EE_n_GSI_CH_CMD_CHID_BMSK) |
1714 ((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) &
1715 GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK));
1716 gsi_writel(val, gsi_ctx->base +
1717 GSI_EE_n_GSI_CH_CMD_OFFS(ee));
1718 res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
1719 if (res == 0) {
1720 GSIERR("chan_hdl=%u timed out\n", props->ch_id);
1721 mutex_unlock(&gsi_ctx->mlock);
1722 devm_kfree(gsi_ctx->dev, user_data);
1723 return -GSI_STATUS_TIMED_OUT;
1724 }
1725 if (ctx->state != GSI_CHAN_STATE_ALLOCATED) {
1726 GSIERR("chan_hdl=%u allocation failed state=%d\n",
1727 props->ch_id, ctx->state);
1728 mutex_unlock(&gsi_ctx->mlock);
1729 devm_kfree(gsi_ctx->dev, user_data);
1730 return -GSI_STATUS_RES_ALLOC_FAILURE;
1731 }
1732 mutex_unlock(&gsi_ctx->mlock);
1733
1734 erindex = props->evt_ring_hdl != ~0 ? props->evt_ring_hdl :
1735 GSI_NO_EVT_ERINDEX;
1736 if (erindex != GSI_NO_EVT_ERINDEX) {
1737 ctx->evtr = &gsi_ctx->evtr[erindex];
1738 atomic_inc(&ctx->evtr->chan_ref_cnt);
1739 if (ctx->evtr->props.exclusive)
1740 ctx->evtr->chan = ctx;
1741 }
1742
1743 gsi_program_chan_ctx(props, gsi_ctx->per.ee, erindex);
1744
1745 spin_lock_init(&ctx->ring.slock);
1746 gsi_init_chan_ring(props, &ctx->ring);
1747 if (!props->max_re_expected)
1748 ctx->props.max_re_expected = ctx->ring.max_num_elem;
1749 ctx->user_data = user_data;
1750 *chan_hdl = props->ch_id;
1751 ctx->allocated = true;
1752 ctx->stats.dp.last_timestamp = jiffies_to_msecs(jiffies);
1753 atomic_inc(&gsi_ctx->num_chan);
1754
1755 return GSI_STATUS_SUCCESS;
1756}
1757EXPORT_SYMBOL(gsi_alloc_channel);
1758
1759static void __gsi_write_channel_scratch(unsigned long chan_hdl,
1760 union __packed gsi_channel_scratch val)
1761{
1762 uint32_t reg;
1763
1764 gsi_writel(val.data.word1, gsi_ctx->base +
1765 GSI_EE_n_GSI_CH_k_SCRATCH_0_OFFS(chan_hdl,
1766 gsi_ctx->per.ee));
1767 gsi_writel(val.data.word2, gsi_ctx->base +
1768 GSI_EE_n_GSI_CH_k_SCRATCH_1_OFFS(chan_hdl,
1769 gsi_ctx->per.ee));
1770 gsi_writel(val.data.word3, gsi_ctx->base +
1771 GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS(chan_hdl,
1772 gsi_ctx->per.ee));
1773 /* below sequence is not atomic. assumption is sequencer specific fields
1774 * will remain unchanged across this sequence
1775 */
1776 reg = gsi_readl(gsi_ctx->base +
1777 GSI_EE_n_GSI_CH_k_SCRATCH_3_OFFS(chan_hdl,
1778 gsi_ctx->per.ee));
1779 reg &= 0xFFFF;
1780 reg |= (val.data.word4 & 0xFFFF0000);
1781 gsi_writel(reg, gsi_ctx->base +
1782 GSI_EE_n_GSI_CH_k_SCRATCH_3_OFFS(chan_hdl,
1783 gsi_ctx->per.ee));
1784}
1785
1786int gsi_write_channel_scratch(unsigned long chan_hdl,
1787 union __packed gsi_channel_scratch val)
1788{
1789 struct gsi_chan_ctx *ctx;
1790
1791 if (!gsi_ctx) {
1792 pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
1793 return -GSI_STATUS_NODEV;
1794 }
1795
Amir Levy41644242016-11-03 15:38:09 +02001796 if (chan_hdl >= gsi_ctx->max_ch) {
Amir Levycdccd632016-10-30 09:36:41 +02001797 GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
1798 return -GSI_STATUS_INVALID_PARAMS;
1799 }
1800
1801 if (gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_ALLOCATED &&
1802 gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STOPPED) {
1803 GSIERR("bad state %d\n",
1804 gsi_ctx->chan[chan_hdl].state);
1805 return -GSI_STATUS_UNSUPPORTED_OP;
1806 }
1807
1808 ctx = &gsi_ctx->chan[chan_hdl];
1809
1810 mutex_lock(&ctx->mlock);
1811 ctx->scratch = val;
1812 __gsi_write_channel_scratch(chan_hdl, val);
1813 mutex_unlock(&ctx->mlock);
1814
1815 return GSI_STATUS_SUCCESS;
1816}
1817EXPORT_SYMBOL(gsi_write_channel_scratch);
1818
1819int gsi_query_channel_db_addr(unsigned long chan_hdl,
1820 uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb)
1821{
1822 if (!gsi_ctx) {
1823 pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
1824 return -GSI_STATUS_NODEV;
1825 }
1826
1827 if (!db_addr_wp_msb || !db_addr_wp_lsb) {
1828 GSIERR("bad params msb=%p lsb=%p\n", db_addr_wp_msb,
1829 db_addr_wp_lsb);
1830 return -GSI_STATUS_INVALID_PARAMS;
1831 }
1832
Amir Levy41644242016-11-03 15:38:09 +02001833 if (chan_hdl >= gsi_ctx->max_ch) {
Amir Levycdccd632016-10-30 09:36:41 +02001834 GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
1835 return -GSI_STATUS_INVALID_PARAMS;
1836 }
1837
1838 if (gsi_ctx->chan[chan_hdl].state == GSI_CHAN_STATE_NOT_ALLOCATED) {
1839 GSIERR("bad state %d\n",
1840 gsi_ctx->chan[chan_hdl].state);
1841 return -GSI_STATUS_UNSUPPORTED_OP;
1842 }
1843
1844 *db_addr_wp_lsb = gsi_ctx->per.phys_addr +
1845 GSI_EE_n_GSI_CH_k_DOORBELL_0_OFFS(chan_hdl, gsi_ctx->per.ee);
1846 *db_addr_wp_msb = gsi_ctx->per.phys_addr +
1847 GSI_EE_n_GSI_CH_k_DOORBELL_1_OFFS(chan_hdl, gsi_ctx->per.ee);
1848
1849 return GSI_STATUS_SUCCESS;
1850}
1851EXPORT_SYMBOL(gsi_query_channel_db_addr);
1852
/*
 * gsi_start_channel() - Move a channel to STARTED state.
 *
 * Legal from ALLOCATED, STOPPED or STOP_IN_PROC states. Issues the
 * GSI_CH_START command and waits for its completion interrupt.
 *
 * Return: GSI_STATUS_SUCCESS or a negative GSI status code.
 */
int gsi_start_channel(unsigned long chan_hdl)
{
	enum gsi_ch_cmd_opcode op = GSI_CH_START;
	int res;
	uint32_t val;
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];

	if (ctx->state != GSI_CHAN_STATE_ALLOCATED &&
		ctx->state != GSI_CHAN_STATE_STOP_IN_PROC &&
		ctx->state != GSI_CHAN_STATE_STOPPED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&gsi_ctx->mlock);
	reinit_completion(&ctx->compl);

	gsi_ctx->ch_dbg[chan_hdl].ch_start++;
	/* build and issue the CH start command, then wait for the
	 * completion interrupt to update ctx->state
	 */
	val = (((chan_hdl << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) &
			GSI_EE_n_GSI_CH_CMD_CHID_BMSK) |
		((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) &
			GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK));
	gsi_writel(val, gsi_ctx->base +
			GSI_EE_n_GSI_CH_CMD_OFFS(gsi_ctx->per.ee));
	res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
	if (res == 0) {
		GSIERR("chan_hdl=%lu timed out\n", chan_hdl);
		mutex_unlock(&gsi_ctx->mlock);
		return -GSI_STATUS_TIMED_OUT;
	}
	if (ctx->state != GSI_CHAN_STATE_STARTED) {
		GSIERR("chan=%lu unexpected state=%u\n", chan_hdl, ctx->state);
		/* channel failed to reach STARTED - treated as fatal */
		BUG();
	}

	mutex_unlock(&gsi_ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
1904EXPORT_SYMBOL(gsi_start_channel);
1905
/*
 * gsi_stop_channel() - Attempt to move a channel to STOPPED state.
 *
 * Returns success immediately if already stopped. Uses the short
 * GSI_STOP_CMD_TIMEOUT_MS wait since STOP may legitimately still be in
 * progress; -GSI_STATUS_AGAIN (STOP_IN_PROC) or -GSI_STATUS_TIMED_OUT
 * indicate the caller should retry.
 *
 * Return: GSI_STATUS_SUCCESS or a negative GSI status code.
 */
int gsi_stop_channel(unsigned long chan_hdl)
{
	enum gsi_ch_cmd_opcode op = GSI_CH_STOP;
	int res;
	uint32_t val;
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];

	/* stopping an already-stopped channel is a successful no-op */
	if (ctx->state == GSI_CHAN_STATE_STOPPED) {
		GSIDBG("chan_hdl=%lu already stopped\n", chan_hdl);
		return GSI_STATUS_SUCCESS;
	}

	if (ctx->state != GSI_CHAN_STATE_STARTED &&
		ctx->state != GSI_CHAN_STATE_STOP_IN_PROC &&
		ctx->state != GSI_CHAN_STATE_ERROR) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&gsi_ctx->mlock);
	reinit_completion(&ctx->compl);

	gsi_ctx->ch_dbg[chan_hdl].ch_stop++;
	/* build and issue the CH stop command */
	val = (((chan_hdl << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) &
			GSI_EE_n_GSI_CH_CMD_CHID_BMSK) |
		((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) &
			GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK));
	gsi_writel(val, gsi_ctx->base +
			GSI_EE_n_GSI_CH_CMD_OFFS(gsi_ctx->per.ee));
	res = wait_for_completion_timeout(&ctx->compl,
			msecs_to_jiffies(GSI_STOP_CMD_TIMEOUT_MS));
	if (res == 0) {
		/* debug (not error) level: timeout here is retryable */
		GSIDBG("chan_hdl=%lu timed out\n", chan_hdl);
		res = -GSI_STATUS_TIMED_OUT;
		goto free_lock;
	}

	if (ctx->state != GSI_CHAN_STATE_STOPPED &&
		ctx->state != GSI_CHAN_STATE_STOP_IN_PROC) {
		GSIERR("chan=%lu unexpected state=%u\n", chan_hdl, ctx->state);
		res = -GSI_STATUS_BAD_STATE;
		goto free_lock;
	}

	if (ctx->state == GSI_CHAN_STATE_STOP_IN_PROC) {
		GSIERR("chan=%lu busy try again\n", chan_hdl);
		res = -GSI_STATUS_AGAIN;
		goto free_lock;
	}

	res = GSI_STATUS_SUCCESS;

free_lock:
	mutex_unlock(&gsi_ctx->mlock);
	return res;
}
1974EXPORT_SYMBOL(gsi_stop_channel);
1975
/*
 * gsi_stop_db_channel() - Issue a GSI_CH_DB_STOP command for a channel and
 * wait (bounded) for the channel to reach STOPPED state.
 *
 * Same flow as gsi_stop_channel() but with the doorbell-stop opcode.
 *
 * @chan_hdl: channel handle, used as an index into gsi_ctx->chan[]
 *
 * Return: GSI_STATUS_SUCCESS when stopped, -GSI_STATUS_AGAIN when the stop
 * is still in progress (caller should retry), or another negative GSI
 * status code on failure.
 */
int gsi_stop_db_channel(unsigned long chan_hdl)
{
	enum gsi_ch_cmd_opcode op = GSI_CH_DB_STOP;
	int res;
	uint32_t val;
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];

	/* stopping an already-stopped channel is treated as success */
	if (ctx->state == GSI_CHAN_STATE_STOPPED) {
		GSIDBG("chan_hdl=%lu already stopped\n", chan_hdl);
		return GSI_STATUS_SUCCESS;
	}

	if (ctx->state != GSI_CHAN_STATE_STARTED &&
		ctx->state != GSI_CHAN_STATE_STOP_IN_PROC) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	/* mlock serializes all channel commands issued towards the GSI HW */
	mutex_lock(&gsi_ctx->mlock);
	reinit_completion(&ctx->compl);

	gsi_ctx->ch_dbg[chan_hdl].ch_db_stop++;
	/* encode channel id and opcode into the channel command register */
	val = (((chan_hdl << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) &
		GSI_EE_n_GSI_CH_CMD_CHID_BMSK) |
		((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) &
		GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK));
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_GSI_CH_CMD_OFFS(gsi_ctx->per.ee));
	res = wait_for_completion_timeout(&ctx->compl,
		msecs_to_jiffies(GSI_STOP_CMD_TIMEOUT_MS));
	if (res == 0) {
		GSIERR("chan_hdl=%lu timed out\n", chan_hdl);
		res = -GSI_STATUS_TIMED_OUT;
		goto free_lock;
	}

	if (ctx->state != GSI_CHAN_STATE_STOPPED &&
		ctx->state != GSI_CHAN_STATE_STOP_IN_PROC) {
		GSIERR("chan=%lu unexpected state=%u\n", chan_hdl, ctx->state);
		res = -GSI_STATUS_BAD_STATE;
		goto free_lock;
	}

	/* HW accepted the command but the stop has not completed yet */
	if (ctx->state == GSI_CHAN_STATE_STOP_IN_PROC) {
		GSIERR("chan=%lu busy try again\n", chan_hdl);
		res = -GSI_STATUS_AGAIN;
		goto free_lock;
	}

	res = GSI_STATUS_SUCCESS;

free_lock:
	mutex_unlock(&gsi_ctx->mlock);
	return res;
}
EXPORT_SYMBOL(gsi_stop_db_channel);
2044
/*
 * gsi_reset_channel() - Issue a GSI_CH_RESET command for a STOPPED channel,
 * then re-program the channel context, ring state and scratch area.
 *
 * @chan_hdl: channel handle, used as an index into gsi_ctx->chan[]
 *
 * Return: GSI_STATUS_SUCCESS or a negative GSI status code.
 */
int gsi_reset_channel(unsigned long chan_hdl)
{
	enum gsi_ch_cmd_opcode op = GSI_CH_RESET;
	int res;
	uint32_t val;
	struct gsi_chan_ctx *ctx;
	bool reset_done = false;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];

	/* only a stopped channel may be reset */
	if (ctx->state != GSI_CHAN_STATE_STOPPED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&gsi_ctx->mlock);

reset:
	reinit_completion(&ctx->compl);
	gsi_ctx->ch_dbg[chan_hdl].ch_reset++;
	/* encode channel id and opcode into the channel command register */
	val = (((chan_hdl << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) &
		GSI_EE_n_GSI_CH_CMD_CHID_BMSK) |
		((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) &
		GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK));
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_GSI_CH_CMD_OFFS(gsi_ctx->per.ee));
	/* GSI_CMD_TIMEOUT is already expressed in jiffies (5*HZ) */
	res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
	if (res == 0) {
		GSIERR("chan_hdl=%lu timed out\n", chan_hdl);
		mutex_unlock(&gsi_ctx->mlock);
		return -GSI_STATUS_TIMED_OUT;
	}

	/* a successful reset must leave the channel in ALLOCATED state */
	if (ctx->state != GSI_CHAN_STATE_ALLOCATED) {
		GSIERR("chan_hdl=%lu unexpected state=%u\n", chan_hdl,
			ctx->state);
		BUG();
	}

	/* workaround: reset GSI producers (FROM_GSI) a second time, with a
	 * short sleep in between; reset_done guards against looping forever
	 */
	if (ctx->props.dir == GSI_CHAN_DIR_FROM_GSI && !reset_done) {
		usleep_range(GSI_RESET_WA_MIN_SLEEP, GSI_RESET_WA_MAX_SLEEP);
		reset_done = true;
		goto reset;
	}

	/* the reset cleared the HW channel context; re-program it and
	 * re-initialize the SW ring bookkeeping
	 */
	gsi_program_chan_ctx(&ctx->props, gsi_ctx->per.ee,
		ctx->evtr ? ctx->evtr->id : GSI_NO_EVT_ERINDEX);
	gsi_init_chan_ring(&ctx->props, &ctx->ring);

	/* restore scratch */
	__gsi_write_channel_scratch(chan_hdl, ctx->scratch);

	mutex_unlock(&gsi_ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_reset_channel);
2113
/*
 * gsi_dealloc_channel() - Issue a GSI_CH_DE_ALLOC command for an ALLOCATED
 * channel and release its SW resources.
 *
 * @chan_hdl: channel handle, used as an index into gsi_ctx->chan[]
 *
 * Return: GSI_STATUS_SUCCESS or a negative GSI status code.
 */
int gsi_dealloc_channel(unsigned long chan_hdl)
{
	enum gsi_ch_cmd_opcode op = GSI_CH_DE_ALLOC;
	int res;
	uint32_t val;
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];

	/* only an allocated (and not started/stopped) channel may be freed */
	if (ctx->state != GSI_CHAN_STATE_ALLOCATED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	/* mlock serializes all channel commands issued towards the GSI HW */
	mutex_lock(&gsi_ctx->mlock);
	reinit_completion(&ctx->compl);

	gsi_ctx->ch_dbg[chan_hdl].ch_de_alloc++;
	/* encode channel id and opcode into the channel command register */
	val = (((chan_hdl << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) &
		GSI_EE_n_GSI_CH_CMD_CHID_BMSK) |
		((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) &
		GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK));
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_GSI_CH_CMD_OFFS(gsi_ctx->per.ee));
	res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
	if (res == 0) {
		GSIERR("chan_hdl=%lu timed out\n", chan_hdl);
		mutex_unlock(&gsi_ctx->mlock);
		return -GSI_STATUS_TIMED_OUT;
	}
	/* a successful de-alloc must leave the channel NOT_ALLOCATED */
	if (ctx->state != GSI_CHAN_STATE_NOT_ALLOCATED) {
		GSIERR("chan_hdl=%lu unexpected state=%u\n", chan_hdl,
			ctx->state);
		BUG();
	}

	mutex_unlock(&gsi_ctx->mlock);

	/* release per-element user cookies and drop ref counts */
	devm_kfree(gsi_ctx->dev, ctx->user_data);
	ctx->allocated = false;
	if (ctx->evtr)
		atomic_dec(&ctx->evtr->chan_ref_cnt);
	atomic_dec(&gsi_ctx->num_chan);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_dealloc_channel);
2171
2172void gsi_update_ch_dp_stats(struct gsi_chan_ctx *ctx, uint16_t used)
2173{
2174 unsigned long now = jiffies_to_msecs(jiffies);
2175 unsigned long elapsed;
2176
2177 if (used == 0) {
2178 elapsed = now - ctx->stats.dp.last_timestamp;
2179 if (ctx->stats.dp.empty_time < elapsed)
2180 ctx->stats.dp.empty_time = elapsed;
2181 }
2182
2183 if (used <= ctx->props.max_re_expected / 3)
2184 ++ctx->stats.dp.ch_below_lo;
2185 else if (used <= 2 * ctx->props.max_re_expected / 3)
2186 ++ctx->stats.dp.ch_below_hi;
2187 else
2188 ++ctx->stats.dp.ch_above_hi;
2189 ctx->stats.dp.last_timestamp = now;
2190}
2191
/*
 * __gsi_query_channel_free_re() - Compute how many free ring elements
 * remain on a channel's transfer ring.
 *
 * Caller must hold the relevant ring spinlock.
 *
 * @ctx:         channel context
 * @num_free_re: [out] number of free ring elements
 */
static void __gsi_query_channel_free_re(struct gsi_chan_ctx *ctx,
		uint16_t *num_free_re)
{
	uint16_t start;
	uint16_t end;
	uint64_t rp;
	int ee = gsi_ctx->per.ee;
	uint16_t used;

	if (!ctx->evtr) {
		/*
		 * No event ring: fetch the current read pointer from HW.
		 * CNTXT_4 holds only the low 32 bits; the high 32 bits are
		 * taken from the cached ring.rp.
		 */
		rp = gsi_readl(gsi_ctx->base +
			GSI_EE_n_GSI_CH_k_CNTXT_4_OFFS(ctx->props.ch_id, ee));
		rp |= ctx->ring.rp & 0xFFFFFFFF00000000;

		ctx->ring.rp = rp;
	} else {
		/* event-driven channels track the read pointer locally */
		rp = ctx->ring.rp_local;
	}

	start = gsi_find_idx_from_addr(&ctx->ring, rp);
	end = gsi_find_idx_from_addr(&ctx->ring, ctx->ring.wp_local);

	/* occupancy, accounting for wrap-around of the circular ring */
	if (end >= start)
		used = end - start;
	else
		used = ctx->ring.max_num_elem + 1 - (start - end);

	*num_free_re = ctx->ring.max_num_elem - used;
}
2221
2222int gsi_query_channel_info(unsigned long chan_hdl,
2223 struct gsi_chan_info *info)
2224{
2225 struct gsi_chan_ctx *ctx;
2226 spinlock_t *slock;
2227 unsigned long flags;
2228 uint64_t rp;
2229 uint64_t wp;
2230 int ee = gsi_ctx->per.ee;
2231
2232 if (!gsi_ctx) {
2233 pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
2234 return -GSI_STATUS_NODEV;
2235 }
2236
Amir Levy41644242016-11-03 15:38:09 +02002237 if (chan_hdl >= gsi_ctx->max_ch || !info) {
Amir Levycdccd632016-10-30 09:36:41 +02002238 GSIERR("bad params chan_hdl=%lu info=%p\n", chan_hdl, info);
2239 return -GSI_STATUS_INVALID_PARAMS;
2240 }
2241
2242 ctx = &gsi_ctx->chan[chan_hdl];
2243 if (ctx->evtr) {
2244 slock = &ctx->evtr->ring.slock;
2245 info->evt_valid = true;
2246 } else {
2247 slock = &ctx->ring.slock;
2248 info->evt_valid = false;
2249 }
2250
2251 spin_lock_irqsave(slock, flags);
2252
2253 rp = gsi_readl(gsi_ctx->base +
2254 GSI_EE_n_GSI_CH_k_CNTXT_4_OFFS(ctx->props.ch_id, ee));
2255 rp |= ((uint64_t)gsi_readl(gsi_ctx->base +
2256 GSI_EE_n_GSI_CH_k_CNTXT_5_OFFS(ctx->props.ch_id, ee))) << 32;
2257 ctx->ring.rp = rp;
2258 info->rp = rp;
2259
2260 wp = gsi_readl(gsi_ctx->base +
2261 GSI_EE_n_GSI_CH_k_CNTXT_6_OFFS(ctx->props.ch_id, ee));
2262 wp |= ((uint64_t)gsi_readl(gsi_ctx->base +
2263 GSI_EE_n_GSI_CH_k_CNTXT_7_OFFS(ctx->props.ch_id, ee))) << 32;
2264 ctx->ring.wp = wp;
2265 info->wp = wp;
2266
2267 if (info->evt_valid) {
2268 rp = gsi_readl(gsi_ctx->base +
2269 GSI_EE_n_EV_CH_k_CNTXT_4_OFFS(ctx->evtr->id, ee));
2270 rp |= ((uint64_t)gsi_readl(gsi_ctx->base +
2271 GSI_EE_n_EV_CH_k_CNTXT_5_OFFS(ctx->evtr->id, ee)))
2272 << 32;
2273 info->evt_rp = rp;
2274
2275 wp = gsi_readl(gsi_ctx->base +
2276 GSI_EE_n_EV_CH_k_CNTXT_6_OFFS(ctx->evtr->id, ee));
2277 wp |= ((uint64_t)gsi_readl(gsi_ctx->base +
2278 GSI_EE_n_EV_CH_k_CNTXT_7_OFFS(ctx->evtr->id, ee)))
2279 << 32;
2280 info->evt_wp = wp;
2281 }
2282
2283 spin_unlock_irqrestore(slock, flags);
2284
2285 GSIDBG("ch=%lu RP=0x%llx WP=0x%llx ev_valid=%d ERP=0x%llx EWP=0x%llx\n",
2286 chan_hdl, info->rp, info->wp,
2287 info->evt_valid, info->evt_rp, info->evt_wp);
2288
2289 return GSI_STATUS_SUCCESS;
2290}
2291EXPORT_SYMBOL(gsi_query_channel_info);
2292
2293int gsi_is_channel_empty(unsigned long chan_hdl, bool *is_empty)
2294{
2295 struct gsi_chan_ctx *ctx;
2296 spinlock_t *slock;
2297 unsigned long flags;
2298 uint64_t rp;
2299 uint64_t wp;
2300 int ee = gsi_ctx->per.ee;
2301
2302 if (!gsi_ctx) {
2303 pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
2304 return -GSI_STATUS_NODEV;
2305 }
2306
Amir Levy41644242016-11-03 15:38:09 +02002307 if (chan_hdl >= gsi_ctx->max_ch || !is_empty) {
Amir Levycdccd632016-10-30 09:36:41 +02002308 GSIERR("bad params chan_hdl=%lu is_empty=%p\n",
2309 chan_hdl, is_empty);
2310 return -GSI_STATUS_INVALID_PARAMS;
2311 }
2312
2313 ctx = &gsi_ctx->chan[chan_hdl];
2314
2315 if (ctx->props.prot != GSI_CHAN_PROT_GPI) {
2316 GSIERR("op not supported for protocol %u\n", ctx->props.prot);
2317 return -GSI_STATUS_UNSUPPORTED_OP;
2318 }
2319
2320 if (ctx->evtr)
2321 slock = &ctx->evtr->ring.slock;
2322 else
2323 slock = &ctx->ring.slock;
2324
2325 spin_lock_irqsave(slock, flags);
2326
2327 rp = gsi_readl(gsi_ctx->base +
2328 GSI_EE_n_GSI_CH_k_CNTXT_4_OFFS(ctx->props.ch_id, ee));
Skylar Chang8568eb92017-03-21 17:18:51 -07002329 rp |= ctx->ring.rp & 0xFFFFFFFF00000000;
Amir Levycdccd632016-10-30 09:36:41 +02002330 ctx->ring.rp = rp;
2331
2332 wp = gsi_readl(gsi_ctx->base +
2333 GSI_EE_n_GSI_CH_k_CNTXT_6_OFFS(ctx->props.ch_id, ee));
Skylar Chang8568eb92017-03-21 17:18:51 -07002334 wp |= ctx->ring.wp & 0xFFFFFFFF00000000;
Amir Levycdccd632016-10-30 09:36:41 +02002335 ctx->ring.wp = wp;
2336
2337 if (ctx->props.dir == GSI_CHAN_DIR_FROM_GSI)
2338 *is_empty = (ctx->ring.rp_local == rp) ? true : false;
2339 else
2340 *is_empty = (wp == rp) ? true : false;
2341
2342 spin_unlock_irqrestore(slock, flags);
2343
2344 GSIDBG("ch=%lu RP=0x%llx WP=0x%llx RP_LOCAL=0x%llx\n",
2345 chan_hdl, rp, wp, ctx->ring.rp_local);
2346
2347 return GSI_STATUS_SUCCESS;
2348}
2349EXPORT_SYMBOL(gsi_is_channel_empty);
2350
/*
 * gsi_queue_xfer() - Queue transfer elements (TREs) on a GPI channel's
 * transfer ring, optionally ringing the doorbell.
 *
 * Either all of the xfers are queued or none are: on a bad element type
 * the local write pointer is rolled back and nothing is committed.
 *
 * @chan_hdl:  channel handle, used as an index into gsi_ctx->chan[]
 * @num_xfers: number of elements in @xfer
 * @xfer:      array of transfer descriptors
 * @ring_db:   when true, ring the channel doorbell after queueing
 *
 * Return: GSI_STATUS_SUCCESS or a negative GSI status code.
 */
int gsi_queue_xfer(unsigned long chan_hdl, uint16_t num_xfers,
		struct gsi_xfer_elem *xfer, bool ring_db)
{
	struct gsi_chan_ctx *ctx;
	uint16_t free;
	struct gsi_tre tre;
	struct gsi_tre *tre_ptr;
	uint16_t idx;
	uint64_t wp_rollback;
	int i;
	spinlock_t *slock;
	unsigned long flags;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch || !num_xfers || !xfer) {
		GSIERR("bad params chan_hdl=%lu num_xfers=%u xfer=%p\n",
			chan_hdl, num_xfers, xfer);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];

	if (ctx->props.prot != GSI_CHAN_PROT_GPI) {
		GSIERR("op not supported for protocol %u\n", ctx->props.prot);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	if (ctx->evtr)
		slock = &ctx->evtr->ring.slock;
	else
		slock = &ctx->ring.slock;

	spin_lock_irqsave(slock, flags);
	__gsi_query_channel_free_re(ctx, &free);

	/* reject the whole batch if it does not fit in the ring */
	if (num_xfers > free) {
		GSIERR("chan_hdl=%lu num_xfers=%u free=%u\n",
			chan_hdl, num_xfers, free);
		spin_unlock_irqrestore(slock, flags);
		return -GSI_STATUS_RING_INSUFFICIENT_SPACE;
	}

	/* remember wp_local so a mid-loop failure can undo all writes */
	wp_rollback = ctx->ring.wp_local;
	for (i = 0; i < num_xfers; i++) {
		memset(&tre, 0, sizeof(tre));
		tre.buffer_ptr = xfer[i].addr;
		tre.buf_len = xfer[i].len;
		if (xfer[i].type == GSI_XFER_ELEM_DATA) {
			tre.re_type = GSI_RE_XFER;
		} else if (xfer[i].type == GSI_XFER_ELEM_IMME_CMD) {
			tre.re_type = GSI_RE_IMMD_CMD;
		} else if (xfer[i].type == GSI_XFER_ELEM_NOP) {
			tre.re_type = GSI_RE_NOP;
		} else {
			GSIERR("chan_hdl=%lu bad RE type=%u\n", chan_hdl,
				xfer[i].type);
			break;
		}
		/* translate per-element interrupt/chaining flags */
		tre.bei = (xfer[i].flags & GSI_XFER_FLAG_BEI) ? 1 : 0;
		tre.ieot = (xfer[i].flags & GSI_XFER_FLAG_EOT) ? 1 : 0;
		tre.ieob = (xfer[i].flags & GSI_XFER_FLAG_EOB) ? 1 : 0;
		tre.chain = (xfer[i].flags & GSI_XFER_FLAG_CHAIN) ? 1 : 0;

		idx = gsi_find_idx_from_addr(&ctx->ring, ctx->ring.wp_local);
		tre_ptr = (struct gsi_tre *)(ctx->ring.base_va +
			idx * ctx->ring.elem_sz);

		/* write the TRE to ring */
		*tre_ptr = tre;
		ctx->user_data[idx] = xfer[i].xfer_user_data;
		gsi_incr_ring_wp(&ctx->ring);
	}

	if (i != num_xfers) {
		/* reject all the xfers */
		ctx->ring.wp_local = wp_rollback;
		spin_unlock_irqrestore(slock, flags);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx->stats.queued += num_xfers;

	/* ensure TRE is set before ringing doorbell */
	wmb();

	if (ring_db)
		gsi_ring_chan_doorbell(ctx);

	spin_unlock_irqrestore(slock, flags);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_queue_xfer);
2448
2449int gsi_start_xfer(unsigned long chan_hdl)
2450{
2451 struct gsi_chan_ctx *ctx;
2452
2453 if (!gsi_ctx) {
2454 pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
2455 return -GSI_STATUS_NODEV;
2456 }
2457
Amir Levy41644242016-11-03 15:38:09 +02002458 if (chan_hdl >= gsi_ctx->max_ch) {
Amir Levycdccd632016-10-30 09:36:41 +02002459 GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
2460 return -GSI_STATUS_INVALID_PARAMS;
2461 }
2462
2463 ctx = &gsi_ctx->chan[chan_hdl];
2464
2465 if (ctx->props.prot != GSI_CHAN_PROT_GPI) {
2466 GSIERR("op not supported for protocol %u\n", ctx->props.prot);
2467 return -GSI_STATUS_UNSUPPORTED_OP;
2468 }
2469
2470 if (ctx->state != GSI_CHAN_STATE_STARTED) {
2471 GSIERR("bad state %d\n", ctx->state);
2472 return -GSI_STATUS_UNSUPPORTED_OP;
2473 }
2474
Skylar Chang8568eb92017-03-21 17:18:51 -07002475 if (ctx->ring.wp == ctx->ring.wp_local)
2476 return GSI_STATUS_SUCCESS;
2477
Amir Levycdccd632016-10-30 09:36:41 +02002478 gsi_ring_chan_doorbell(ctx);
2479
2480 return GSI_STATUS_SUCCESS;
2481};
2482EXPORT_SYMBOL(gsi_start_xfer);
2483
2484int gsi_poll_channel(unsigned long chan_hdl,
2485 struct gsi_chan_xfer_notify *notify)
2486{
2487 struct gsi_chan_ctx *ctx;
2488 uint64_t rp;
2489 int ee = gsi_ctx->per.ee;
2490 unsigned long flags;
2491
2492 if (!gsi_ctx) {
2493 pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
2494 return -GSI_STATUS_NODEV;
2495 }
2496
Amir Levy41644242016-11-03 15:38:09 +02002497 if (chan_hdl >= gsi_ctx->max_ch || !notify) {
Amir Levycdccd632016-10-30 09:36:41 +02002498 GSIERR("bad params chan_hdl=%lu notify=%p\n", chan_hdl, notify);
2499 return -GSI_STATUS_INVALID_PARAMS;
2500 }
2501
2502 ctx = &gsi_ctx->chan[chan_hdl];
2503
2504 if (ctx->props.prot != GSI_CHAN_PROT_GPI) {
2505 GSIERR("op not supported for protocol %u\n", ctx->props.prot);
2506 return -GSI_STATUS_UNSUPPORTED_OP;
2507 }
2508
2509 if (!ctx->evtr) {
2510 GSIERR("no event ring associated chan_hdl=%lu\n", chan_hdl);
2511 return -GSI_STATUS_UNSUPPORTED_OP;
2512 }
2513
2514 spin_lock_irqsave(&ctx->evtr->ring.slock, flags);
Skylar Chang8568eb92017-03-21 17:18:51 -07002515 if (ctx->evtr->ring.rp == ctx->evtr->ring.rp_local) {
2516 /* update rp to see of we have anything new to process */
2517 rp = gsi_readl(gsi_ctx->base +
2518 GSI_EE_n_EV_CH_k_CNTXT_4_OFFS(ctx->evtr->id, ee));
2519 rp |= ctx->ring.rp & 0xFFFFFFFF00000000;
2520
2521 ctx->evtr->ring.rp = rp;
2522 }
2523
2524 if (ctx->evtr->ring.rp == ctx->evtr->ring.rp_local) {
Amir Levycdccd632016-10-30 09:36:41 +02002525 spin_unlock_irqrestore(&ctx->evtr->ring.slock, flags);
2526 ctx->stats.poll_empty++;
2527 return GSI_STATUS_POLL_EMPTY;
2528 }
2529
2530 gsi_process_evt_re(ctx->evtr, notify, false);
Amir Levycdccd632016-10-30 09:36:41 +02002531 spin_unlock_irqrestore(&ctx->evtr->ring.slock, flags);
2532 ctx->stats.poll_ok++;
2533
2534 return GSI_STATUS_SUCCESS;
2535}
2536EXPORT_SYMBOL(gsi_poll_channel);
2537
/*
 * gsi_config_channel_mode() - Switch a GPI channel between callback
 * (interrupt) mode and poll mode by masking/unmasking its IEOB interrupt.
 *
 * Only valid for channels with an exclusive event ring.
 *
 * @chan_hdl: channel handle, used as an index into gsi_ctx->chan[]
 * @mode:     requested mode (GSI_CHAN_MODE_POLL / GSI_CHAN_MODE_CALLBACK)
 *
 * Return: GSI_STATUS_SUCCESS or a negative GSI status code (including
 * when the channel is already in the requested mode).
 */
int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode)
{
	struct gsi_chan_ctx *ctx;
	enum gsi_chan_mode curr;
	unsigned long flags;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu mode=%u\n", chan_hdl, mode);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];

	if (ctx->props.prot != GSI_CHAN_PROT_GPI) {
		GSIERR("op not supported for protocol %u\n", ctx->props.prot);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	/* mode switching masks the event ring IRQ, so the ring must be
	 * exclusively owned by this channel
	 */
	if (!ctx->evtr || !ctx->evtr->props.exclusive) {
		GSIERR("cannot configure mode on chan_hdl=%lu\n",
			chan_hdl);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	if (atomic_read(&ctx->poll_mode))
		curr = GSI_CHAN_MODE_POLL;
	else
		curr = GSI_CHAN_MODE_CALLBACK;

	if (mode == curr) {
		GSIERR("already in requested mode %u chan_hdl=%lu\n",
			curr, chan_hdl);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	spin_lock_irqsave(&gsi_ctx->slock, flags);
	/* callback -> poll: mask the IEOB interrupt for this event ring */
	if (curr == GSI_CHAN_MODE_CALLBACK &&
			mode == GSI_CHAN_MODE_POLL) {
		__gsi_config_ieob_irq(gsi_ctx->per.ee, 1 << ctx->evtr->id, 0);
		ctx->stats.callback_to_poll++;
	}

	/* poll -> callback: unmask the IEOB interrupt again */
	if (curr == GSI_CHAN_MODE_POLL &&
			mode == GSI_CHAN_MODE_CALLBACK) {
		__gsi_config_ieob_irq(gsi_ctx->per.ee, 1 << ctx->evtr->id, ~0);
		ctx->stats.poll_to_callback++;
	}
	atomic_set(&ctx->poll_mode, mode);
	spin_unlock_irqrestore(&gsi_ctx->slock, flags);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_config_channel_mode);
2596
2597int gsi_get_channel_cfg(unsigned long chan_hdl, struct gsi_chan_props *props,
2598 union gsi_channel_scratch *scr)
2599{
2600 struct gsi_chan_ctx *ctx;
2601
2602 if (!gsi_ctx) {
2603 pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
2604 return -GSI_STATUS_NODEV;
2605 }
2606
2607 if (!props || !scr) {
2608 GSIERR("bad params props=%p scr=%p\n", props, scr);
2609 return -GSI_STATUS_INVALID_PARAMS;
2610 }
2611
Amir Levy41644242016-11-03 15:38:09 +02002612 if (chan_hdl >= gsi_ctx->max_ch) {
Amir Levycdccd632016-10-30 09:36:41 +02002613 GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
2614 return -GSI_STATUS_INVALID_PARAMS;
2615 }
2616
2617 ctx = &gsi_ctx->chan[chan_hdl];
2618
2619 if (ctx->state == GSI_CHAN_STATE_NOT_ALLOCATED) {
2620 GSIERR("bad state %d\n", ctx->state);
2621 return -GSI_STATUS_UNSUPPORTED_OP;
2622 }
2623
2624 mutex_lock(&ctx->mlock);
2625 *props = ctx->props;
2626 *scr = ctx->scratch;
2627 mutex_unlock(&ctx->mlock);
2628
2629 return GSI_STATUS_SUCCESS;
2630}
2631EXPORT_SYMBOL(gsi_get_channel_cfg);
2632
/*
 * gsi_set_channel_cfg() - Replace a channel's properties (and optionally
 * its scratch), then re-program the HW channel context accordingly.
 *
 * Only allowed while the channel is in ALLOCATED state; ch_id and
 * evt_ring_hdl are immutable.
 *
 * @chan_hdl: channel handle, used as an index into gsi_ctx->chan[]
 * @props:    new channel properties (validated before use)
 * @scr:      optional new scratch contents (may be NULL to keep current)
 *
 * Return: GSI_STATUS_SUCCESS or a negative GSI status code.
 */
int gsi_set_channel_cfg(unsigned long chan_hdl, struct gsi_chan_props *props,
		union gsi_channel_scratch *scr)
{
	struct gsi_chan_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!props || gsi_validate_channel_props(props)) {
		GSIERR("bad params props=%p\n", props);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (chan_hdl >= gsi_ctx->max_ch) {
		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->chan[chan_hdl];

	if (ctx->state != GSI_CHAN_STATE_ALLOCATED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	/* channel id and event ring binding cannot be changed here */
	if (ctx->props.ch_id != props->ch_id ||
		ctx->props.evt_ring_hdl != props->evt_ring_hdl) {
		GSIERR("changing immutable fields not supported\n");
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&ctx->mlock);
	ctx->props = *props;
	if (scr)
		ctx->scratch = *scr;
	/* push the new configuration into the HW channel context and
	 * re-initialize the SW ring bookkeeping
	 */
	gsi_program_chan_ctx(&ctx->props, gsi_ctx->per.ee,
		ctx->evtr ? ctx->evtr->id : GSI_NO_EVT_ERINDEX);
	gsi_init_chan_ring(&ctx->props, &ctx->ring);

	/* restore scratch */
	__gsi_write_channel_scratch(chan_hdl, ctx->scratch);
	mutex_unlock(&ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_set_channel_cfg);
2681
/*
 * gsi_configure_ieps() - Program the GSI microcontroller IRAM pointer
 * registers, assigning one sequential id per internal event entry point.
 *
 * @base: CPU-mapped GSI register base
 */
static void gsi_configure_ieps(void *base)
{
	void __iomem *gsi_base = (void __iomem *)base;

	gsi_writel(1, gsi_base + GSI_GSI_IRAM_PTR_CH_CMD_OFFS);
	gsi_writel(2, gsi_base + GSI_GSI_IRAM_PTR_CH_DB_OFFS);
	gsi_writel(3, gsi_base + GSI_GSI_IRAM_PTR_CH_DIS_COMP_OFFS);
	gsi_writel(4, gsi_base + GSI_GSI_IRAM_PTR_CH_EMPTY_OFFS);
	gsi_writel(5, gsi_base + GSI_GSI_IRAM_PTR_EE_GENERIC_CMD_OFFS);
	gsi_writel(6, gsi_base + GSI_GSI_IRAM_PTR_EVENT_GEN_COMP_OFFS);
	gsi_writel(7, gsi_base + GSI_GSI_IRAM_PTR_INT_MOD_STOPED_OFFS);
	/* note: TLV_IN_0 then _2 then _1 — register ids do not follow the
	 * sequential value order here
	 */
	gsi_writel(8, gsi_base + GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_OFFS);
	gsi_writel(9, gsi_base + GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_OFFS);
	gsi_writel(10, gsi_base + GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_OFFS);
	gsi_writel(11, gsi_base + GSI_GSI_IRAM_PTR_NEW_RE_OFFS);
	gsi_writel(12, gsi_base + GSI_GSI_IRAM_PTR_READ_ENG_COMP_OFFS);
	gsi_writel(13, gsi_base + GSI_GSI_IRAM_PTR_TIMER_EXPIRED_OFFS);
}
2700
/*
 * gsi_configure_bck_prs_matrix() - Program the GSI back-pressure matrix
 * registers with their default bitmasks.
 *
 * @base: CPU-mapped GSI register base
 */
static void gsi_configure_bck_prs_matrix(void *base)
{
	void __iomem *gsi_base = (void __iomem *)base;
	/*
	 * For now, these are default values. In the future, GSI FW image will
	 * produce optimized back-pressure values based on the FW image.
	 */
	gsi_writel(0xfffffffe,
		gsi_base + GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_OFFS);
	gsi_writel(0xffffffff,
		gsi_base + GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_OFFS);
	gsi_writel(0xffffffbf, gsi_base + GSI_IC_GEN_EVNT_BCK_PRS_LSB_OFFS);
	gsi_writel(0xffffffff, gsi_base + GSI_IC_GEN_EVNT_BCK_PRS_MSB_OFFS);
	gsi_writel(0xffffefff, gsi_base + GSI_IC_GEN_INT_BCK_PRS_LSB_OFFS);
	gsi_writel(0xffffffff, gsi_base + GSI_IC_GEN_INT_BCK_PRS_MSB_OFFS);
	gsi_writel(0xffffefff,
		gsi_base + GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_OFFS);
	gsi_writel(0xffffffff,
		gsi_base + GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_OFFS);
	gsi_writel(0x00000000,
		gsi_base + GSI_IC_PROCESS_DESC_BCK_PRS_LSB_OFFS);
	gsi_writel(0x00000000,
		gsi_base + GSI_IC_PROCESS_DESC_BCK_PRS_MSB_OFFS);
	gsi_writel(0xf9ffffff, gsi_base + GSI_IC_TLV_STOP_BCK_PRS_LSB_OFFS);
	gsi_writel(0xffffffff, gsi_base + GSI_IC_TLV_STOP_BCK_PRS_MSB_OFFS);
	gsi_writel(0xf9ffffff, gsi_base + GSI_IC_TLV_RESET_BCK_PRS_LSB_OFFS);
	gsi_writel(0xffffffff, gsi_base + GSI_IC_TLV_RESET_BCK_PRS_MSB_OFFS);
	gsi_writel(0xffffffff, gsi_base + GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_OFFS);
	gsi_writel(0xfffffffe, gsi_base + GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_OFFS);
	gsi_writel(0xffffffff, gsi_base + GSI_IC_READ_BCK_PRS_LSB_OFFS);
	gsi_writel(0xffffefff, gsi_base + GSI_IC_READ_BCK_PRS_MSB_OFFS);
	gsi_writel(0xffffffff, gsi_base + GSI_IC_WRITE_BCK_PRS_LSB_OFFS);
	gsi_writel(0xffffdfff, gsi_base + GSI_IC_WRITE_BCK_PRS_MSB_OFFS);
	gsi_writel(0xffffffff,
		gsi_base + GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_OFFS);
	gsi_writel(0xff03ffff,
		gsi_base + GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_OFFS);
}
2739
/*
 * gsi_configure_regs() - Map the GSI register space temporarily and program
 * the peripheral base address, back-pressure matrix and IRAM pointers.
 *
 * @gsi_base_addr: physical base address of the GSI register region
 * @gsi_size:      size of the region to map
 * @per_base_addr: physical base address of the peripheral (e.g. IPA)
 *
 * Return: 0 on success, -GSI_STATUS_RES_ALLOC_FAILURE if the ioremap fails.
 */
int gsi_configure_regs(phys_addr_t gsi_base_addr, u32 gsi_size,
		phys_addr_t per_base_addr)
{
	void __iomem *gsi_base;

	gsi_base = ioremap_nocache(gsi_base_addr, gsi_size);
	if (!gsi_base) {
		GSIERR("ioremap failed for 0x%pa\n", &gsi_base_addr);
		return -GSI_STATUS_RES_ALLOC_FAILURE;
	}
	/* only the low 32 bits of the peripheral address are programmed */
	gsi_writel(0, gsi_base + GSI_GSI_PERIPH_BASE_ADDR_MSB_OFFS);
	gsi_writel(per_base_addr,
		gsi_base + GSI_GSI_PERIPH_BASE_ADDR_LSB_OFFS);
	gsi_configure_bck_prs_matrix((void *)gsi_base);
	gsi_configure_ieps((void *)gsi_base);
	iounmap(gsi_base);

	return 0;
}
EXPORT_SYMBOL(gsi_configure_regs);
2760
/*
 * gsi_enable_fw() - Enable the GSI engine/MCS with settings appropriate for
 * the given GSI HW version.
 *
 * @gsi_base_addr: physical base address of the GSI register region
 * @gsi_size:      size of the region to map
 * @ver:           GSI HW version (must be a valid enum gsi_ver value)
 *
 * Return: 0 on success, a negative GSI status code on failure.
 */
int gsi_enable_fw(phys_addr_t gsi_base_addr, u32 gsi_size, enum gsi_ver ver)
{
	void __iomem *gsi_base;
	uint32_t value;

	if (ver <= GSI_VER_ERR || ver >= GSI_VER_MAX) {
		GSIERR("Incorrect version %d\n", ver);
		return -GSI_STATUS_ERROR;
	}

	gsi_base = ioremap_nocache(gsi_base_addr, gsi_size);
	if (!gsi_base) {
		GSIERR("ioremap failed for 0x%pa\n", &gsi_base_addr);
		return -GSI_STATUS_RES_ALLOC_FAILURE;
	}

	/* Enable the MCS and set to x2 clocks */
	if (ver >= GSI_VER_1_2) {
		/* v1.2+: MCS enable lives in its own MCS_CFG register, so
		 * the CFG register's MCS bit is left clear
		 */
		value = ((1 << GSI_GSI_MCS_CFG_MCS_ENABLE_SHFT) &
			GSI_GSI_MCS_CFG_MCS_ENABLE_BMSK);
		gsi_writel(value, gsi_base + GSI_GSI_MCS_CFG_OFFS);

		value = (((1 << GSI_GSI_CFG_GSI_ENABLE_SHFT) &
			GSI_GSI_CFG_GSI_ENABLE_BMSK) |
			((0 << GSI_GSI_CFG_MCS_ENABLE_SHFT) &
			GSI_GSI_CFG_MCS_ENABLE_BMSK) |
			((1 << GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_SHFT) &
			GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_BMSK) |
			((0 << GSI_GSI_CFG_UC_IS_MCS_SHFT) &
			GSI_GSI_CFG_UC_IS_MCS_BMSK) |
			((0 << GSI_GSI_CFG_GSI_PWR_CLPS_SHFT) &
			GSI_GSI_CFG_GSI_PWR_CLPS_BMSK) |
			((0 << GSI_GSI_CFG_BP_MTRIX_DISABLE_SHFT) &
			GSI_GSI_CFG_BP_MTRIX_DISABLE_BMSK));
		gsi_writel(value, gsi_base + GSI_GSI_CFG_OFFS);
	} else {
		/* pre-1.2: MCS is enabled through the CFG register itself */
		value = (((1 << GSI_GSI_CFG_GSI_ENABLE_SHFT) &
			GSI_GSI_CFG_GSI_ENABLE_BMSK) |
			((1 << GSI_GSI_CFG_MCS_ENABLE_SHFT) &
			GSI_GSI_CFG_MCS_ENABLE_BMSK) |
			((1 << GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_SHFT) &
			GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_BMSK) |
			((0 << GSI_GSI_CFG_UC_IS_MCS_SHFT) &
			GSI_GSI_CFG_UC_IS_MCS_BMSK));
		gsi_writel(value, gsi_base + GSI_GSI_CFG_OFFS);
	}

	iounmap(gsi_base);

	return 0;

}
EXPORT_SYMBOL(gsi_enable_fw);
2814
Ghanim Fodi37b64952017-01-24 15:42:30 +02002815void gsi_get_inst_ram_offset_and_size(unsigned long *base_offset,
2816 unsigned long *size)
2817{
2818 if (base_offset)
Ghanim Fodi62c62f02017-03-13 20:26:50 +02002819 *base_offset = GSI_GSI_INST_RAM_n_OFFS(0);
Ghanim Fodi37b64952017-01-24 15:42:30 +02002820 if (size)
Ghanim Fodi62c62f02017-03-13 20:26:50 +02002821 *size = GSI_GSI_INST_RAM_n_WORD_SZ *
2822 (GSI_GSI_INST_RAM_n_MAXn + 1);
Ghanim Fodi37b64952017-01-24 15:42:30 +02002823}
2824EXPORT_SYMBOL(gsi_get_inst_ram_offset_and_size);
2825
Skylar Changc9939cf2017-02-21 09:46:46 -08002826int gsi_halt_channel_ee(unsigned int chan_idx, unsigned int ee, int *code)
2827{
2828 enum gsi_generic_ee_cmd_opcode op = GSI_GEN_EE_CMD_HALT_CHANNEL;
2829 uint32_t val;
2830 int res;
2831
2832 if (!gsi_ctx) {
2833 pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
2834 return -GSI_STATUS_NODEV;
2835 }
2836
2837 if (chan_idx >= gsi_ctx->max_ch || !code) {
2838 GSIERR("bad params chan_idx=%d\n", chan_idx);
2839 return -GSI_STATUS_INVALID_PARAMS;
2840 }
2841
2842 mutex_lock(&gsi_ctx->mlock);
2843 reinit_completion(&gsi_ctx->gen_ee_cmd_compl);
2844
2845 /* invalidate the response */
2846 gsi_ctx->scratch.word0.val = gsi_readl(gsi_ctx->base +
2847 GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee));
2848 gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code = 0;
2849 gsi_writel(gsi_ctx->scratch.word0.val, gsi_ctx->base +
2850 GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee));
2851
2852 gsi_ctx->gen_ee_cmd_dbg.halt_channel++;
2853 val = (((op << GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_SHFT) &
2854 GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_BMSK) |
2855 ((chan_idx << GSI_EE_n_GSI_EE_GENERIC_CMD_VIRT_CHAN_IDX_SHFT) &
2856 GSI_EE_n_GSI_EE_GENERIC_CMD_VIRT_CHAN_IDX_BMSK) |
2857 ((ee << GSI_EE_n_GSI_EE_GENERIC_CMD_EE_SHFT) &
2858 GSI_EE_n_GSI_EE_GENERIC_CMD_EE_BMSK));
2859 gsi_writel(val, gsi_ctx->base +
2860 GSI_EE_n_GSI_EE_GENERIC_CMD_OFFS(gsi_ctx->per.ee));
2861
2862 res = wait_for_completion_timeout(&gsi_ctx->gen_ee_cmd_compl,
2863 msecs_to_jiffies(GSI_CMD_TIMEOUT));
2864 if (res == 0) {
2865 GSIERR("chan_idx=%u ee=%u timed out\n", chan_idx, ee);
2866 res = -GSI_STATUS_TIMED_OUT;
2867 goto free_lock;
2868 }
2869
2870 gsi_ctx->scratch.word0.val = gsi_readl(gsi_ctx->base +
2871 GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee));
2872 if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code == 0) {
2873 GSIERR("No response received\n");
2874 res = -GSI_STATUS_ERROR;
2875 goto free_lock;
2876 }
2877
2878 res = GSI_STATUS_SUCCESS;
2879 *code = gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code;
2880free_lock:
2881 mutex_unlock(&gsi_ctx->mlock);
2882
2883 return res;
2884}
2885EXPORT_SYMBOL(gsi_halt_channel_ee);
2886
Amir Levycdccd632016-10-30 09:36:41 +02002887static int msm_gsi_probe(struct platform_device *pdev)
2888{
2889 struct device *dev = &pdev->dev;
2890
2891 pr_debug("gsi_probe\n");
2892 gsi_ctx = devm_kzalloc(dev, sizeof(*gsi_ctx), GFP_KERNEL);
2893 if (!gsi_ctx) {
2894 dev_err(dev, "failed to allocated gsi context\n");
2895 return -ENOMEM;
2896 }
2897
Skylar Chang4d6a8fe2017-03-16 16:55:57 -07002898 gsi_ctx->ipc_logbuf = ipc_log_context_create(GSI_IPC_LOG_PAGES,
2899 "gsi", 0);
Skylar Chang664e2422017-04-03 10:38:53 -07002900 if (gsi_ctx->ipc_logbuf == NULL)
2901 GSIERR("failed to create IPC log, continue...\n");
Skylar Chang4d6a8fe2017-03-16 16:55:57 -07002902
Amir Levycdccd632016-10-30 09:36:41 +02002903 gsi_ctx->dev = dev;
Skylar Changc9939cf2017-02-21 09:46:46 -08002904 init_completion(&gsi_ctx->gen_ee_cmd_compl);
Amir Levycdccd632016-10-30 09:36:41 +02002905 gsi_debugfs_init();
2906
2907 return 0;
2908}
2909
/* Platform driver bound to "qcom,msm_gsi" device-tree nodes (msm_gsi_match) */
static struct platform_driver msm_gsi_driver = {
	.probe = msm_gsi_probe,
	.driver = {
		.owner = THIS_MODULE,
		.name = "gsi",
		.of_match_table = msm_gsi_match,
	},
};
2918
2919/**
2920 * Module Init.
2921 */
2922static int __init gsi_init(void)
2923{
2924 pr_debug("gsi_init\n");
2925 return platform_driver_register(&msm_gsi_driver);
2926}
2927
2928arch_initcall(gsi_init);
2929
2930MODULE_LICENSE("GPL v2");
2931MODULE_DESCRIPTION("Generic Software Interface (GSI)");