/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/of.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/msm_gsi.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include "gsi.h"
#include "gsi_reg.h"

#define GSI_CMD_TIMEOUT (5*HZ)
#define GSI_STOP_CMD_TIMEOUT_MS 1
#define GSI_MAX_CH_LOW_WEIGHT 15
#define GSI_MHI_ER_START 10
#define GSI_MHI_ER_END 16

#define GSI_RESET_WA_MIN_SLEEP 1000
#define GSI_RESET_WA_MAX_SLEEP 2000

static const struct of_device_id msm_gsi_match[] = {
	{ .compatible = "qcom,msm_gsi", },
	{ },
};

struct gsi_ctx *gsi_ctx;

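/*
 * Each __gsi_config_*_irq() helper below does a read-modify-write of
 * one per-EE interrupt mask register: only the bits set in @mask are
 * updated (taking their new value from @val); all other bits keep
 * their current value. For example, __gsi_config_ch_irq(ee, ~0, 0)
 * masks all channel-control interrupts for the EE, while
 * __gsi_config_ch_irq(ee, ~0, ~0) unmasks them all.
 */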
static void __gsi_config_type_irq(int ee, uint32_t mask, uint32_t val)
{
	uint32_t curr;

	curr = gsi_readl(gsi_ctx->base +
			GSI_EE_n_CNTXT_TYPE_IRQ_MSK_OFFS(ee));
	gsi_writel((curr & ~mask) | (val & mask), gsi_ctx->base +
			GSI_EE_n_CNTXT_TYPE_IRQ_MSK_OFFS(ee));
}

static void __gsi_config_ch_irq(int ee, uint32_t mask, uint32_t val)
{
	uint32_t curr;

	curr = gsi_readl(gsi_ctx->base +
			GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_OFFS(ee));
	gsi_writel((curr & ~mask) | (val & mask), gsi_ctx->base +
			GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_OFFS(ee));
}

static void __gsi_config_evt_irq(int ee, uint32_t mask, uint32_t val)
{
	uint32_t curr;

	curr = gsi_readl(gsi_ctx->base +
			GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS(ee));
	gsi_writel((curr & ~mask) | (val & mask), gsi_ctx->base +
			GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS(ee));
}

static void __gsi_config_ieob_irq(int ee, uint32_t mask, uint32_t val)
{
	uint32_t curr;

	curr = gsi_readl(gsi_ctx->base +
			GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(ee));
	gsi_writel((curr & ~mask) | (val & mask), gsi_ctx->base +
			GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(ee));
}

static void __gsi_config_glob_irq(int ee, uint32_t mask, uint32_t val)
{
	uint32_t curr;

	curr = gsi_readl(gsi_ctx->base +
			GSI_EE_n_CNTXT_GLOB_IRQ_EN_OFFS(ee));
	gsi_writel((curr & ~mask) | (val & mask), gsi_ctx->base +
			GSI_EE_n_CNTXT_GLOB_IRQ_EN_OFFS(ee));
}

static void __gsi_config_gen_irq(int ee, uint32_t mask, uint32_t val)
{
	uint32_t curr;

	curr = gsi_readl(gsi_ctx->base +
			GSI_EE_n_CNTXT_GSI_IRQ_EN_OFFS(ee));
	gsi_writel((curr & ~mask) | (val & mask), gsi_ctx->base +
			GSI_EE_n_CNTXT_GSI_IRQ_EN_OFFS(ee));
}

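/*
 * Channel-control interrupt handler: raised when a channel command
 * (allocate/start/stop/reset/de-alloc) completes. Ack the pending
 * sources, refresh the cached channel state from CNTXT_0 and wake
 * whoever issued the command via ctx->compl.
 */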
static void gsi_handle_ch_ctrl(int ee)
{
	uint32_t ch;
	int i;
	uint32_t val;
	struct gsi_chan_ctx *ctx;

	ch = gsi_readl(gsi_ctx->base +
		GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_OFFS(ee));
	gsi_writel(ch, gsi_ctx->base +
		GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_OFFS(ee));
	GSIDBG("ch %x\n", ch);
	for (i = 0; i < 32; i++) {
		if ((1 << i) & ch) {
			if (i >= gsi_ctx->max_ch || i >= GSI_CHAN_MAX) {
				GSIERR("invalid channel %d\n", i);
				break;
			}

			ctx = &gsi_ctx->chan[i];
			val = gsi_readl(gsi_ctx->base +
				GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS(i, ee));
			ctx->state = (val &
				GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_BMSK) >>
				GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_SHFT;
			GSIDBG("ch %u state updated to %u\n", i, ctx->state);
			complete(&ctx->compl);
			gsi_ctx->ch_dbg[i].cmd_completed++;
		}
	}
}

static void gsi_handle_ev_ctrl(int ee)
{
	uint32_t ch;
	int i;
	uint32_t val;
	struct gsi_evt_ctx *ctx;

	ch = gsi_readl(gsi_ctx->base +
		GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_OFFS(ee));
	gsi_writel(ch, gsi_ctx->base +
		GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_OFFS(ee));
	GSIDBG("ev %x\n", ch);
	for (i = 0; i < 32; i++) {
		if ((1 << i) & ch) {
			if (i >= gsi_ctx->max_ev || i >= GSI_EVT_RING_MAX) {
				GSIERR("invalid event %d\n", i);
				break;
			}

			ctx = &gsi_ctx->evtr[i];
			val = gsi_readl(gsi_ctx->base +
				GSI_EE_n_EV_CH_k_CNTXT_0_OFFS(i, ee));
			ctx->state = (val &
				GSI_EE_n_EV_CH_k_CNTXT_0_CHSTATE_BMSK) >>
				GSI_EE_n_EV_CH_k_CNTXT_0_CHSTATE_SHFT;
			GSIDBG("evt %u state updated to %u\n", i, ctx->state);
			complete(&ctx->compl);
		}
	}
}

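/*
 * Decode the ERROR_LOG register value into a struct gsi_log_err and
 * dispatch it to the peripheral, channel or event-ring error callback.
 * Error codes that can only mean a driver/firmware programming error
 * on this EE are treated as fatal (BUG).
 */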
static void gsi_handle_glob_err(uint32_t err)
{
	struct gsi_log_err *log;
	struct gsi_chan_ctx *ch;
	struct gsi_evt_ctx *ev;
	struct gsi_chan_err_notify chan_notify;
	struct gsi_evt_err_notify evt_notify;
	struct gsi_per_notify per_notify;
	uint32_t val;

	log = (struct gsi_log_err *)&err;
	GSIERR("log err_type=%u ee=%u idx=%u\n", log->err_type, log->ee,
			log->virt_idx);
	GSIERR("code=%u arg1=%u arg2=%u arg3=%u\n", log->code, log->arg1,
			log->arg2, log->arg3);
	switch (log->err_type) {
	case GSI_ERR_TYPE_GLOB:
		per_notify.evt_id = GSI_PER_EVT_GLOB_ERROR;
		per_notify.user_data = gsi_ctx->per.user_data;
		per_notify.data.err_desc = err & 0xFFFF;
		gsi_ctx->per.notify_cb(&per_notify);
		break;
	case GSI_ERR_TYPE_CHAN:
		if (log->virt_idx >= gsi_ctx->max_ch) {
			GSIERR("Unexpected ch %d\n", log->virt_idx);
			WARN_ON(1);
			return;
		}

		ch = &gsi_ctx->chan[log->virt_idx];
		chan_notify.chan_user_data = ch->props.chan_user_data;
		chan_notify.err_desc = err & 0xFFFF;
		if (log->code == GSI_INVALID_TRE_ERR) {
			BUG_ON(log->ee != gsi_ctx->per.ee);
			val = gsi_readl(gsi_ctx->base +
				GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS(log->virt_idx,
					gsi_ctx->per.ee));
			ch->state = (val &
				GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_BMSK) >>
				GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_SHFT;
			GSIDBG("ch %u state updated to %u\n", log->virt_idx,
					ch->state);
			ch->stats.invalid_tre_error++;
			BUG_ON(ch->state != GSI_CHAN_STATE_ERROR);
			chan_notify.evt_id = GSI_CHAN_INVALID_TRE_ERR;
		} else if (log->code == GSI_OUT_OF_BUFFERS_ERR) {
			BUG_ON(log->ee != gsi_ctx->per.ee);
			chan_notify.evt_id = GSI_CHAN_OUT_OF_BUFFERS_ERR;
		} else if (log->code == GSI_OUT_OF_RESOURCES_ERR) {
			BUG_ON(log->ee != gsi_ctx->per.ee);
			chan_notify.evt_id = GSI_CHAN_OUT_OF_RESOURCES_ERR;
			complete(&ch->compl);
		} else if (log->code == GSI_UNSUPPORTED_INTER_EE_OP_ERR) {
			chan_notify.evt_id =
				GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR;
		} else if (log->code == GSI_NON_ALLOCATED_EVT_ACCESS_ERR) {
			BUG_ON(log->ee != gsi_ctx->per.ee);
			chan_notify.evt_id =
				GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR;
		} else if (log->code == GSI_HWO_1_ERR) {
			BUG_ON(log->ee != gsi_ctx->per.ee);
			chan_notify.evt_id = GSI_CHAN_HWO_1_ERR;
		} else {
			BUG();
		}
		if (ch->props.err_cb)
			ch->props.err_cb(&chan_notify);
		else
			WARN_ON(1);
		break;
	case GSI_ERR_TYPE_EVT:
		if (log->virt_idx >= gsi_ctx->max_ev) {
			GSIERR("Unexpected ev %d\n", log->virt_idx);
			WARN_ON(1);
			return;
		}

		ev = &gsi_ctx->evtr[log->virt_idx];
		evt_notify.user_data = ev->props.user_data;
		evt_notify.err_desc = err & 0xFFFF;
		if (log->code == GSI_OUT_OF_BUFFERS_ERR) {
			BUG_ON(log->ee != gsi_ctx->per.ee);
			evt_notify.evt_id = GSI_EVT_OUT_OF_BUFFERS_ERR;
		} else if (log->code == GSI_OUT_OF_RESOURCES_ERR) {
			BUG_ON(log->ee != gsi_ctx->per.ee);
			evt_notify.evt_id = GSI_EVT_OUT_OF_RESOURCES_ERR;
			complete(&ev->compl);
		} else if (log->code == GSI_UNSUPPORTED_INTER_EE_OP_ERR) {
			evt_notify.evt_id = GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR;
		} else if (log->code == GSI_EVT_RING_EMPTY_ERR) {
			BUG_ON(log->ee != gsi_ctx->per.ee);
			evt_notify.evt_id = GSI_EVT_EVT_RING_EMPTY_ERR;
		} else {
			BUG();
		}
		if (ev->props.err_cb)
			ev->props.err_cb(&evt_notify);
		else
			WARN_ON(1);
		break;
	default:
		WARN_ON(1);
	}
}

static void gsi_handle_glob_ee(int ee)
{
	uint32_t val;
	uint32_t err;
	struct gsi_per_notify notify;
	uint32_t clr = ~0;

	val = gsi_readl(gsi_ctx->base +
		GSI_EE_n_CNTXT_GLOB_IRQ_STTS_OFFS(ee));

	notify.user_data = gsi_ctx->per.user_data;

	if (val & GSI_EE_n_CNTXT_GLOB_IRQ_STTS_ERROR_INT_BMSK) {
		err = gsi_readl(gsi_ctx->base +
			GSI_EE_n_ERROR_LOG_OFFS(ee));
		if (gsi_ctx->per.ver >= GSI_VER_1_2)
			gsi_writel(0, gsi_ctx->base +
				GSI_EE_n_ERROR_LOG_OFFS(ee));
		gsi_writel(clr, gsi_ctx->base +
			GSI_EE_n_ERROR_LOG_CLR_OFFS(ee));
		gsi_handle_glob_err(err);
	}

	if (val & GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT1_BMSK) {
		notify.evt_id = GSI_PER_EVT_GLOB_GP1;
		gsi_ctx->per.notify_cb(&notify);
	}

	if (val & GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT2_BMSK) {
		notify.evt_id = GSI_PER_EVT_GLOB_GP2;
		gsi_ctx->per.notify_cb(&notify);
	}

	if (val & GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT3_BMSK) {
		notify.evt_id = GSI_PER_EVT_GLOB_GP3;
		gsi_ctx->per.notify_cb(&notify);
	}

	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_CNTXT_GLOB_IRQ_CLR_OFFS(ee));
}

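/*
 * Rings are contiguous arrays of fixed-size elements spanning
 * [base, end). wp_local/rp_local are the driver-side copies of the
 * write/read pointers; they advance one element at a time and wrap
 * back to base on reaching end.
 */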
static void gsi_incr_ring_wp(struct gsi_ring_ctx *ctx)
{
	ctx->wp_local += ctx->elem_sz;
	if (ctx->wp_local == ctx->end)
		ctx->wp_local = ctx->base;
}

static void gsi_incr_ring_rp(struct gsi_ring_ctx *ctx)
{
	ctx->rp_local += ctx->elem_sz;
	if (ctx->rp_local == ctx->end)
		ctx->rp_local = ctx->base;
}

uint16_t gsi_find_idx_from_addr(struct gsi_ring_ctx *ctx, uint64_t addr)
{
	BUG_ON(addr < ctx->base || addr >= ctx->end);

	return (uint32_t)(addr - ctx->base)/ctx->elem_sz;
}

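/*
 * Process one transfer-completion event: advance the channel's local
 * read pointer up to and including the completed TRE (evt->xfer_ptr),
 * counting every element as completed, then fill the client
 * notification using the user_data slot recorded for that TRE's ring
 * index.
 */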
static void gsi_process_chan(struct gsi_xfer_compl_evt *evt,
		struct gsi_chan_xfer_notify *notify, bool callback)
{
	uint32_t ch_id;
	struct gsi_chan_ctx *ch_ctx;
	uint16_t rp_idx;
	uint64_t rp;

	ch_id = evt->chid;
	if (ch_id >= gsi_ctx->max_ch) {
		GSIERR("Unexpected ch %d\n", ch_id);
		WARN_ON(1);
		return;
	}

	ch_ctx = &gsi_ctx->chan[ch_id];
	BUG_ON(ch_ctx->props.prot != GSI_CHAN_PROT_GPI);
	rp = evt->xfer_ptr;

	while (ch_ctx->ring.rp_local != rp) {
		gsi_incr_ring_rp(&ch_ctx->ring);
		ch_ctx->stats.completed++;
	}

	/* the element at RP is also processed */
	gsi_incr_ring_rp(&ch_ctx->ring);
	ch_ctx->stats.completed++;

	ch_ctx->ring.rp = ch_ctx->ring.rp_local;

	rp_idx = gsi_find_idx_from_addr(&ch_ctx->ring, rp);
	notify->xfer_user_data = ch_ctx->user_data[rp_idx];
	notify->chan_user_data = ch_ctx->props.chan_user_data;
	notify->evt_id = evt->code;
	notify->bytes_xfered = evt->len;
	if (callback)
		ch_ctx->props.xfer_cb(notify);
}

static void gsi_process_evt_re(struct gsi_evt_ctx *ctx,
		struct gsi_chan_xfer_notify *notify, bool callback)
{
	struct gsi_xfer_compl_evt *evt;
	uint16_t idx;

	idx = gsi_find_idx_from_addr(&ctx->ring, ctx->ring.rp_local);
	evt = (struct gsi_xfer_compl_evt *)(ctx->ring.base_va +
			idx * ctx->ring.elem_sz);
	gsi_process_chan(evt, notify, callback);
	gsi_incr_ring_rp(&ctx->ring);
	/* recycle this element */
	gsi_incr_ring_wp(&ctx->ring);
	ctx->stats.completed++;
}

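/*
 * Doorbell writes go MSB first, LSB last. The likely rationale (an
 * inference, not taken from GSI documentation) is that the hardware
 * latches the full 64-bit pointer when the LSB register is written,
 * so ordering the writes this way makes the update appear atomic.
 */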
static void gsi_ring_evt_doorbell(struct gsi_evt_ctx *ctx)
{
	uint32_t val;

	/* write order MUST be MSB followed by LSB */
	val = ((ctx->ring.wp_local >> 32) &
		GSI_EE_n_EV_CH_k_DOORBELL_1_WRITE_PTR_MSB_BMSK) <<
		GSI_EE_n_EV_CH_k_DOORBELL_1_WRITE_PTR_MSB_SHFT;
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_EV_CH_k_DOORBELL_1_OFFS(ctx->id,
			gsi_ctx->per.ee));

	val = (ctx->ring.wp_local &
		GSI_EE_n_EV_CH_k_DOORBELL_0_WRITE_PTR_LSB_BMSK) <<
		GSI_EE_n_EV_CH_k_DOORBELL_0_WRITE_PTR_LSB_SHFT;
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_EV_CH_k_DOORBELL_0_OFFS(ctx->id,
			gsi_ctx->per.ee));
}

static void gsi_ring_chan_doorbell(struct gsi_chan_ctx *ctx)
{
	uint32_t val;

	/* write order MUST be MSB followed by LSB */
	val = ((ctx->ring.wp_local >> 32) &
		GSI_EE_n_GSI_CH_k_DOORBELL_1_WRITE_PTR_MSB_BMSK) <<
		GSI_EE_n_GSI_CH_k_DOORBELL_1_WRITE_PTR_MSB_SHFT;
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_DOORBELL_1_OFFS(ctx->props.ch_id,
			gsi_ctx->per.ee));

	val = (ctx->ring.wp_local &
		GSI_EE_n_GSI_CH_k_DOORBELL_0_WRITE_PTR_LSB_BMSK) <<
		GSI_EE_n_GSI_CH_k_DOORBELL_0_WRITE_PTR_LSB_SHFT;
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_DOORBELL_0_OFFS(ctx->props.ch_id,
			gsi_ctx->per.ee));
}

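/*
 * IEOB (interrupt on end-of-block) handler: for each unmasked event
 * ring with a pending bit, drain completion events up to the hardware
 * read pointer. After each pass the pointer is re-read (check_again)
 * in case new events arrived while draining; an exclusive channel
 * that has switched to polling mode aborts the drain instead.
 */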
static void gsi_handle_ieob(int ee)
{
	uint32_t ch;
	int i;
	uint64_t rp;
	struct gsi_evt_ctx *ctx;
	struct gsi_chan_xfer_notify notify;
	unsigned long flags;
	unsigned long cntr;
	uint32_t msk;

	ch = gsi_readl(gsi_ctx->base +
		GSI_EE_n_CNTXT_SRC_IEOB_IRQ_OFFS(ee));
	msk = gsi_readl(gsi_ctx->base +
		GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(ee));
	gsi_writel(ch & msk, gsi_ctx->base +
		GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(ee));

	for (i = 0; i < 32; i++) {
		if ((1 << i) & ch & msk) {
			if (i >= gsi_ctx->max_ev || i >= GSI_EVT_RING_MAX) {
				GSIERR("invalid event %d\n", i);
				break;
			}

			ctx = &gsi_ctx->evtr[i];
			BUG_ON(ctx->props.intf != GSI_EVT_CHTYPE_GPI_EV);
			spin_lock_irqsave(&ctx->ring.slock, flags);
check_again:
			cntr = 0;
			rp = gsi_readl(gsi_ctx->base +
				GSI_EE_n_EV_CH_k_CNTXT_4_OFFS(i, ee));
			rp |= ((uint64_t)gsi_readl(gsi_ctx->base +
				GSI_EE_n_EV_CH_k_CNTXT_5_OFFS(i, ee))) << 32;
			ctx->ring.rp = rp;
			while (ctx->ring.rp_local != rp) {
				++cntr;
				gsi_process_evt_re(ctx, &notify, true);
				if (ctx->props.exclusive &&
					atomic_read(&ctx->chan->poll_mode)) {
					cntr = 0;
					break;
				}
			}
			gsi_ring_evt_doorbell(ctx);
			if (cntr != 0)
				goto check_again;
			spin_unlock_irqrestore(&ctx->ring.slock, flags);
		}
	}
}

static void gsi_handle_inter_ee_ch_ctrl(int ee)
{
	uint32_t ch;
	int i;

	ch = gsi_readl(gsi_ctx->base +
		GSI_INTER_EE_n_SRC_GSI_CH_IRQ_OFFS(ee));
	gsi_writel(ch, gsi_ctx->base +
		GSI_INTER_EE_n_SRC_GSI_CH_IRQ_CLR_OFFS(ee));
	for (i = 0; i < 32; i++) {
		if ((1 << i) & ch) {
			/* not currently expected */
			GSIERR("ch %u was inter-EE changed\n", i);
		}
	}
}

static void gsi_handle_inter_ee_ev_ctrl(int ee)
{
	uint32_t ch;
	int i;

	ch = gsi_readl(gsi_ctx->base +
		GSI_INTER_EE_n_SRC_EV_CH_IRQ_OFFS(ee));
	gsi_writel(ch, gsi_ctx->base +
		GSI_INTER_EE_n_SRC_EV_CH_IRQ_CLR_OFFS(ee));
	for (i = 0; i < 32; i++) {
		if ((1 << i) & ch) {
			/* not currently expected */
			GSIERR("evt %u was inter-EE changed\n", i);
		}
	}
}

static void gsi_handle_general(int ee)
{
	uint32_t val;
	struct gsi_per_notify notify;

	val = gsi_readl(gsi_ctx->base +
		GSI_EE_n_CNTXT_GSI_IRQ_STTS_OFFS(ee));

	notify.user_data = gsi_ctx->per.user_data;

	if (val & GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_MCS_STACK_OVRFLOW_BMSK)
		notify.evt_id = GSI_PER_EVT_GENERAL_MCS_STACK_OVERFLOW;

	if (val & GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_CMD_FIFO_OVRFLOW_BMSK)
		notify.evt_id = GSI_PER_EVT_GENERAL_CMD_FIFO_OVERFLOW;

	if (val & GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_BUS_ERROR_BMSK)
		notify.evt_id = GSI_PER_EVT_GENERAL_BUS_ERROR;

	if (val & GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_BREAK_POINT_BMSK)
		notify.evt_id = GSI_PER_EVT_GENERAL_BREAK_POINT;

	if (gsi_ctx->per.notify_cb)
		gsi_ctx->per.notify_cb(&notify);

	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_CNTXT_GSI_IRQ_CLR_OFFS(ee));
}

#define GSI_ISR_MAX_ITER 50

static void gsi_handle_irq(void)
{
	uint32_t type;
	int ee = gsi_ctx->per.ee;
	unsigned long cnt = 0;

	while (1) {
		type = gsi_readl(gsi_ctx->base +
			GSI_EE_n_CNTXT_TYPE_IRQ_OFFS(ee));

		if (!type)
			break;

		GSIDBG("type %x\n", type);

		if (type & GSI_EE_n_CNTXT_TYPE_IRQ_CH_CTRL_BMSK)
			gsi_handle_ch_ctrl(ee);

		if (type & GSI_EE_n_CNTXT_TYPE_IRQ_EV_CTRL_BMSK)
			gsi_handle_ev_ctrl(ee);

		if (type & GSI_EE_n_CNTXT_TYPE_IRQ_GLOB_EE_BMSK)
			gsi_handle_glob_ee(ee);

		if (type & GSI_EE_n_CNTXT_TYPE_IRQ_IEOB_BMSK)
			gsi_handle_ieob(ee);

		if (type & GSI_EE_n_CNTXT_TYPE_IRQ_INTER_EE_CH_CTRL_BMSK)
			gsi_handle_inter_ee_ch_ctrl(ee);

		if (type & GSI_EE_n_CNTXT_TYPE_IRQ_INTER_EE_EV_CTRL_BMSK)
			gsi_handle_inter_ee_ev_ctrl(ee);

		if (type & GSI_EE_n_CNTXT_TYPE_IRQ_GENERAL_BMSK)
			gsi_handle_general(ee);

		if (++cnt > GSI_ISR_MAX_ITER)
			BUG();
	}
}

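/*
 * Top-level ISR. When the client registered a clock request callback,
 * the interrupt may arrive while GSI clocks are off: request them
 * first and only service the interrupt on a synchronous grant. An
 * asynchronous grant is serviced later via gsi_complete_clk_grant().
 */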
static irqreturn_t gsi_isr(int irq, void *ctxt)
{
	BUG_ON(ctxt != gsi_ctx);

	if (gsi_ctx->per.req_clk_cb) {
		bool granted = false;

		gsi_ctx->per.req_clk_cb(gsi_ctx->per.user_data, &granted);
		if (granted) {
			gsi_handle_irq();
			gsi_ctx->per.rel_clk_cb(gsi_ctx->per.user_data);
		}
	} else {
		gsi_handle_irq();
	}

	return IRQ_HANDLED;
}

static uint32_t gsi_get_max_channels(enum gsi_ver ver)
{
	uint32_t reg;

	switch (ver) {
	case GSI_VER_1_0:
		reg = gsi_readl(gsi_ctx->base +
			GSI_V1_0_EE_n_GSI_HW_PARAM_OFFS(gsi_ctx->per.ee));
		reg = (reg & GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_CH_NUM_BMSK) >>
			GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_CH_NUM_SHFT;
		break;
	case GSI_VER_1_2:
		reg = gsi_readl(gsi_ctx->base +
			GSI_V1_2_EE_n_GSI_HW_PARAM_0_OFFS(gsi_ctx->per.ee));
		reg = (reg & GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_BMSK) >>
			GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_SHFT;
		break;
	case GSI_VER_1_3:
		reg = gsi_readl(gsi_ctx->base +
			GSI_V1_3_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
		reg = (reg &
			GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK) >>
			GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT;
		break;
	default:
		GSIERR("bad gsi version %d\n", ver);
		WARN_ON(1);
		reg = 0;
	}

	GSIDBG("max channels %d\n", reg);

	return reg;
}

static uint32_t gsi_get_max_event_rings(enum gsi_ver ver)
{
	uint32_t reg;

	switch (ver) {
	case GSI_VER_1_0:
		reg = gsi_readl(gsi_ctx->base +
			GSI_V1_0_EE_n_GSI_HW_PARAM_OFFS(gsi_ctx->per.ee));
		reg = (reg & GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_EV_CH_NUM_BMSK) >>
			GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_EV_CH_NUM_SHFT;
		break;
	case GSI_VER_1_2:
		reg = gsi_readl(gsi_ctx->base +
			GSI_V1_2_EE_n_GSI_HW_PARAM_0_OFFS(gsi_ctx->per.ee));
		reg = (reg & GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_BMSK) >>
			GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_SHFT;
		break;
	case GSI_VER_1_3:
		reg = gsi_readl(gsi_ctx->base +
			GSI_V1_3_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
		reg = (reg &
			GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK) >>
			GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT;
		break;
	default:
		GSIERR("bad gsi version %d\n", ver);
		WARN_ON(1);
		reg = 0;
	}

	GSIDBG("max event rings %d\n", reg);

	return reg;
}

int gsi_complete_clk_grant(unsigned long dev_hdl)
{
	unsigned long flags;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!gsi_ctx->per_registered) {
		GSIERR("no client registered\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (dev_hdl != (uintptr_t)gsi_ctx) {
		GSIERR("bad params dev_hdl=0x%lx gsi_ctx=0x%p\n", dev_hdl,
				gsi_ctx);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	spin_lock_irqsave(&gsi_ctx->slock, flags);
	gsi_handle_irq();
	gsi_ctx->per.rel_clk_cb(gsi_ctx->per.user_data);
	spin_unlock_irqrestore(&gsi_ctx->slock, flags);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_complete_clk_grant);

int gsi_register_device(struct gsi_per_props *props, unsigned long *dev_hdl)
{
	int res;
	uint32_t val;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!props || !dev_hdl) {
		GSIERR("bad params props=%p dev_hdl=%p\n", props, dev_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (props->ver <= GSI_VER_ERR || props->ver >= GSI_VER_MAX) {
		GSIERR("bad params gsi_ver=%d\n", props->ver);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (!props->notify_cb) {
		GSIERR("notify callback must be provided\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (props->req_clk_cb && !props->rel_clk_cb) {
		GSIERR("rel callback must be provided\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (gsi_ctx->per_registered) {
		GSIERR("per already registered\n");
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	spin_lock_init(&gsi_ctx->slock);
	if (props->intr == GSI_INTR_IRQ) {
		if (!props->irq) {
			GSIERR("bad irq specified %u\n", props->irq);
			return -GSI_STATUS_INVALID_PARAMS;
		}

		res = devm_request_irq(gsi_ctx->dev, props->irq,
				(irq_handler_t) gsi_isr,
				props->req_clk_cb ? IRQF_TRIGGER_RISING :
					IRQF_TRIGGER_HIGH,
				"gsi",
				gsi_ctx);
		if (res) {
			GSIERR("failed to register isr for %u\n", props->irq);
			return -GSI_STATUS_ERROR;
		}

		res = enable_irq_wake(props->irq);
		if (res)
			GSIERR("failed to enable wake irq %u\n", props->irq);
		else
			GSIERR("GSI irq is wake enabled %u\n", props->irq);

	} else {
		GSIERR("do not support interrupt type %u\n", props->intr);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	gsi_ctx->base = devm_ioremap_nocache(gsi_ctx->dev, props->phys_addr,
			props->size);
	if (!gsi_ctx->base) {
		GSIERR("failed to remap GSI HW\n");
		devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
		return -GSI_STATUS_RES_ALLOC_FAILURE;
	}

	gsi_ctx->per = *props;
	gsi_ctx->per_registered = true;
	mutex_init(&gsi_ctx->mlock);
	atomic_set(&gsi_ctx->num_chan, 0);
	atomic_set(&gsi_ctx->num_evt_ring, 0);
	gsi_ctx->max_ch = gsi_get_max_channels(gsi_ctx->per.ver);
	if (gsi_ctx->max_ch == 0) {
		devm_iounmap(gsi_ctx->dev, gsi_ctx->base);
		devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
		GSIERR("failed to get max channels\n");
		return -GSI_STATUS_ERROR;
	}
	gsi_ctx->max_ev = gsi_get_max_event_rings(gsi_ctx->per.ver);
	if (gsi_ctx->max_ev == 0) {
		devm_iounmap(gsi_ctx->dev, gsi_ctx->base);
		devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
		GSIERR("failed to get max event rings\n");
		return -GSI_STATUS_ERROR;
	}

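	/*
	 * evt_bmap pre-marks event-ring IDs that must never be handed
	 * out: every ID at or above max_ev, plus the range
	 * GSI_MHI_ER_START..GSI_MHI_ER_END reserved for MHI. The XOR of
	 * the two "all bits below N" masks yields exactly bits
	 * START..END; e.g. with START=10 and END=16 it is
	 * ((1 << 17) - 1) ^ ((1 << 10) - 1) = 0x1FC00 (bits 10..16 set).
	 */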
	/* bitmap covers max_ev events, excluding the reserved events */
	gsi_ctx->evt_bmap = ~((1 << gsi_ctx->max_ev) - 1);
	gsi_ctx->evt_bmap |= ((1 << (GSI_MHI_ER_END + 1)) - 1) ^
		((1 << GSI_MHI_ER_START) - 1);

	/*
	 * enable all interrupts but GSI_BREAK_POINT.
	 * Inter-EE commands / interrupts are not supported.
	 */
	__gsi_config_type_irq(props->ee, ~0, ~0);
	__gsi_config_ch_irq(props->ee, ~0, ~0);
	__gsi_config_evt_irq(props->ee, ~0, ~0);
	__gsi_config_ieob_irq(props->ee, ~0, ~0);
	__gsi_config_glob_irq(props->ee, ~0, ~0);
	__gsi_config_gen_irq(props->ee, ~0,
		~GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_BREAK_POINT_BMSK);

	gsi_writel(props->intr, gsi_ctx->base +
			GSI_EE_n_CNTXT_INTSET_OFFS(gsi_ctx->per.ee));

	val = gsi_readl(gsi_ctx->base +
			GSI_EE_n_GSI_STATUS_OFFS(gsi_ctx->per.ee));
	if (val & GSI_EE_n_GSI_STATUS_ENABLED_BMSK)
		gsi_ctx->enabled = true;
	else
		GSIERR("Manager EE has not enabled GSI, GSI un-usable\n");

	if (gsi_ctx->per.ver >= GSI_VER_1_2)
		gsi_writel(0, gsi_ctx->base +
			GSI_EE_n_ERROR_LOG_OFFS(gsi_ctx->per.ee));

	*dev_hdl = (uintptr_t)gsi_ctx;

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_register_device);

int gsi_write_device_scratch(unsigned long dev_hdl,
		struct gsi_device_scratch *val)
{
	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!gsi_ctx->per_registered) {
		GSIERR("no client registered\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (dev_hdl != (uintptr_t)gsi_ctx) {
		GSIERR("bad params dev_hdl=0x%lx gsi_ctx=0x%p\n", dev_hdl,
				gsi_ctx);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (val->max_usb_pkt_size_valid &&
			val->max_usb_pkt_size != 1024 &&
			val->max_usb_pkt_size != 512) {
		GSIERR("bad USB max pkt size dev_hdl=0x%lx sz=%u\n", dev_hdl,
				val->max_usb_pkt_size);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	mutex_lock(&gsi_ctx->mlock);
	if (val->mhi_base_chan_idx_valid)
		gsi_ctx->scratch.word0.s.mhi_base_chan_idx =
			val->mhi_base_chan_idx;
	if (val->max_usb_pkt_size_valid)
		gsi_ctx->scratch.word0.s.max_usb_pkt_size =
			(val->max_usb_pkt_size == 1024) ? 1 : 0;
	gsi_writel(gsi_ctx->scratch.word0.val,
			gsi_ctx->base +
			GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee));
	mutex_unlock(&gsi_ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_write_device_scratch);

int gsi_deregister_device(unsigned long dev_hdl, bool force)
{
	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!gsi_ctx->per_registered) {
		GSIERR("no client registered\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (dev_hdl != (uintptr_t)gsi_ctx) {
		GSIERR("bad params dev_hdl=0x%lx gsi_ctx=0x%p\n", dev_hdl,
				gsi_ctx);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (!force && atomic_read(&gsi_ctx->num_chan)) {
		GSIERR("%u channels are allocated\n",
				atomic_read(&gsi_ctx->num_chan));
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	if (!force && atomic_read(&gsi_ctx->num_evt_ring)) {
		GSIERR("%u evt rings are allocated\n",
				atomic_read(&gsi_ctx->num_evt_ring));
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	/* disable all interrupts */
	__gsi_config_type_irq(gsi_ctx->per.ee, ~0, 0);
	__gsi_config_ch_irq(gsi_ctx->per.ee, ~0, 0);
	__gsi_config_evt_irq(gsi_ctx->per.ee, ~0, 0);
	__gsi_config_ieob_irq(gsi_ctx->per.ee, ~0, 0);
	__gsi_config_glob_irq(gsi_ctx->per.ee, ~0, 0);
	__gsi_config_gen_irq(gsi_ctx->per.ee, ~0, 0);

	devm_free_irq(gsi_ctx->dev, gsi_ctx->per.irq, gsi_ctx);
	devm_iounmap(gsi_ctx->dev, gsi_ctx->base);
	memset(gsi_ctx, 0, sizeof(*gsi_ctx));

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_deregister_device);

static void gsi_program_evt_ring_ctx(struct gsi_evt_ring_props *props,
		uint8_t evt_id, unsigned int ee)
{
	uint32_t val;

	GSIDBG("intf=%u intr=%u re=%u\n", props->intf, props->intr,
			props->re_size);

	val = (((props->intf << GSI_EE_n_EV_CH_k_CNTXT_0_CHTYPE_SHFT) &
			GSI_EE_n_EV_CH_k_CNTXT_0_CHTYPE_BMSK) |
		((props->intr << GSI_EE_n_EV_CH_k_CNTXT_0_INTYPE_SHFT) &
			GSI_EE_n_EV_CH_k_CNTXT_0_INTYPE_BMSK) |
		((props->re_size << GSI_EE_n_EV_CH_k_CNTXT_0_ELEMENT_SIZE_SHFT)
			& GSI_EE_n_EV_CH_k_CNTXT_0_ELEMENT_SIZE_BMSK));

	gsi_writel(val, gsi_ctx->base +
			GSI_EE_n_EV_CH_k_CNTXT_0_OFFS(evt_id, ee));

	val = (props->ring_len & GSI_EE_n_EV_CH_k_CNTXT_1_R_LENGTH_BMSK) <<
		GSI_EE_n_EV_CH_k_CNTXT_1_R_LENGTH_SHFT;
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_EV_CH_k_CNTXT_1_OFFS(evt_id, ee));

	val = (props->ring_base_addr &
		GSI_EE_n_EV_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_BMSK) <<
		GSI_EE_n_EV_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_SHFT;
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_EV_CH_k_CNTXT_2_OFFS(evt_id, ee));

	val = ((props->ring_base_addr >> 32) &
		GSI_EE_n_EV_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_BMSK) <<
		GSI_EE_n_EV_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_SHFT;
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_EV_CH_k_CNTXT_3_OFFS(evt_id, ee));

	val = (((props->int_modt << GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODT_SHFT) &
		GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODT_BMSK) |
		((props->int_modc << GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODC_SHFT) &
		GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODC_BMSK));
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_EV_CH_k_CNTXT_8_OFFS(evt_id, ee));

	val = (props->intvec & GSI_EE_n_EV_CH_k_CNTXT_9_INTVEC_BMSK) <<
		GSI_EE_n_EV_CH_k_CNTXT_9_INTVEC_SHFT;
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_EV_CH_k_CNTXT_9_OFFS(evt_id, ee));

	val = (props->msi_addr & GSI_EE_n_EV_CH_k_CNTXT_10_MSI_ADDR_LSB_BMSK) <<
		GSI_EE_n_EV_CH_k_CNTXT_10_MSI_ADDR_LSB_SHFT;
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_EV_CH_k_CNTXT_10_OFFS(evt_id, ee));

	val = ((props->msi_addr >> 32) &
		GSI_EE_n_EV_CH_k_CNTXT_11_MSI_ADDR_MSB_BMSK) <<
		GSI_EE_n_EV_CH_k_CNTXT_11_MSI_ADDR_MSB_SHFT;
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_EV_CH_k_CNTXT_11_OFFS(evt_id, ee));

	val = (props->rp_update_addr &
		GSI_EE_n_EV_CH_k_CNTXT_12_RP_UPDATE_ADDR_LSB_BMSK) <<
		GSI_EE_n_EV_CH_k_CNTXT_12_RP_UPDATE_ADDR_LSB_SHFT;
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_EV_CH_k_CNTXT_12_OFFS(evt_id, ee));

	val = ((props->rp_update_addr >> 32) &
		GSI_EE_n_EV_CH_k_CNTXT_13_RP_UPDATE_ADDR_MSB_BMSK) <<
		GSI_EE_n_EV_CH_k_CNTXT_13_RP_UPDATE_ADDR_MSB_SHFT;
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_EV_CH_k_CNTXT_13_OFFS(evt_id, ee));
}

static void gsi_init_evt_ring(struct gsi_evt_ring_props *props,
		struct gsi_ring_ctx *ctx)
{
	ctx->base_va = (uintptr_t)props->ring_base_vaddr;
	ctx->base = props->ring_base_addr;
	ctx->wp = ctx->base;
	ctx->rp = ctx->base;
	ctx->wp_local = ctx->base;
	ctx->rp_local = ctx->base;
	ctx->len = props->ring_len;
	ctx->elem_sz = props->re_size;
	ctx->max_num_elem = ctx->len / ctx->elem_sz - 1;
	ctx->end = ctx->base + (ctx->max_num_elem + 1) * ctx->elem_sz;
}

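/*
 * "Priming" hands the entire event ring to the hardware: zero all
 * elements and advance wp_local by max_num_elem elements before
 * ringing the doorbell. One element is held back (max_num_elem is
 * len/elem_sz - 1), presumably so a completely full ring can still be
 * distinguished from an empty one.
 */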
static void gsi_prime_evt_ring(struct gsi_evt_ctx *ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->ring.slock, flags);
	memset((void *)ctx->ring.base_va, 0, ctx->ring.len);
	ctx->ring.wp_local = ctx->ring.base +
		ctx->ring.max_num_elem * ctx->ring.elem_sz;
	gsi_ring_evt_doorbell(ctx);
	spin_unlock_irqrestore(&ctx->ring.slock, flags);
}

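/*
 * The alignment check below requires the ring base address to be
 * aligned to the ring length rounded up to a power of two (as this
 * driver encodes the GSI constraint). do_div() is used because the
 * address is 64-bit and the division must also build on 32-bit
 * targets.
 */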
static int gsi_validate_evt_ring_props(struct gsi_evt_ring_props *props)
{
	uint64_t ra;

	if ((props->re_size == GSI_EVT_RING_RE_SIZE_4B &&
				props->ring_len % 4) ||
			(props->re_size == GSI_EVT_RING_RE_SIZE_16B &&
			props->ring_len % 16)) {
		GSIERR("bad params ring_len %u not a multiple of RE size %u\n",
				props->ring_len, props->re_size);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ra = props->ring_base_addr;
	do_div(ra, roundup_pow_of_two(props->ring_len));

	if (props->ring_base_addr != ra * roundup_pow_of_two(props->ring_len)) {
		GSIERR("bad params ring base not aligned 0x%llx align 0x%lx\n",
				props->ring_base_addr,
				roundup_pow_of_two(props->ring_len));
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (props->intf == GSI_EVT_CHTYPE_GPI_EV &&
			!props->ring_base_vaddr) {
		GSIERR("protocol %u requires ring base VA\n", props->intf);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (props->intf == GSI_EVT_CHTYPE_MHI_EV &&
			(!props->evchid_valid ||
			props->evchid > GSI_MHI_ER_END ||
			props->evchid < GSI_MHI_ER_START)) {
		GSIERR("MHI requires evchid valid=%d val=%u\n",
				props->evchid_valid, props->evchid);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (props->intf != GSI_EVT_CHTYPE_MHI_EV &&
			props->evchid_valid) {
		GSIERR("protocol %u cannot specify evchid\n", props->intf);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (!props->err_cb) {
		GSIERR("err callback must be provided\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}

	return GSI_STATUS_SUCCESS;
}

int gsi_alloc_evt_ring(struct gsi_evt_ring_props *props, unsigned long dev_hdl,
		unsigned long *evt_ring_hdl)
{
	unsigned long evt_id;
	enum gsi_evt_ch_cmd_opcode op = GSI_EVT_ALLOCATE;
	uint32_t val;
	struct gsi_evt_ctx *ctx;
	int res;
	int ee;
	unsigned long flags;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}
	/* only dereference gsi_ctx after the NULL check above */
	ee = gsi_ctx->per.ee;

	if (!props || !evt_ring_hdl || dev_hdl != (uintptr_t)gsi_ctx) {
		GSIERR("bad params props=%p dev_hdl=0x%lx evt_ring_hdl=%p\n",
				props, dev_hdl, evt_ring_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (gsi_validate_evt_ring_props(props)) {
		GSIERR("invalid params\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (!props->evchid_valid) {
		mutex_lock(&gsi_ctx->mlock);
		evt_id = find_first_zero_bit(&gsi_ctx->evt_bmap,
				sizeof(unsigned long) * BITS_PER_BYTE);
		if (evt_id == sizeof(unsigned long) * BITS_PER_BYTE) {
			GSIERR("failed to alloc event ID\n");
			mutex_unlock(&gsi_ctx->mlock);
			return -GSI_STATUS_RES_ALLOC_FAILURE;
		}
		set_bit(evt_id, &gsi_ctx->evt_bmap);
		mutex_unlock(&gsi_ctx->mlock);
	} else {
		evt_id = props->evchid;
	}
	GSIDBG("Using %lu as virt evt id\n", evt_id);

	ctx = &gsi_ctx->evtr[evt_id];
	memset(ctx, 0, sizeof(*ctx));
	mutex_init(&ctx->mlock);
	init_completion(&ctx->compl);
	atomic_set(&ctx->chan_ref_cnt, 0);
	ctx->props = *props;

	mutex_lock(&gsi_ctx->mlock);
	val = (((evt_id << GSI_EE_n_EV_CH_CMD_CHID_SHFT) &
			GSI_EE_n_EV_CH_CMD_CHID_BMSK) |
		((op << GSI_EE_n_EV_CH_CMD_OPCODE_SHFT) &
			GSI_EE_n_EV_CH_CMD_OPCODE_BMSK));
	gsi_writel(val, gsi_ctx->base +
			GSI_EE_n_EV_CH_CMD_OFFS(ee));
	res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
	if (res == 0) {
		GSIERR("evt_id=%lu timed out\n", evt_id);
		if (!props->evchid_valid)
			clear_bit(evt_id, &gsi_ctx->evt_bmap);
		mutex_unlock(&gsi_ctx->mlock);
		return -GSI_STATUS_TIMED_OUT;
	}

	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
		GSIERR("evt_id=%lu allocation failed state=%u\n",
				evt_id, ctx->state);
		if (!props->evchid_valid)
			clear_bit(evt_id, &gsi_ctx->evt_bmap);
		mutex_unlock(&gsi_ctx->mlock);
		return -GSI_STATUS_RES_ALLOC_FAILURE;
	}

	gsi_program_evt_ring_ctx(props, evt_id, gsi_ctx->per.ee);

	spin_lock_init(&ctx->ring.slock);
	gsi_init_evt_ring(props, &ctx->ring);

	ctx->id = evt_id;
	*evt_ring_hdl = evt_id;
	atomic_inc(&gsi_ctx->num_evt_ring);
	if (props->intf == GSI_EVT_CHTYPE_GPI_EV)
		gsi_prime_evt_ring(ctx);
	mutex_unlock(&gsi_ctx->mlock);

	spin_lock_irqsave(&gsi_ctx->slock, flags);
	gsi_writel(1 << evt_id, gsi_ctx->base +
			GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(ee));
	if (props->intf != GSI_EVT_CHTYPE_GPI_EV)
		__gsi_config_ieob_irq(gsi_ctx->per.ee, 1 << evt_id, 0);
	else
		__gsi_config_ieob_irq(gsi_ctx->per.ee, 1 << ctx->id, ~0);
	spin_unlock_irqrestore(&gsi_ctx->slock, flags);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_alloc_evt_ring);

static void __gsi_write_evt_ring_scratch(unsigned long evt_ring_hdl,
		union __packed gsi_evt_scratch val)
{
	gsi_writel(val.data.word1, gsi_ctx->base +
		GSI_EE_n_EV_CH_k_SCRATCH_0_OFFS(evt_ring_hdl,
			gsi_ctx->per.ee));
	gsi_writel(val.data.word2, gsi_ctx->base +
		GSI_EE_n_EV_CH_k_SCRATCH_1_OFFS(evt_ring_hdl,
			gsi_ctx->per.ee));
}

int gsi_write_evt_ring_scratch(unsigned long evt_ring_hdl,
		union __packed gsi_evt_scratch val)
{
	struct gsi_evt_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (evt_ring_hdl >= gsi_ctx->max_ev) {
		GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->evtr[evt_ring_hdl];

	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
		GSIERR("bad state %d\n",
				gsi_ctx->evtr[evt_ring_hdl].state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&ctx->mlock);
	ctx->scratch = val;
	__gsi_write_evt_ring_scratch(evt_ring_hdl, val);
	mutex_unlock(&ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_write_evt_ring_scratch);

int gsi_dealloc_evt_ring(unsigned long evt_ring_hdl)
{
	uint32_t val;
	enum gsi_evt_ch_cmd_opcode op = GSI_EVT_DE_ALLOC;
	struct gsi_evt_ctx *ctx;
	int res;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (evt_ring_hdl >= gsi_ctx->max_ev) {
		GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->evtr[evt_ring_hdl];

	if (atomic_read(&ctx->chan_ref_cnt)) {
		GSIERR("%d channels still using this event ring\n",
				atomic_read(&ctx->chan_ref_cnt));
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	/* TODO: add check for ERROR state */
	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&gsi_ctx->mlock);
	val = (((evt_ring_hdl << GSI_EE_n_EV_CH_CMD_CHID_SHFT) &
			GSI_EE_n_EV_CH_CMD_CHID_BMSK) |
		((op << GSI_EE_n_EV_CH_CMD_OPCODE_SHFT) &
			GSI_EE_n_EV_CH_CMD_OPCODE_BMSK));
	gsi_writel(val, gsi_ctx->base +
			GSI_EE_n_EV_CH_CMD_OFFS(gsi_ctx->per.ee));
	res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
	if (res == 0) {
		GSIERR("evt_id=%lu timed out\n", evt_ring_hdl);
		mutex_unlock(&gsi_ctx->mlock);
		return -GSI_STATUS_TIMED_OUT;
	}

	if (ctx->state != GSI_EVT_RING_STATE_NOT_ALLOCATED) {
		GSIERR("evt_id=%lu unexpected state=%u\n", evt_ring_hdl,
				ctx->state);
		BUG();
	}
	mutex_unlock(&gsi_ctx->mlock);

	if (!ctx->props.evchid_valid) {
		mutex_lock(&gsi_ctx->mlock);
		clear_bit(evt_ring_hdl, &gsi_ctx->evt_bmap);
		mutex_unlock(&gsi_ctx->mlock);
	}
	atomic_dec(&gsi_ctx->num_evt_ring);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_dealloc_evt_ring);

int gsi_query_evt_ring_db_addr(unsigned long evt_ring_hdl,
		uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb)
{
	struct gsi_evt_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!db_addr_wp_msb || !db_addr_wp_lsb) {
		GSIERR("bad params msb=%p lsb=%p\n", db_addr_wp_msb,
				db_addr_wp_lsb);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (evt_ring_hdl >= gsi_ctx->max_ev) {
		GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->evtr[evt_ring_hdl];

	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
		GSIERR("bad state %d\n",
				gsi_ctx->evtr[evt_ring_hdl].state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	*db_addr_wp_lsb = gsi_ctx->per.phys_addr +
		GSI_EE_n_EV_CH_k_DOORBELL_0_OFFS(evt_ring_hdl, gsi_ctx->per.ee);
	*db_addr_wp_msb = gsi_ctx->per.phys_addr +
		GSI_EE_n_EV_CH_k_DOORBELL_1_OFFS(evt_ring_hdl, gsi_ctx->per.ee);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_query_evt_ring_db_addr);

int gsi_reset_evt_ring(unsigned long evt_ring_hdl)
{
	uint32_t val;
	enum gsi_evt_ch_cmd_opcode op = GSI_EVT_RESET;
	struct gsi_evt_ctx *ctx;
	int res;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (evt_ring_hdl >= gsi_ctx->max_ev) {
		GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->evtr[evt_ring_hdl];

	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&gsi_ctx->mlock);
	val = (((evt_ring_hdl << GSI_EE_n_EV_CH_CMD_CHID_SHFT) &
			GSI_EE_n_EV_CH_CMD_CHID_BMSK) |
		((op << GSI_EE_n_EV_CH_CMD_OPCODE_SHFT) &
			GSI_EE_n_EV_CH_CMD_OPCODE_BMSK));
	gsi_writel(val, gsi_ctx->base +
			GSI_EE_n_EV_CH_CMD_OFFS(gsi_ctx->per.ee));
	res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
	if (res == 0) {
		GSIERR("evt_id=%lu timed out\n", evt_ring_hdl);
		mutex_unlock(&gsi_ctx->mlock);
		return -GSI_STATUS_TIMED_OUT;
	}

	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
		GSIERR("evt_id=%lu unexpected state=%u\n", evt_ring_hdl,
				ctx->state);
		BUG();
	}

	gsi_program_evt_ring_ctx(&ctx->props, evt_ring_hdl, gsi_ctx->per.ee);
	gsi_init_evt_ring(&ctx->props, &ctx->ring);

	/* restore scratch */
	__gsi_write_evt_ring_scratch(evt_ring_hdl, ctx->scratch);

	if (ctx->props.intf == GSI_EVT_CHTYPE_GPI_EV)
		gsi_prime_evt_ring(ctx);
	mutex_unlock(&gsi_ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_reset_evt_ring);

int gsi_get_evt_ring_cfg(unsigned long evt_ring_hdl,
		struct gsi_evt_ring_props *props, union gsi_evt_scratch *scr)
{
	struct gsi_evt_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!props || !scr) {
		GSIERR("bad params props=%p scr=%p\n", props, scr);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (evt_ring_hdl >= gsi_ctx->max_ev) {
		GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->evtr[evt_ring_hdl];

	if (ctx->state == GSI_EVT_RING_STATE_NOT_ALLOCATED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&ctx->mlock);
	*props = ctx->props;
	*scr = ctx->scratch;
	mutex_unlock(&ctx->mlock);

	return GSI_STATUS_SUCCESS;
}
EXPORT_SYMBOL(gsi_get_evt_ring_cfg);

int gsi_set_evt_ring_cfg(unsigned long evt_ring_hdl,
		struct gsi_evt_ring_props *props, union gsi_evt_scratch *scr)
{
	struct gsi_evt_ctx *ctx;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}

	if (!props || gsi_validate_evt_ring_props(props)) {
		GSIERR("bad params props=%p\n", props);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (evt_ring_hdl >= gsi_ctx->max_ev) {
		GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ctx = &gsi_ctx->evtr[evt_ring_hdl];

	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
		GSIERR("bad state %d\n", ctx->state);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	if (ctx->props.exclusive != props->exclusive) {
		GSIERR("changing immutable fields not supported\n");
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	mutex_lock(&ctx->mlock);
	ctx->props = *props;
	if (scr)
		ctx->scratch = *scr;
	mutex_unlock(&ctx->mlock);

	return gsi_reset_evt_ring(evt_ring_hdl);
}
EXPORT_SYMBOL(gsi_set_evt_ring_cfg);

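/*
 * Program the channel context registers from the client properties:
 * protocol, direction, bound event-ring index and element size go in
 * CNTXT_0; ring length and the 64-bit ring base in CNTXT_1..3; the
 * WRR weight, prefetch limit and doorbell-engine selection in the
 * QOS register.
 */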
static void gsi_program_chan_ctx(struct gsi_chan_props *props, unsigned int ee,
		uint8_t erindex)
{
	uint32_t val;

	val = (((props->prot << GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_SHFT)
			& GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_BMSK) |
		((props->dir << GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_DIR_SHFT) &
			GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_DIR_BMSK) |
		((erindex << GSI_EE_n_GSI_CH_k_CNTXT_0_ERINDEX_SHFT) &
			GSI_EE_n_GSI_CH_k_CNTXT_0_ERINDEX_BMSK) |
		((props->re_size << GSI_EE_n_GSI_CH_k_CNTXT_0_ELEMENT_SIZE_SHFT)
			& GSI_EE_n_GSI_CH_k_CNTXT_0_ELEMENT_SIZE_BMSK));
	gsi_writel(val, gsi_ctx->base +
			GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS(props->ch_id, ee));

	val = (props->ring_len & GSI_EE_n_GSI_CH_k_CNTXT_1_R_LENGTH_BMSK) <<
		GSI_EE_n_GSI_CH_k_CNTXT_1_R_LENGTH_SHFT;
	gsi_writel(val, gsi_ctx->base +
			GSI_EE_n_GSI_CH_k_CNTXT_1_OFFS(props->ch_id, ee));

	val = (props->ring_base_addr &
			GSI_EE_n_GSI_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_BMSK) <<
		GSI_EE_n_GSI_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_SHFT;
	gsi_writel(val, gsi_ctx->base +
			GSI_EE_n_GSI_CH_k_CNTXT_2_OFFS(props->ch_id, ee));

	val = ((props->ring_base_addr >> 32) &
			GSI_EE_n_GSI_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_BMSK) <<
		GSI_EE_n_GSI_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_SHFT;
	gsi_writel(val, gsi_ctx->base +
			GSI_EE_n_GSI_CH_k_CNTXT_3_OFFS(props->ch_id, ee));

	val = (((props->low_weight << GSI_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_SHFT) &
			GSI_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_BMSK) |
		((props->max_prefetch <<
			GSI_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_SHFT) &
			GSI_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_BMSK) |
		((props->use_db_eng << GSI_EE_n_GSI_CH_k_QOS_USE_DB_ENG_SHFT) &
			GSI_EE_n_GSI_CH_k_QOS_USE_DB_ENG_BMSK));
	gsi_writel(val, gsi_ctx->base +
			GSI_EE_n_GSI_CH_k_QOS_OFFS(props->ch_id, ee));
}

static void gsi_init_chan_ring(struct gsi_chan_props *props,
		struct gsi_ring_ctx *ctx)
{
	ctx->base_va = (uintptr_t)props->ring_base_vaddr;
	ctx->base = props->ring_base_addr;
	ctx->wp = ctx->base;
	ctx->rp = ctx->base;
	ctx->wp_local = ctx->base;
	ctx->rp_local = ctx->base;
	ctx->len = props->ring_len;
	ctx->elem_sz = props->re_size;
	ctx->max_num_elem = ctx->len / ctx->elem_sz - 1;
	ctx->end = ctx->base + (ctx->max_num_elem + 1) *
		ctx->elem_sz;
}

static int gsi_validate_channel_props(struct gsi_chan_props *props)
{
	uint64_t ra;

	if (props->ch_id >= gsi_ctx->max_ch) {
		GSIERR("ch_id %u invalid\n", props->ch_id);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if ((props->re_size == GSI_CHAN_RE_SIZE_4B &&
				props->ring_len % 4) ||
			(props->re_size == GSI_CHAN_RE_SIZE_16B &&
			props->ring_len % 16) ||
			(props->re_size == GSI_CHAN_RE_SIZE_32B &&
			props->ring_len % 32)) {
		GSIERR("bad params ring_len %u not a multiple of re size %u\n",
				props->ring_len, props->re_size);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	ra = props->ring_base_addr;
	do_div(ra, roundup_pow_of_two(props->ring_len));

	if (props->ring_base_addr != ra * roundup_pow_of_two(props->ring_len)) {
		GSIERR("bad params ring base not aligned 0x%llx align 0x%lx\n",
				props->ring_base_addr,
				roundup_pow_of_two(props->ring_len));
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (props->prot == GSI_CHAN_PROT_GPI &&
			!props->ring_base_vaddr) {
		GSIERR("protocol %u requires ring base VA\n", props->prot);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (props->low_weight > GSI_MAX_CH_LOW_WEIGHT) {
		GSIERR("invalid channel low weight %u\n", props->low_weight);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (props->prot == GSI_CHAN_PROT_GPI && !props->xfer_cb) {
		GSIERR("xfer callback must be provided\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (!props->err_cb) {
		GSIERR("err callback must be provided\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}

	return GSI_STATUS_SUCCESS;
}

int gsi_alloc_channel(struct gsi_chan_props *props, unsigned long dev_hdl,
		unsigned long *chan_hdl)
{
	struct gsi_chan_ctx *ctx;
	uint32_t val;
	int res;
	int ee;
	enum gsi_ch_cmd_opcode op = GSI_CH_ALLOCATE;
	uint8_t erindex;
	void **user_data;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
		return -GSI_STATUS_NODEV;
	}
	/* only dereference gsi_ctx after the NULL check above */
	ee = gsi_ctx->per.ee;

	if (!props || !chan_hdl || dev_hdl != (uintptr_t)gsi_ctx) {
		GSIERR("bad params props=%p dev_hdl=0x%lx chan_hdl=%p\n",
				props, dev_hdl, chan_hdl);
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (gsi_validate_channel_props(props)) {
		GSIERR("bad params\n");
		return -GSI_STATUS_INVALID_PARAMS;
	}

	if (props->evt_ring_hdl != ~0 &&
		atomic_read(&gsi_ctx->evtr[props->evt_ring_hdl].chan_ref_cnt) &&
		gsi_ctx->evtr[props->evt_ring_hdl].props.exclusive) {
		GSIERR("evt ring=%lu already in exclusive use chan_hdl=%p\n",
				props->evt_ring_hdl, chan_hdl);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}

	ctx = &gsi_ctx->chan[props->ch_id];
	if (ctx->allocated) {
		GSIERR("chan %d already allocated\n", props->ch_id);
		return -GSI_STATUS_NODEV;
	}

	memset(ctx, 0, sizeof(*ctx));
	user_data = devm_kzalloc(gsi_ctx->dev,
			(props->ring_len / props->re_size) * sizeof(void *),
			GFP_KERNEL);
	if (user_data == NULL) {
		GSIERR("%s:%d failed to allocate user_data array\n",
				__func__, __LINE__);
		return -GSI_STATUS_RES_ALLOC_FAILURE;
	}

1613 mutex_init(&ctx->mlock);
1614 init_completion(&ctx->compl);
1615 atomic_set(&ctx->poll_mode, GSI_CHAN_MODE_CALLBACK);
1616 ctx->props = *props;
1617
1618 mutex_lock(&gsi_ctx->mlock);
Skylar Chang22ecb822016-10-21 10:15:04 -07001619 gsi_ctx->ch_dbg[props->ch_id].ch_allocate++;
Amir Levycdccd632016-10-30 09:36:41 +02001620 val = (((props->ch_id << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) &
1621 GSI_EE_n_GSI_CH_CMD_CHID_BMSK) |
1622 ((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) &
1623 GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK));
1624 gsi_writel(val, gsi_ctx->base +
1625 GSI_EE_n_GSI_CH_CMD_OFFS(ee));
1626 res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
1627 if (res == 0) {
1628 GSIERR("chan_hdl=%u timed out\n", props->ch_id);
1629 mutex_unlock(&gsi_ctx->mlock);
1630 devm_kfree(gsi_ctx->dev, user_data);
1631 return -GSI_STATUS_TIMED_OUT;
1632 }
1633 if (ctx->state != GSI_CHAN_STATE_ALLOCATED) {
1634 GSIERR("chan_hdl=%u allocation failed state=%d\n",
1635 props->ch_id, ctx->state);
1636 mutex_unlock(&gsi_ctx->mlock);
1637 devm_kfree(gsi_ctx->dev, user_data);
1638 return -GSI_STATUS_RES_ALLOC_FAILURE;
1639 }
1640 mutex_unlock(&gsi_ctx->mlock);
1641
1642 erindex = props->evt_ring_hdl != ~0 ? props->evt_ring_hdl :
1643 GSI_NO_EVT_ERINDEX;
1644 if (erindex != GSI_NO_EVT_ERINDEX) {
1645 ctx->evtr = &gsi_ctx->evtr[erindex];
1646 atomic_inc(&ctx->evtr->chan_ref_cnt);
1647 if (ctx->evtr->props.exclusive)
1648 ctx->evtr->chan = ctx;
1649 }
1650
1651 gsi_program_chan_ctx(props, gsi_ctx->per.ee, erindex);
1652
1653 spin_lock_init(&ctx->ring.slock);
1654 gsi_init_chan_ring(props, &ctx->ring);
1655 if (!props->max_re_expected)
1656 ctx->props.max_re_expected = ctx->ring.max_num_elem;
1657 ctx->user_data = user_data;
1658 *chan_hdl = props->ch_id;
1659 ctx->allocated = true;
1660 ctx->stats.dp.last_timestamp = jiffies_to_msecs(jiffies);
1661 atomic_inc(&gsi_ctx->num_chan);
1662
1663 return GSI_STATUS_SUCCESS;
1664}
1665EXPORT_SYMBOL(gsi_alloc_channel);
1666
1667static void __gsi_write_channel_scratch(unsigned long chan_hdl,
1668 union __packed gsi_channel_scratch val)
1669{
1670 uint32_t reg;
1671
1672 gsi_writel(val.data.word1, gsi_ctx->base +
1673 GSI_EE_n_GSI_CH_k_SCRATCH_0_OFFS(chan_hdl,
1674 gsi_ctx->per.ee));
1675 gsi_writel(val.data.word2, gsi_ctx->base +
1676 GSI_EE_n_GSI_CH_k_SCRATCH_1_OFFS(chan_hdl,
1677 gsi_ctx->per.ee));
1678 gsi_writel(val.data.word3, gsi_ctx->base +
1679 GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS(chan_hdl,
1680 gsi_ctx->per.ee));
1681 /* below sequence is not atomic. assumption is sequencer specific fields
1682 * will remain unchanged across this sequence
1683 */
1684 reg = gsi_readl(gsi_ctx->base +
1685 GSI_EE_n_GSI_CH_k_SCRATCH_3_OFFS(chan_hdl,
1686 gsi_ctx->per.ee));
1687 reg &= 0xFFFF;
1688 reg |= (val.data.word4 & 0xFFFF0000);
1689 gsi_writel(reg, gsi_ctx->base +
1690 GSI_EE_n_GSI_CH_k_SCRATCH_3_OFFS(chan_hdl,
1691 gsi_ctx->per.ee));
1692}
1693
1694int gsi_write_channel_scratch(unsigned long chan_hdl,
1695 union __packed gsi_channel_scratch val)
1696{
1697 struct gsi_chan_ctx *ctx;
1698
1699 if (!gsi_ctx) {
1700 pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
1701 return -GSI_STATUS_NODEV;
1702 }
1703
Amir Levy41644242016-11-03 15:38:09 +02001704 if (chan_hdl >= gsi_ctx->max_ch) {
Amir Levycdccd632016-10-30 09:36:41 +02001705 GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
1706 return -GSI_STATUS_INVALID_PARAMS;
1707 }
1708
1709 if (gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_ALLOCATED &&
1710 gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STOPPED) {
1711 GSIERR("bad state %d\n",
1712 gsi_ctx->chan[chan_hdl].state);
1713 return -GSI_STATUS_UNSUPPORTED_OP;
1714 }
1715
1716 ctx = &gsi_ctx->chan[chan_hdl];
1717
1718 mutex_lock(&ctx->mlock);
1719 ctx->scratch = val;
1720 __gsi_write_channel_scratch(chan_hdl, val);
1721 mutex_unlock(&ctx->mlock);
1722
1723 return GSI_STATUS_SUCCESS;
1724}
1725EXPORT_SYMBOL(gsi_write_channel_scratch);
1726
1727int gsi_query_channel_db_addr(unsigned long chan_hdl,
1728 uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb)
1729{
1730 if (!gsi_ctx) {
1731 pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
1732 return -GSI_STATUS_NODEV;
1733 }
1734
1735 if (!db_addr_wp_msb || !db_addr_wp_lsb) {
1736 GSIERR("bad params msb=%p lsb=%p\n", db_addr_wp_msb,
1737 db_addr_wp_lsb);
1738 return -GSI_STATUS_INVALID_PARAMS;
1739 }
1740
Amir Levy41644242016-11-03 15:38:09 +02001741 if (chan_hdl >= gsi_ctx->max_ch) {
Amir Levycdccd632016-10-30 09:36:41 +02001742 GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
1743 return -GSI_STATUS_INVALID_PARAMS;
1744 }
1745
1746 if (gsi_ctx->chan[chan_hdl].state == GSI_CHAN_STATE_NOT_ALLOCATED) {
1747 GSIERR("bad state %d\n",
1748 gsi_ctx->chan[chan_hdl].state);
1749 return -GSI_STATUS_UNSUPPORTED_OP;
1750 }
1751
1752 *db_addr_wp_lsb = gsi_ctx->per.phys_addr +
1753 GSI_EE_n_GSI_CH_k_DOORBELL_0_OFFS(chan_hdl, gsi_ctx->per.ee);
1754 *db_addr_wp_msb = gsi_ctx->per.phys_addr +
1755 GSI_EE_n_GSI_CH_k_DOORBELL_1_OFFS(chan_hdl, gsi_ctx->per.ee);
1756
1757 return GSI_STATUS_SUCCESS;
1758}
1759EXPORT_SYMBOL(gsi_query_channel_db_addr);
1760
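/*
 * gsi_start_channel() - issue a GSI_CH_START command and wait (up to
 * GSI_CMD_TIMEOUT) for the channel-control interrupt to move the
 * channel to STARTED state. gsi_ctx->mlock serializes channel
 * commands, so only one command is in flight at a time.
 */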
1761int gsi_start_channel(unsigned long chan_hdl)
1762{
1763 enum gsi_ch_cmd_opcode op = GSI_CH_START;
1764 int res;
1765 uint32_t val;
1766 struct gsi_chan_ctx *ctx;
1767
1768 if (!gsi_ctx) {
1769 pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
1770 return -GSI_STATUS_NODEV;
1771 }
1772
1773	if (chan_hdl >= gsi_ctx->max_ch) {
1774		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
1775 return -GSI_STATUS_INVALID_PARAMS;
1776 }
1777
1778 ctx = &gsi_ctx->chan[chan_hdl];
1779
1780 if (ctx->state != GSI_CHAN_STATE_ALLOCATED &&
1781 ctx->state != GSI_CHAN_STATE_STOP_IN_PROC &&
1782 ctx->state != GSI_CHAN_STATE_STOPPED) {
1783 GSIERR("bad state %d\n", ctx->state);
1784 return -GSI_STATUS_UNSUPPORTED_OP;
1785 }
1786
1787 mutex_lock(&gsi_ctx->mlock);
1788 init_completion(&ctx->compl);
1789
1790	gsi_ctx->ch_dbg[chan_hdl].ch_start++;
1791	val = (((chan_hdl << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) &
1792 GSI_EE_n_GSI_CH_CMD_CHID_BMSK) |
1793 ((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) &
1794 GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK));
1795 gsi_writel(val, gsi_ctx->base +
1796 GSI_EE_n_GSI_CH_CMD_OFFS(gsi_ctx->per.ee));
1797 res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
1798 if (res == 0) {
1799 GSIERR("chan_hdl=%lu timed out\n", chan_hdl);
1800 mutex_unlock(&gsi_ctx->mlock);
1801 return -GSI_STATUS_TIMED_OUT;
1802 }
1803 if (ctx->state != GSI_CHAN_STATE_STARTED) {
1804 GSIERR("chan=%lu unexpected state=%u\n", chan_hdl, ctx->state);
1805 BUG();
1806 }
1807
1808 mutex_unlock(&gsi_ctx->mlock);
1809
1810 return GSI_STATUS_SUCCESS;
1811}
1812EXPORT_SYMBOL(gsi_start_channel);
1813
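/*
 * gsi_stop_channel() - issue a GSI_CH_STOP command. The wait uses the
 * deliberately short GSI_STOP_CMD_TIMEOUT_MS; when the halt is still
 * in progress (STOP_IN_PROC) the call returns -GSI_STATUS_AGAIN and
 * the caller is expected to retry.
 */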
1814int gsi_stop_channel(unsigned long chan_hdl)
1815{
1816 enum gsi_ch_cmd_opcode op = GSI_CH_STOP;
1817 int res;
1818 uint32_t val;
1819 struct gsi_chan_ctx *ctx;
1820
1821 if (!gsi_ctx) {
1822 pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
1823 return -GSI_STATUS_NODEV;
1824 }
1825
1826	if (chan_hdl >= gsi_ctx->max_ch) {
1827		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
1828 return -GSI_STATUS_INVALID_PARAMS;
1829 }
1830
1831 ctx = &gsi_ctx->chan[chan_hdl];
1832
1833 if (ctx->state == GSI_CHAN_STATE_STOPPED) {
1834 GSIDBG("chan_hdl=%lu already stopped\n", chan_hdl);
1835 return GSI_STATUS_SUCCESS;
1836 }
1837
1838 if (ctx->state != GSI_CHAN_STATE_STARTED &&
1839 ctx->state != GSI_CHAN_STATE_STOP_IN_PROC &&
1840 ctx->state != GSI_CHAN_STATE_ERROR) {
1841 GSIERR("bad state %d\n", ctx->state);
1842 return -GSI_STATUS_UNSUPPORTED_OP;
1843 }
1844
1845 mutex_lock(&gsi_ctx->mlock);
1846 init_completion(&ctx->compl);
1847
1848	gsi_ctx->ch_dbg[chan_hdl].ch_stop++;
1849	val = (((chan_hdl << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) &
1850 GSI_EE_n_GSI_CH_CMD_CHID_BMSK) |
1851 ((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) &
1852 GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK));
1853 gsi_writel(val, gsi_ctx->base +
1854 GSI_EE_n_GSI_CH_CMD_OFFS(gsi_ctx->per.ee));
1855 res = wait_for_completion_timeout(&ctx->compl,
1856 msecs_to_jiffies(GSI_STOP_CMD_TIMEOUT_MS));
1857 if (res == 0) {
1858 GSIDBG("chan_hdl=%lu timed out\n", chan_hdl);
1859 res = -GSI_STATUS_TIMED_OUT;
1860 goto free_lock;
1861 }
1862
1863 if (ctx->state != GSI_CHAN_STATE_STOPPED &&
1864 ctx->state != GSI_CHAN_STATE_STOP_IN_PROC) {
1865 GSIERR("chan=%lu unexpected state=%u\n", chan_hdl, ctx->state);
1866 res = -GSI_STATUS_BAD_STATE;
1867 goto free_lock;
1868 }
1869
1870 if (ctx->state == GSI_CHAN_STATE_STOP_IN_PROC) {
1871 GSIERR("chan=%lu busy try again\n", chan_hdl);
1872 res = -GSI_STATUS_AGAIN;
1873 goto free_lock;
1874 }
1875
1876 res = GSI_STATUS_SUCCESS;
1877
1878free_lock:
1879 mutex_unlock(&gsi_ctx->mlock);
1880 return res;
1881}
1882EXPORT_SYMBOL(gsi_stop_channel);
1883
1884int gsi_stop_db_channel(unsigned long chan_hdl)
1885{
1886 enum gsi_ch_cmd_opcode op = GSI_CH_DB_STOP;
1887 int res;
1888 uint32_t val;
1889 struct gsi_chan_ctx *ctx;
1890
1891 if (!gsi_ctx) {
1892 pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
1893 return -GSI_STATUS_NODEV;
1894 }
1895
1896	if (chan_hdl >= gsi_ctx->max_ch) {
1897		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
1898 return -GSI_STATUS_INVALID_PARAMS;
1899 }
1900
1901 ctx = &gsi_ctx->chan[chan_hdl];
1902
1903 if (ctx->state == GSI_CHAN_STATE_STOPPED) {
1904 GSIDBG("chan_hdl=%lu already stopped\n", chan_hdl);
1905 return GSI_STATUS_SUCCESS;
1906 }
1907
1908 if (ctx->state != GSI_CHAN_STATE_STARTED &&
1909 ctx->state != GSI_CHAN_STATE_STOP_IN_PROC) {
1910 GSIERR("bad state %d\n", ctx->state);
1911 return -GSI_STATUS_UNSUPPORTED_OP;
1912 }
1913
1914 mutex_lock(&gsi_ctx->mlock);
1915 init_completion(&ctx->compl);
1916
1917	gsi_ctx->ch_dbg[chan_hdl].ch_db_stop++;
1918	val = (((chan_hdl << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) &
1919 GSI_EE_n_GSI_CH_CMD_CHID_BMSK) |
1920 ((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) &
1921 GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK));
1922 gsi_writel(val, gsi_ctx->base +
1923 GSI_EE_n_GSI_CH_CMD_OFFS(gsi_ctx->per.ee));
1924 res = wait_for_completion_timeout(&ctx->compl,
1925 msecs_to_jiffies(GSI_STOP_CMD_TIMEOUT_MS));
1926 if (res == 0) {
1927 GSIERR("chan_hdl=%lu timed out\n", chan_hdl);
1928 res = -GSI_STATUS_TIMED_OUT;
1929 goto free_lock;
1930 }
1931
1932 if (ctx->state != GSI_CHAN_STATE_STOPPED &&
1933 ctx->state != GSI_CHAN_STATE_STOP_IN_PROC) {
1934 GSIERR("chan=%lu unexpected state=%u\n", chan_hdl, ctx->state);
1935 res = -GSI_STATUS_BAD_STATE;
1936 goto free_lock;
1937 }
1938
1939 if (ctx->state == GSI_CHAN_STATE_STOP_IN_PROC) {
1940 GSIERR("chan=%lu busy try again\n", chan_hdl);
1941 res = -GSI_STATUS_AGAIN;
1942 goto free_lock;
1943 }
1944
1945 res = GSI_STATUS_SUCCESS;
1946
1947free_lock:
1948 mutex_unlock(&gsi_ctx->mlock);
1949 return res;
1950}
1951EXPORT_SYMBOL(gsi_stop_db_channel);
1952
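/*
 * gsi_reset_channel() - reset a STOPPED channel back to ALLOCATED
 * state, then reprogram its context, ring and scratch. For channels
 * in the GSI-to-host direction the reset command is issued a second
 * time after a short sleep (the "reset GSI producers" hardware
 * workaround below).
 */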
1953int gsi_reset_channel(unsigned long chan_hdl)
1954{
1955 enum gsi_ch_cmd_opcode op = GSI_CH_RESET;
1956 int res;
1957 uint32_t val;
1958 struct gsi_chan_ctx *ctx;
1959 bool reset_done = false;
1960
1961 if (!gsi_ctx) {
1962 pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
1963 return -GSI_STATUS_NODEV;
1964 }
1965
1966	if (chan_hdl >= gsi_ctx->max_ch) {
1967		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
1968 return -GSI_STATUS_INVALID_PARAMS;
1969 }
1970
1971 ctx = &gsi_ctx->chan[chan_hdl];
1972
1973 if (ctx->state != GSI_CHAN_STATE_STOPPED) {
1974 GSIERR("bad state %d\n", ctx->state);
1975 return -GSI_STATUS_UNSUPPORTED_OP;
1976 }
1977
1978 mutex_lock(&gsi_ctx->mlock);
1979
1980reset:
1981 init_completion(&ctx->compl);
1982	gsi_ctx->ch_dbg[chan_hdl].ch_reset++;
1983	val = (((chan_hdl << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) &
1984 GSI_EE_n_GSI_CH_CMD_CHID_BMSK) |
1985 ((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) &
1986 GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK));
1987 gsi_writel(val, gsi_ctx->base +
1988 GSI_EE_n_GSI_CH_CMD_OFFS(gsi_ctx->per.ee));
1989 res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
1990 if (res == 0) {
1991 GSIERR("chan_hdl=%lu timed out\n", chan_hdl);
1992 mutex_unlock(&gsi_ctx->mlock);
1993 return -GSI_STATUS_TIMED_OUT;
1994 }
1995
1996 if (ctx->state != GSI_CHAN_STATE_ALLOCATED) {
1997 GSIERR("chan_hdl=%lu unexpected state=%u\n", chan_hdl,
1998 ctx->state);
1999 BUG();
2000 }
2001
2002 /* workaround: reset GSI producers again */
2003 if (ctx->props.dir == GSI_CHAN_DIR_FROM_GSI && !reset_done) {
2004		usleep_range(GSI_RESET_WA_MIN_SLEEP, GSI_RESET_WA_MAX_SLEEP);
2005		reset_done = true;
2006 goto reset;
2007 }
2008
2009 gsi_program_chan_ctx(&ctx->props, gsi_ctx->per.ee,
2010 ctx->evtr ? ctx->evtr->id : GSI_NO_EVT_ERINDEX);
2011 gsi_init_chan_ring(&ctx->props, &ctx->ring);
2012
2013 /* restore scratch */
2014 __gsi_write_channel_scratch(chan_hdl, ctx->scratch);
2015
2016 mutex_unlock(&gsi_ctx->mlock);
2017
2018 return GSI_STATUS_SUCCESS;
2019}
2020EXPORT_SYMBOL(gsi_reset_channel);
2021
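/*
 * gsi_dealloc_channel() - issue a GSI_CH_DE_ALLOC command and release
 * the per-transfer user_data array. The channel must already be back
 * in ALLOCATED state, i.e. stopped and reset.
 */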
2022int gsi_dealloc_channel(unsigned long chan_hdl)
2023{
2024 enum gsi_ch_cmd_opcode op = GSI_CH_DE_ALLOC;
2025 int res;
2026 uint32_t val;
2027 struct gsi_chan_ctx *ctx;
2028
2029 if (!gsi_ctx) {
2030 pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
2031 return -GSI_STATUS_NODEV;
2032 }
2033
2034	if (chan_hdl >= gsi_ctx->max_ch) {
2035		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
2036 return -GSI_STATUS_INVALID_PARAMS;
2037 }
2038
2039 ctx = &gsi_ctx->chan[chan_hdl];
2040
2041 if (ctx->state != GSI_CHAN_STATE_ALLOCATED) {
2042 GSIERR("bad state %d\n", ctx->state);
2043 return -GSI_STATUS_UNSUPPORTED_OP;
2044 }
2045
2046 mutex_lock(&gsi_ctx->mlock);
2047 init_completion(&ctx->compl);
2048
2049	gsi_ctx->ch_dbg[chan_hdl].ch_de_alloc++;
2050	val = (((chan_hdl << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) &
2051 GSI_EE_n_GSI_CH_CMD_CHID_BMSK) |
2052 ((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) &
2053 GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK));
2054 gsi_writel(val, gsi_ctx->base +
2055 GSI_EE_n_GSI_CH_CMD_OFFS(gsi_ctx->per.ee));
2056 res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
2057 if (res == 0) {
2058 GSIERR("chan_hdl=%lu timed out\n", chan_hdl);
2059 mutex_unlock(&gsi_ctx->mlock);
2060 return -GSI_STATUS_TIMED_OUT;
2061 }
2062 if (ctx->state != GSI_CHAN_STATE_NOT_ALLOCATED) {
2063 GSIERR("chan_hdl=%lu unexpected state=%u\n", chan_hdl,
2064 ctx->state);
2065 BUG();
2066 }
2067
2068 mutex_unlock(&gsi_ctx->mlock);
2069
2070 devm_kfree(gsi_ctx->dev, ctx->user_data);
2071 ctx->allocated = false;
2072 if (ctx->evtr)
2073 atomic_dec(&ctx->evtr->chan_ref_cnt);
2074 atomic_dec(&gsi_ctx->num_chan);
2075
2076 return GSI_STATUS_SUCCESS;
2077}
2078EXPORT_SYMBOL(gsi_dealloc_channel);
2079
2080void gsi_update_ch_dp_stats(struct gsi_chan_ctx *ctx, uint16_t used)
2081{
2082 unsigned long now = jiffies_to_msecs(jiffies);
2083 unsigned long elapsed;
2084
2085 if (used == 0) {
2086 elapsed = now - ctx->stats.dp.last_timestamp;
2087 if (ctx->stats.dp.empty_time < elapsed)
2088 ctx->stats.dp.empty_time = elapsed;
2089 }
2090
2091 if (used <= ctx->props.max_re_expected / 3)
2092 ++ctx->stats.dp.ch_below_lo;
2093 else if (used <= 2 * ctx->props.max_re_expected / 3)
2094 ++ctx->stats.dp.ch_below_hi;
2095 else
2096 ++ctx->stats.dp.ch_above_hi;
2097 ctx->stats.dp.last_timestamp = now;
2098}
2099
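/*
 * __gsi_query_channel_free_re() - compute the number of free ring
 * elements. The ring holds max_num_elem + 1 slots and wraps, so:
 *
 *	used = (end >= start) ? end - start
 *			      : max_num_elem + 1 - (start - end);
 *
 * For example (illustrative values), max_num_elem = 7, start = 6,
 * end = 2 gives used = 8 - (6 - 2) = 4. Must be called with the
 * relevant ring slock held.
 */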
2100static void __gsi_query_channel_free_re(struct gsi_chan_ctx *ctx,
2101 uint16_t *num_free_re)
2102{
2103 uint16_t start;
2104 uint16_t start_hw;
2105 uint16_t end;
2106 uint64_t rp;
2107 uint64_t rp_hw;
2108 int ee = gsi_ctx->per.ee;
2109 uint16_t used;
2110 uint16_t used_hw;
2111
2112 rp_hw = gsi_readl(gsi_ctx->base +
2113 GSI_EE_n_GSI_CH_k_CNTXT_4_OFFS(ctx->props.ch_id, ee));
2114 rp_hw |= ((uint64_t)gsi_readl(gsi_ctx->base +
2115 GSI_EE_n_GSI_CH_k_CNTXT_5_OFFS(ctx->props.ch_id, ee)))
2116 << 32;
2117
2118 if (!ctx->evtr) {
2119 rp = rp_hw;
2120 ctx->ring.rp = rp;
2121 } else {
2122 rp = ctx->ring.rp_local;
2123 }
2124
2125 start = gsi_find_idx_from_addr(&ctx->ring, rp);
2126 start_hw = gsi_find_idx_from_addr(&ctx->ring, rp_hw);
2127 end = gsi_find_idx_from_addr(&ctx->ring, ctx->ring.wp_local);
2128
2129 if (end >= start)
2130 used = end - start;
2131 else
2132 used = ctx->ring.max_num_elem + 1 - (start - end);
2133
2134 if (end >= start_hw)
2135 used_hw = end - start_hw;
2136 else
2137 used_hw = ctx->ring.max_num_elem + 1 - (start_hw - end);
2138
2139 *num_free_re = ctx->ring.max_num_elem - used;
2140 gsi_update_ch_dp_stats(ctx, used_hw);
2141}
2142
2143int gsi_query_channel_info(unsigned long chan_hdl,
2144 struct gsi_chan_info *info)
2145{
2146 struct gsi_chan_ctx *ctx;
2147 spinlock_t *slock;
2148 unsigned long flags;
2149 uint64_t rp;
2150 uint64_t wp;
2151	int ee;
2152
2153 if (!gsi_ctx) {
2154 pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
2155 return -GSI_STATUS_NODEV;
2156 }
2157
2158	if (chan_hdl >= gsi_ctx->max_ch || !info) {
2159		GSIERR("bad params chan_hdl=%lu info=%p\n", chan_hdl, info);
2160 return -GSI_STATUS_INVALID_PARAMS;
2161 }
2162
2163	ctx = &gsi_ctx->chan[chan_hdl];
	ee = gsi_ctx->per.ee;	/* deref gsi_ctx only after the NULL check */
2164 if (ctx->evtr) {
2165 slock = &ctx->evtr->ring.slock;
2166 info->evt_valid = true;
2167 } else {
2168 slock = &ctx->ring.slock;
2169 info->evt_valid = false;
2170 }
2171
2172 spin_lock_irqsave(slock, flags);
2173
2174 rp = gsi_readl(gsi_ctx->base +
2175 GSI_EE_n_GSI_CH_k_CNTXT_4_OFFS(ctx->props.ch_id, ee));
2176 rp |= ((uint64_t)gsi_readl(gsi_ctx->base +
2177 GSI_EE_n_GSI_CH_k_CNTXT_5_OFFS(ctx->props.ch_id, ee))) << 32;
2178 ctx->ring.rp = rp;
2179 info->rp = rp;
2180
2181 wp = gsi_readl(gsi_ctx->base +
2182 GSI_EE_n_GSI_CH_k_CNTXT_6_OFFS(ctx->props.ch_id, ee));
2183 wp |= ((uint64_t)gsi_readl(gsi_ctx->base +
2184 GSI_EE_n_GSI_CH_k_CNTXT_7_OFFS(ctx->props.ch_id, ee))) << 32;
2185 ctx->ring.wp = wp;
2186 info->wp = wp;
2187
2188 if (info->evt_valid) {
2189 rp = gsi_readl(gsi_ctx->base +
2190 GSI_EE_n_EV_CH_k_CNTXT_4_OFFS(ctx->evtr->id, ee));
2191 rp |= ((uint64_t)gsi_readl(gsi_ctx->base +
2192 GSI_EE_n_EV_CH_k_CNTXT_5_OFFS(ctx->evtr->id, ee)))
2193 << 32;
2194 info->evt_rp = rp;
2195
2196 wp = gsi_readl(gsi_ctx->base +
2197 GSI_EE_n_EV_CH_k_CNTXT_6_OFFS(ctx->evtr->id, ee));
2198 wp |= ((uint64_t)gsi_readl(gsi_ctx->base +
2199 GSI_EE_n_EV_CH_k_CNTXT_7_OFFS(ctx->evtr->id, ee)))
2200 << 32;
2201 info->evt_wp = wp;
2202 }
2203
2204 spin_unlock_irqrestore(slock, flags);
2205
2206 GSIDBG("ch=%lu RP=0x%llx WP=0x%llx ev_valid=%d ERP=0x%llx EWP=0x%llx\n",
2207 chan_hdl, info->rp, info->wp,
2208 info->evt_valid, info->evt_rp, info->evt_wp);
2209
2210 return GSI_STATUS_SUCCESS;
2211}
2212EXPORT_SYMBOL(gsi_query_channel_info);
2213
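/*
 * gsi_is_channel_empty() - for GSI-to-host (FROM_GSI) channels the
 * ring is empty once the driver's local read pointer has caught up
 * with the hardware read pointer; for host-to-GSI channels, once the
 * hardware write and read pointers match.
 */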
2214int gsi_is_channel_empty(unsigned long chan_hdl, bool *is_empty)
2215{
2216 struct gsi_chan_ctx *ctx;
2217 spinlock_t *slock;
2218 unsigned long flags;
2219 uint64_t rp;
2220 uint64_t wp;
2221	int ee;
2222
2223 if (!gsi_ctx) {
2224 pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
2225 return -GSI_STATUS_NODEV;
2226 }
2227
2228	if (chan_hdl >= gsi_ctx->max_ch || !is_empty) {
2229		GSIERR("bad params chan_hdl=%lu is_empty=%p\n",
2230 chan_hdl, is_empty);
2231 return -GSI_STATUS_INVALID_PARAMS;
2232 }
2233
2234	ctx = &gsi_ctx->chan[chan_hdl];
	ee = gsi_ctx->per.ee;	/* deref gsi_ctx only after the NULL check */
2235
2236 if (ctx->props.prot != GSI_CHAN_PROT_GPI) {
2237 GSIERR("op not supported for protocol %u\n", ctx->props.prot);
2238 return -GSI_STATUS_UNSUPPORTED_OP;
2239 }
2240
2241 if (ctx->evtr)
2242 slock = &ctx->evtr->ring.slock;
2243 else
2244 slock = &ctx->ring.slock;
2245
2246 spin_lock_irqsave(slock, flags);
2247
2248 rp = gsi_readl(gsi_ctx->base +
2249 GSI_EE_n_GSI_CH_k_CNTXT_4_OFFS(ctx->props.ch_id, ee));
2250 rp |= ((uint64_t)gsi_readl(gsi_ctx->base +
2251 GSI_EE_n_GSI_CH_k_CNTXT_5_OFFS(ctx->props.ch_id, ee))) << 32;
2252 ctx->ring.rp = rp;
2253
2254 wp = gsi_readl(gsi_ctx->base +
2255 GSI_EE_n_GSI_CH_k_CNTXT_6_OFFS(ctx->props.ch_id, ee));
2256 wp |= ((uint64_t)gsi_readl(gsi_ctx->base +
2257 GSI_EE_n_GSI_CH_k_CNTXT_7_OFFS(ctx->props.ch_id, ee))) << 32;
2258 ctx->ring.wp = wp;
2259
2260 if (ctx->props.dir == GSI_CHAN_DIR_FROM_GSI)
2261		*is_empty = (ctx->ring.rp_local == rp);
2262	else
2263		*is_empty = (wp == rp);
2264
2265 spin_unlock_irqrestore(slock, flags);
2266
2267 GSIDBG("ch=%lu RP=0x%llx WP=0x%llx RP_LOCAL=0x%llx\n",
2268 chan_hdl, rp, wp, ctx->ring.rp_local);
2269
2270 return GSI_STATUS_SUCCESS;
2271}
2272EXPORT_SYMBOL(gsi_is_channel_empty);
2273
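/*
 * gsi_queue_xfer() - convert an array of gsi_xfer_elem descriptors
 * into TREs on the channel ring, rolling the whole batch back if any
 * element is invalid. A minimal, hypothetical caller sketch (field
 * values are illustrative only):
 *
 *	struct gsi_xfer_elem xfer = {
 *		.addr = dma_addr,
 *		.len = len,
 *		.type = GSI_XFER_ELEM_DATA,
 *		.flags = GSI_XFER_FLAG_EOT,
 *		.xfer_user_data = priv,
 *	};
 *	ret = gsi_queue_xfer(chan_hdl, 1, &xfer, true);
 */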
2274int gsi_queue_xfer(unsigned long chan_hdl, uint16_t num_xfers,
2275 struct gsi_xfer_elem *xfer, bool ring_db)
2276{
2277 struct gsi_chan_ctx *ctx;
2278 uint16_t free;
2279 struct gsi_tre tre;
2280 struct gsi_tre *tre_ptr;
2281 uint16_t idx;
2282 uint64_t wp_rollback;
2283 int i;
2284 spinlock_t *slock;
2285 unsigned long flags;
2286
2287 if (!gsi_ctx) {
2288 pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
2289 return -GSI_STATUS_NODEV;
2290 }
2291
2292	if (chan_hdl >= gsi_ctx->max_ch || !num_xfers || !xfer) {
2293		GSIERR("bad params chan_hdl=%lu num_xfers=%u xfer=%p\n",
2294 chan_hdl, num_xfers, xfer);
2295 return -GSI_STATUS_INVALID_PARAMS;
2296 }
2297
2298 ctx = &gsi_ctx->chan[chan_hdl];
2299
2300 if (ctx->props.prot != GSI_CHAN_PROT_GPI) {
2301 GSIERR("op not supported for protocol %u\n", ctx->props.prot);
2302 return -GSI_STATUS_UNSUPPORTED_OP;
2303 }
2304
2305 if (ctx->evtr)
2306 slock = &ctx->evtr->ring.slock;
2307 else
2308 slock = &ctx->ring.slock;
2309
2310 spin_lock_irqsave(slock, flags);
2311 __gsi_query_channel_free_re(ctx, &free);
2312
2313 if (num_xfers > free) {
2314 GSIERR("chan_hdl=%lu num_xfers=%u free=%u\n",
2315 chan_hdl, num_xfers, free);
2316 spin_unlock_irqrestore(slock, flags);
2317 return -GSI_STATUS_RING_INSUFFICIENT_SPACE;
2318 }
2319
2320 wp_rollback = ctx->ring.wp_local;
2321 for (i = 0; i < num_xfers; i++) {
2322 memset(&tre, 0, sizeof(tre));
2323 tre.buffer_ptr = xfer[i].addr;
2324 tre.buf_len = xfer[i].len;
2325 if (xfer[i].type == GSI_XFER_ELEM_DATA) {
2326 tre.re_type = GSI_RE_XFER;
2327 } else if (xfer[i].type == GSI_XFER_ELEM_IMME_CMD) {
2328 tre.re_type = GSI_RE_IMMD_CMD;
2329 } else {
2330 GSIERR("chan_hdl=%lu bad RE type=%u\n", chan_hdl,
2331 xfer[i].type);
2332 break;
2333 }
2334 tre.bei = (xfer[i].flags & GSI_XFER_FLAG_BEI) ? 1 : 0;
2335 tre.ieot = (xfer[i].flags & GSI_XFER_FLAG_EOT) ? 1 : 0;
2336 tre.ieob = (xfer[i].flags & GSI_XFER_FLAG_EOB) ? 1 : 0;
2337 tre.chain = (xfer[i].flags & GSI_XFER_FLAG_CHAIN) ? 1 : 0;
2338
2339 idx = gsi_find_idx_from_addr(&ctx->ring, ctx->ring.wp_local);
2340 tre_ptr = (struct gsi_tre *)(ctx->ring.base_va +
2341 idx * ctx->ring.elem_sz);
2342
2343 /* write the TRE to ring */
2344 *tre_ptr = tre;
2345 ctx->user_data[idx] = xfer[i].xfer_user_data;
2346 gsi_incr_ring_wp(&ctx->ring);
2347 }
2348
2349 if (i != num_xfers) {
2350 /* reject all the xfers */
2351 ctx->ring.wp_local = wp_rollback;
2352 spin_unlock_irqrestore(slock, flags);
2353 return -GSI_STATUS_INVALID_PARAMS;
2354 }
2355
2356 ctx->stats.queued += num_xfers;
2357
2358 /* ensure TRE is set before ringing doorbell */
2359 wmb();
2360
2361 if (ring_db)
2362 gsi_ring_chan_doorbell(ctx);
2363
2364 spin_unlock_irqrestore(slock, flags);
2365
2366 return GSI_STATUS_SUCCESS;
2367}
2368EXPORT_SYMBOL(gsi_queue_xfer);
2369
2370int gsi_start_xfer(unsigned long chan_hdl)
2371{
2372 struct gsi_chan_ctx *ctx;
2373
2374 if (!gsi_ctx) {
2375 pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
2376 return -GSI_STATUS_NODEV;
2377 }
2378
2379	if (chan_hdl >= gsi_ctx->max_ch) {
2380		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
2381 return -GSI_STATUS_INVALID_PARAMS;
2382 }
2383
2384 ctx = &gsi_ctx->chan[chan_hdl];
2385
2386 if (ctx->props.prot != GSI_CHAN_PROT_GPI) {
2387 GSIERR("op not supported for protocol %u\n", ctx->props.prot);
2388 return -GSI_STATUS_UNSUPPORTED_OP;
2389 }
2390
2391 if (ctx->state != GSI_CHAN_STATE_STARTED) {
2392 GSIERR("bad state %d\n", ctx->state);
2393 return -GSI_STATUS_UNSUPPORTED_OP;
2394 }
2395
2396 gsi_ring_chan_doorbell(ctx);
2397
2398 return GSI_STATUS_SUCCESS;
2399}
2400EXPORT_SYMBOL(gsi_start_xfer);
2401
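/*
 * gsi_poll_channel() - consume one completed event without relying on
 * interrupts. Returns GSI_STATUS_POLL_EMPTY when the hardware event
 * read pointer matches the local one, i.e. nothing new has completed;
 * callers would typically poll until that status and then switch back
 * to callback mode via gsi_config_channel_mode().
 */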
2402int gsi_poll_channel(unsigned long chan_hdl,
2403 struct gsi_chan_xfer_notify *notify)
2404{
2405 struct gsi_chan_ctx *ctx;
2406 uint64_t rp;
2407	int ee;
2408 unsigned long flags;
2409
2410 if (!gsi_ctx) {
2411 pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
2412 return -GSI_STATUS_NODEV;
2413 }
2414
2415	if (chan_hdl >= gsi_ctx->max_ch || !notify) {
2416		GSIERR("bad params chan_hdl=%lu notify=%p\n", chan_hdl, notify);
2417 return -GSI_STATUS_INVALID_PARAMS;
2418 }
2419
2420	ctx = &gsi_ctx->chan[chan_hdl];
	ee = gsi_ctx->per.ee;	/* deref gsi_ctx only after the NULL check */
2421
2422 if (ctx->props.prot != GSI_CHAN_PROT_GPI) {
2423 GSIERR("op not supported for protocol %u\n", ctx->props.prot);
2424 return -GSI_STATUS_UNSUPPORTED_OP;
2425 }
2426
2427 if (!ctx->evtr) {
2428 GSIERR("no event ring associated chan_hdl=%lu\n", chan_hdl);
2429 return -GSI_STATUS_UNSUPPORTED_OP;
2430 }
2431
2432 spin_lock_irqsave(&ctx->evtr->ring.slock, flags);
2433 rp = gsi_readl(gsi_ctx->base +
2434 GSI_EE_n_EV_CH_k_CNTXT_4_OFFS(ctx->evtr->id, ee));
2435 rp |= ((uint64_t)gsi_readl(gsi_ctx->base +
2436 GSI_EE_n_EV_CH_k_CNTXT_5_OFFS(ctx->evtr->id, ee))) << 32;
2437 ctx->evtr->ring.rp = rp;
2438 if (rp == ctx->evtr->ring.rp_local) {
2439 spin_unlock_irqrestore(&ctx->evtr->ring.slock, flags);
2440 ctx->stats.poll_empty++;
2441 return GSI_STATUS_POLL_EMPTY;
2442 }
2443
2444 gsi_process_evt_re(ctx->evtr, notify, false);
2445 gsi_ring_evt_doorbell(ctx->evtr);
2446 spin_unlock_irqrestore(&ctx->evtr->ring.slock, flags);
2447 ctx->stats.poll_ok++;
2448
2449 return GSI_STATUS_SUCCESS;
2450}
2451EXPORT_SYMBOL(gsi_poll_channel);
2452
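/*
 * gsi_config_channel_mode() - switch a channel with an exclusive
 * event ring between CALLBACK and POLL mode by unmasking/masking the
 * ring's IEOB interrupt. Together with gsi_poll_channel() this allows
 * a NAPI-style flow: mask on first completion, poll until empty,
 * unmask again.
 */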
2453int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode)
2454{
2455 struct gsi_chan_ctx *ctx;
2456 enum gsi_chan_mode curr;
2457 unsigned long flags;
2458
2459 if (!gsi_ctx) {
2460 pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
2461 return -GSI_STATUS_NODEV;
2462 }
2463
2464	if (chan_hdl >= gsi_ctx->max_ch) {
2465		GSIERR("bad params chan_hdl=%lu mode=%u\n", chan_hdl, mode);
2466 return -GSI_STATUS_INVALID_PARAMS;
2467 }
2468
2469 ctx = &gsi_ctx->chan[chan_hdl];
2470
2471 if (ctx->props.prot != GSI_CHAN_PROT_GPI) {
2472 GSIERR("op not supported for protocol %u\n", ctx->props.prot);
2473 return -GSI_STATUS_UNSUPPORTED_OP;
2474 }
2475
2476 if (!ctx->evtr || !ctx->evtr->props.exclusive) {
2477 GSIERR("cannot configure mode on chan_hdl=%lu\n",
2478 chan_hdl);
2479 return -GSI_STATUS_UNSUPPORTED_OP;
2480 }
2481
2482 if (atomic_read(&ctx->poll_mode))
2483 curr = GSI_CHAN_MODE_POLL;
2484 else
2485 curr = GSI_CHAN_MODE_CALLBACK;
2486
2487 if (mode == curr) {
2488 GSIERR("already in requested mode %u chan_hdl=%lu\n",
2489 curr, chan_hdl);
2490 return -GSI_STATUS_UNSUPPORTED_OP;
2491 }
2492
2493 spin_lock_irqsave(&gsi_ctx->slock, flags);
2494 if (curr == GSI_CHAN_MODE_CALLBACK &&
2495 mode == GSI_CHAN_MODE_POLL) {
2496 __gsi_config_ieob_irq(gsi_ctx->per.ee, 1 << ctx->evtr->id, 0);
2497 ctx->stats.callback_to_poll++;
2498 }
2499
2500 if (curr == GSI_CHAN_MODE_POLL &&
2501 mode == GSI_CHAN_MODE_CALLBACK) {
2502 __gsi_config_ieob_irq(gsi_ctx->per.ee, 1 << ctx->evtr->id, ~0);
2503 ctx->stats.poll_to_callback++;
2504 }
2505 atomic_set(&ctx->poll_mode, mode);
2506 spin_unlock_irqrestore(&gsi_ctx->slock, flags);
2507
2508 return GSI_STATUS_SUCCESS;
2509}
2510EXPORT_SYMBOL(gsi_config_channel_mode);
2511
2512int gsi_get_channel_cfg(unsigned long chan_hdl, struct gsi_chan_props *props,
2513 union gsi_channel_scratch *scr)
2514{
2515 struct gsi_chan_ctx *ctx;
2516
2517 if (!gsi_ctx) {
2518 pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
2519 return -GSI_STATUS_NODEV;
2520 }
2521
2522 if (!props || !scr) {
2523 GSIERR("bad params props=%p scr=%p\n", props, scr);
2524 return -GSI_STATUS_INVALID_PARAMS;
2525 }
2526
2527	if (chan_hdl >= gsi_ctx->max_ch) {
2528		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
2529 return -GSI_STATUS_INVALID_PARAMS;
2530 }
2531
2532 ctx = &gsi_ctx->chan[chan_hdl];
2533
2534 if (ctx->state == GSI_CHAN_STATE_NOT_ALLOCATED) {
2535 GSIERR("bad state %d\n", ctx->state);
2536 return -GSI_STATUS_UNSUPPORTED_OP;
2537 }
2538
2539 mutex_lock(&ctx->mlock);
2540 *props = ctx->props;
2541 *scr = ctx->scratch;
2542 mutex_unlock(&ctx->mlock);
2543
2544 return GSI_STATUS_SUCCESS;
2545}
2546EXPORT_SYMBOL(gsi_get_channel_cfg);
2547
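/*
 * gsi_set_channel_cfg() - replace the mutable properties (and,
 * optionally, scratch) of an ALLOCATED channel, then reprogram the
 * hardware channel context, ring state and scratch registers.
 * ch_id and evt_ring_hdl are immutable once allocated.
 */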
2548int gsi_set_channel_cfg(unsigned long chan_hdl, struct gsi_chan_props *props,
2549 union gsi_channel_scratch *scr)
2550{
2551 struct gsi_chan_ctx *ctx;
2552
2553 if (!gsi_ctx) {
2554 pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
2555 return -GSI_STATUS_NODEV;
2556 }
2557
2558 if (!props || gsi_validate_channel_props(props)) {
2559 GSIERR("bad params props=%p\n", props);
2560 return -GSI_STATUS_INVALID_PARAMS;
2561 }
2562
2563	if (chan_hdl >= gsi_ctx->max_ch) {
2564		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
2565 return -GSI_STATUS_INVALID_PARAMS;
2566 }
2567
2568 ctx = &gsi_ctx->chan[chan_hdl];
2569
2570 if (ctx->state != GSI_CHAN_STATE_ALLOCATED) {
2571 GSIERR("bad state %d\n", ctx->state);
2572 return -GSI_STATUS_UNSUPPORTED_OP;
2573 }
2574
2575 if (ctx->props.ch_id != props->ch_id ||
2576 ctx->props.evt_ring_hdl != props->evt_ring_hdl) {
2577 GSIERR("changing immutable fields not supported\n");
2578 return -GSI_STATUS_UNSUPPORTED_OP;
2579 }
2580
2581 mutex_lock(&ctx->mlock);
2582 ctx->props = *props;
2583 if (scr)
2584 ctx->scratch = *scr;
2585 gsi_program_chan_ctx(&ctx->props, gsi_ctx->per.ee,
2586 ctx->evtr ? ctx->evtr->id : GSI_NO_EVT_ERINDEX);
2587 gsi_init_chan_ring(&ctx->props, &ctx->ring);
2588
2589 /* restore scratch */
2590 __gsi_write_channel_scratch(chan_hdl, ctx->scratch);
2591 mutex_unlock(&ctx->mlock);
2592
2593 return GSI_STATUS_SUCCESS;
2594}
2595EXPORT_SYMBOL(gsi_set_channel_cfg);
2596
2597static void gsi_configure_ieps(void __iomem *gsi_base)
2598{
2600
2601 gsi_writel(1, gsi_base + GSI_GSI_IRAM_PTR_CH_CMD_OFFS);
2602 gsi_writel(2, gsi_base + GSI_GSI_IRAM_PTR_CH_DB_OFFS);
2603 gsi_writel(3, gsi_base + GSI_GSI_IRAM_PTR_CH_DIS_COMP_OFFS);
2604 gsi_writel(4, gsi_base + GSI_GSI_IRAM_PTR_CH_EMPTY_OFFS);
2605 gsi_writel(5, gsi_base + GSI_GSI_IRAM_PTR_EE_GENERIC_CMD_OFFS);
2606 gsi_writel(6, gsi_base + GSI_GSI_IRAM_PTR_EVENT_GEN_COMP_OFFS);
2607 gsi_writel(7, gsi_base + GSI_GSI_IRAM_PTR_INT_MOD_STOPED_OFFS);
2608	gsi_writel(8, gsi_base + GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_OFFS);
2609	gsi_writel(9, gsi_base + GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_OFFS);
2610	gsi_writel(10, gsi_base + GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_OFFS);
2611	gsi_writel(11, gsi_base + GSI_GSI_IRAM_PTR_NEW_RE_OFFS);
2612 gsi_writel(12, gsi_base + GSI_GSI_IRAM_PTR_READ_ENG_COMP_OFFS);
2613 gsi_writel(13, gsi_base + GSI_GSI_IRAM_PTR_TIMER_EXPIRED_OFFS);
2614}
2615
2616static void gsi_configure_bck_prs_matrix(void __iomem *gsi_base)
2617{
2619 /*
2620	 * For now, these are default values. In the future, the GSI FW
2621	 * image will provide optimized back-pressure values.
2622 */
2623 gsi_writel(0xfffffffe,
2624 gsi_base + GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_OFFS);
2625 gsi_writel(0xffffffff,
2626 gsi_base + GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_OFFS);
2627 gsi_writel(0xffffffbf, gsi_base + GSI_IC_GEN_EVNT_BCK_PRS_LSB_OFFS);
2628 gsi_writel(0xffffffff, gsi_base + GSI_IC_GEN_EVNT_BCK_PRS_MSB_OFFS);
2629 gsi_writel(0xffffefff, gsi_base + GSI_IC_GEN_INT_BCK_PRS_LSB_OFFS);
2630 gsi_writel(0xffffffff, gsi_base + GSI_IC_GEN_INT_BCK_PRS_MSB_OFFS);
2631 gsi_writel(0xffffefff,
2632 gsi_base + GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_OFFS);
2633 gsi_writel(0xffffffff,
2634 gsi_base + GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_OFFS);
2635 gsi_writel(0x00000000,
2636 gsi_base + GSI_IC_PROCESS_DESC_BCK_PRS_LSB_OFFS);
2637 gsi_writel(0x00000000,
2638 gsi_base + GSI_IC_PROCESS_DESC_BCK_PRS_MSB_OFFS);
2639	gsi_writel(0xf9ffffff, gsi_base + GSI_IC_TLV_STOP_BCK_PRS_LSB_OFFS);
2640	gsi_writel(0xffffffff, gsi_base + GSI_IC_TLV_STOP_BCK_PRS_MSB_OFFS);
2641	gsi_writel(0xf9ffffff, gsi_base + GSI_IC_TLV_RESET_BCK_PRS_LSB_OFFS);
2642	gsi_writel(0xffffffff, gsi_base + GSI_IC_TLV_RESET_BCK_PRS_MSB_OFFS);
2643 gsi_writel(0xffffffff, gsi_base + GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_OFFS);
2644 gsi_writel(0xfffffffe, gsi_base + GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_OFFS);
2645 gsi_writel(0xffffffff, gsi_base + GSI_IC_READ_BCK_PRS_LSB_OFFS);
2646 gsi_writel(0xffffefff, gsi_base + GSI_IC_READ_BCK_PRS_MSB_OFFS);
2647 gsi_writel(0xffffffff, gsi_base + GSI_IC_WRITE_BCK_PRS_LSB_OFFS);
2648 gsi_writel(0xffffdfff, gsi_base + GSI_IC_WRITE_BCK_PRS_MSB_OFFS);
2649 gsi_writel(0xffffffff,
2650 gsi_base + GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_OFFS);
2651 gsi_writel(0xff03ffff,
2652 gsi_base + GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_OFFS);
2653}
2654
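/*
 * gsi_configure_regs() - map the GSI register space just long enough
 * to program the peripheral base address, the back-pressure matrix
 * and the IRAM instruction entry pointers, then unmap it again.
 */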
2655int gsi_configure_regs(phys_addr_t gsi_base_addr, u32 gsi_size,
2656 phys_addr_t per_base_addr)
2657{
2658 void __iomem *gsi_base;
2659
2660 gsi_base = ioremap_nocache(gsi_base_addr, gsi_size);
2661 if (!gsi_base) {
2662 GSIERR("ioremap failed for 0x%pa\n", &gsi_base_addr);
2663 return -GSI_STATUS_RES_ALLOC_FAILURE;
2664 }
2665 gsi_writel(0, gsi_base + GSI_GSI_PERIPH_BASE_ADDR_MSB_OFFS);
2666 gsi_writel(per_base_addr,
2667 gsi_base + GSI_GSI_PERIPH_BASE_ADDR_LSB_OFFS);
2668	gsi_configure_bck_prs_matrix(gsi_base);
2669	gsi_configure_ieps(gsi_base);
2670 iounmap(gsi_base);
2671
2672 return 0;
2673}
2674EXPORT_SYMBOL(gsi_configure_regs);
2675
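/*
 * gsi_enable_fw() - enable the GSI core and its MCS sequencer with a
 * doubled MCS clock. From GSI v1.2 onward the MCS enable bit lives in
 * GSI_GSI_MCS_CFG rather than GSI_GSI_CFG, hence the two code paths.
 */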
2676int gsi_enable_fw(phys_addr_t gsi_base_addr, u32 gsi_size)
2677{
2678 void __iomem *gsi_base;
2679 uint32_t value;
2680
2681 gsi_base = ioremap_nocache(gsi_base_addr, gsi_size);
2682 if (!gsi_base) {
2683 GSIERR("ioremap failed for 0x%pa\n", &gsi_base_addr);
2684 return -GSI_STATUS_RES_ALLOC_FAILURE;
2685 }
2686
2687 /* Enable the MCS and set to x2 clocks */
2688	if (gsi_ctx->per.ver >= GSI_VER_1_2) {
2689 value = ((1 << GSI_GSI_MCS_CFG_MCS_ENABLE_SHFT) &
2690 GSI_GSI_MCS_CFG_MCS_ENABLE_BMSK);
2691 gsi_writel(value, gsi_base + GSI_GSI_MCS_CFG_OFFS);
2692
2693 value = (((1 << GSI_GSI_CFG_GSI_ENABLE_SHFT) &
2694 GSI_GSI_CFG_GSI_ENABLE_BMSK) |
2695 ((0 << GSI_GSI_CFG_MCS_ENABLE_SHFT) &
2696 GSI_GSI_CFG_MCS_ENABLE_BMSK) |
2697 ((1 << GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_SHFT) &
2698 GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_BMSK) |
2699 ((0 << GSI_GSI_CFG_UC_IS_MCS_SHFT) &
2700 GSI_GSI_CFG_UC_IS_MCS_BMSK) |
2701 ((0 << GSI_GSI_CFG_GSI_PWR_CLPS_SHFT) &
2702 GSI_GSI_CFG_GSI_PWR_CLPS_BMSK) |
2703 ((0 << GSI_GSI_CFG_BP_MTRIX_DISABLE_SHFT) &
2704 GSI_GSI_CFG_BP_MTRIX_DISABLE_BMSK));
2705 gsi_writel(value, gsi_base + GSI_GSI_CFG_OFFS);
2706 } else {
2707 value = (((1 << GSI_GSI_CFG_GSI_ENABLE_SHFT) &
2708 GSI_GSI_CFG_GSI_ENABLE_BMSK) |
2709 ((1 << GSI_GSI_CFG_MCS_ENABLE_SHFT) &
2710 GSI_GSI_CFG_MCS_ENABLE_BMSK) |
2711 ((1 << GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_SHFT) &
2712 GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_BMSK) |
2713 ((0 << GSI_GSI_CFG_UC_IS_MCS_SHFT) &
2714 GSI_GSI_CFG_UC_IS_MCS_BMSK));
2715 gsi_writel(value, gsi_base + GSI_GSI_CFG_OFFS);
2716 }
2717
2718 iounmap(gsi_base);
2719
2720 return 0;
2721
2722}
2723EXPORT_SYMBOL(gsi_enable_fw);
2724
2725static int msm_gsi_probe(struct platform_device *pdev)
2726{
2727 struct device *dev = &pdev->dev;
2728
2729 pr_debug("gsi_probe\n");
2730 gsi_ctx = devm_kzalloc(dev, sizeof(*gsi_ctx), GFP_KERNEL);
2731 if (!gsi_ctx) {
2732		dev_err(dev, "failed to allocate gsi context\n");
2733 return -ENOMEM;
2734 }
2735
2736 gsi_ctx->dev = dev;
2737 gsi_debugfs_init();
2738
2739 return 0;
2740}
2741
2742static struct platform_driver msm_gsi_driver = {
2743 .probe = msm_gsi_probe,
2744 .driver = {
2745 .owner = THIS_MODULE,
2746 .name = "gsi",
2747 .of_match_table = msm_gsi_match,
2748 },
2749};
2750
2751/**
2752 * Module Init.
2753 */
2754static int __init gsi_init(void)
2755{
2756 pr_debug("gsi_init\n");
2757 return platform_driver_register(&msm_gsi_driver);
2758}
2759
2760arch_initcall(gsi_init);
2761
2762MODULE_LICENSE("GPL v2");
2763MODULE_DESCRIPTION("Generic Software Interface (GSI)");