/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <asm/synch.h>
#include <misc/cxl-base.h>

#include "cxl.h"
#include "trace.h"

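/*
 * Write an AFU control command to CXL_AFU_Cntl_An, then spin (for up to
 * CXL_TIMEOUT seconds) until the masked status field reads back the
 * expected result, bailing out early if the card link goes down.
 * afu->enabled is updated to reflect the requested state.
 */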
static int afu_control(struct cxl_afu *afu, u64 command,
		       u64 result, u64 mask, bool enabled)
{
	u64 AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
	int rc = 0;

	spin_lock(&afu->afu_cntl_lock);
	pr_devel("AFU command starting: %llx\n", command);

	trace_cxl_afu_ctrl(afu, command);

	cxl_p2n_write(afu, CXL_AFU_Cntl_An, AFU_Cntl | command);

	AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	while ((AFU_Cntl & mask) != result) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&afu->dev, "WARNING: AFU control timed out!\n");
			rc = -EBUSY;
			goto out;
		}

		if (!cxl_ops->link_ok(afu->adapter)) {
			afu->enabled = enabled;
			rc = -EIO;
			goto out;
		}

		pr_devel_ratelimited("AFU control... (0x%016llx)\n",
				     AFU_Cntl | command);
		cpu_relax();
		AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	}
	pr_devel("AFU command complete: %llx\n", command);
	afu->enabled = enabled;
out:
	trace_cxl_afu_ctrl_done(afu, command, rc);
	spin_unlock(&afu->afu_cntl_lock);

	return rc;
}

static int afu_enable(struct cxl_afu *afu)
{
	pr_devel("AFU enable request\n");

	return afu_control(afu, CXL_AFU_Cntl_An_E,
			   CXL_AFU_Cntl_An_ES_Enabled,
			   CXL_AFU_Cntl_An_ES_MASK, true);
}

int cxl_afu_disable(struct cxl_afu *afu)
{
	pr_devel("AFU disable request\n");

	return afu_control(afu, 0, CXL_AFU_Cntl_An_ES_Disabled,
			   CXL_AFU_Cntl_An_ES_MASK, false);
}

/* This will disable as well as reset */
static int __cxl_afu_reset(struct cxl_afu *afu)
{
	pr_devel("AFU reset request\n");

	return afu_control(afu, CXL_AFU_Cntl_An_RA,
			   CXL_AFU_Cntl_An_RS_Complete | CXL_AFU_Cntl_An_ES_Disabled,
			   CXL_AFU_Cntl_An_RS_MASK | CXL_AFU_Cntl_An_ES_MASK,
			   false);
}

static int cxl_afu_check_and_enable(struct cxl_afu *afu)
{
	if (!cxl_ops->link_ok(afu->adapter)) {
		WARN(1, "Refusing to enable afu while link down!\n");
		return -EIO;
	}
	if (afu->enabled)
		return 0;
	return afu_enable(afu);
}

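/*
 * Purge the PSL for this slice: set PSL_SCNTL_An[Pc] and poll until the
 * purge status leaves the "pending" state, acknowledging or aborting any
 * faults the PSL reports along the way so the purge can drain, then
 * clear the purge control bit again.
 */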
int cxl_psl_purge(struct cxl_afu *afu)
{
	u64 PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
	u64 AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	u64 dsisr, dar;
	u64 start, end;
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
	int rc = 0;

	trace_cxl_psl_ctrl(afu, CXL_PSL_SCNTL_An_Pc);

	pr_devel("PSL purge request\n");

	if (!cxl_ops->link_ok(afu->adapter)) {
		dev_warn(&afu->dev, "PSL Purge called with link down, ignoring\n");
		rc = -EIO;
		goto out;
	}

	if ((AFU_Cntl & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) {
		WARN(1, "psl_purge request while AFU not disabled!\n");
		cxl_afu_disable(afu);
	}

	cxl_p1n_write(afu, CXL_PSL_SCNTL_An,
		      PSL_CNTL | CXL_PSL_SCNTL_An_Pc);
	start = local_clock();
	PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
	while ((PSL_CNTL & CXL_PSL_SCNTL_An_Ps_MASK)
			== CXL_PSL_SCNTL_An_Ps_Pending) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&afu->dev, "WARNING: PSL Purge timed out!\n");
			rc = -EBUSY;
			goto out;
		}
		if (!cxl_ops->link_ok(afu->adapter)) {
			rc = -EIO;
			goto out;
		}

		dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
		pr_devel_ratelimited("PSL purging... PSL_CNTL: 0x%016llx  PSL_DSISR: 0x%016llx\n", PSL_CNTL, dsisr);
		if (dsisr & CXL_PSL_DSISR_TRANS) {
			dar = cxl_p2n_read(afu, CXL_PSL_DAR_An);
			dev_notice(&afu->dev, "PSL purge terminating pending translation, DSISR: 0x%016llx, DAR: 0x%016llx\n", dsisr, dar);
			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
		} else if (dsisr) {
			dev_notice(&afu->dev, "PSL purge acknowledging pending non-translation fault, DSISR: 0x%016llx\n", dsisr);
			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
		} else {
			cpu_relax();
		}
		PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
	}
	end = local_clock();
	pr_devel("PSL purged in %lld ns\n", end - start);

	cxl_p1n_write(afu, CXL_PSL_SCNTL_An,
		      PSL_CNTL & ~CXL_PSL_SCNTL_An_Pc);
out:
	trace_cxl_psl_ctrl_done(afu, CXL_PSL_SCNTL_An_Pc, rc);
	return rc;
}

static int spa_max_procs(int spa_size)
{
	/*
	 * From the CAIA:
	 * end_of_SPA_area = SPA_Base + ((n+4) * 128) + (( ((n*8) + 127) >> 7) * 128) + 255
	 * Most of that junk is really just an overly-complicated way of saying
	 * the last 256 bytes are __aligned(128), so it's really:
	 * end_of_SPA_area = end_of_PSL_queue_area + __aligned(128) 255
	 * and
	 * end_of_PSL_queue_area = SPA_Base + ((n+4) * 128) + (n*8) - 1
	 * so
	 * sizeof(SPA) = ((n+4) * 128) + (n*8) + __aligned(128) 256
	 * Ignore the alignment (which is safe in this case as long as we are
	 * careful with our rounding) and solve for n:
	 */
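	/*
	 * A sketch of the algebra: sizeof(SPA) ~= 128*(n+4) + 8*n + 256
	 *                                       = 136*n + 768
	 * so n = (spa_size - 768) / 136, i.e. ((spa_size / 8) - 96) / 17.
	 */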
	return ((spa_size / 8) - 96) / 17;
}

int cxl_alloc_spa(struct cxl_afu *afu)
{
	/* Work out how many pages to allocate */
	afu->spa_order = 0;
	do {
		afu->spa_order++;
		afu->spa_size = (1 << afu->spa_order) * PAGE_SIZE;
		afu->spa_max_procs = spa_max_procs(afu->spa_size);
	} while (afu->spa_max_procs < afu->num_procs);

	WARN_ON(afu->spa_size > 0x100000); /* Max size supported by the hardware */

	if (!(afu->spa = (struct cxl_process_element *)
	      __get_free_pages(GFP_KERNEL | __GFP_ZERO, afu->spa_order))) {
		pr_err("cxl_alloc_spa: Unable to allocate scheduled process area\n");
		return -ENOMEM;
	}
	pr_devel("spa pages: %i afu->spa_max_procs: %i afu->num_procs: %i\n",
		 1<<afu->spa_order, afu->spa_max_procs, afu->num_procs);

	return 0;
}

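/*
 * Point the PSL at the scheduled process area: the software command
 * status doubleword lives just past the process element entries, and
 * the SPA physical address, encoded size and valid bit are programmed
 * into CXL_PSL_SPAP_An.
 */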
static void attach_spa(struct cxl_afu *afu)
{
	u64 spap;

	afu->sw_command_status = (__be64 *)((char *)afu->spa +
					    ((afu->spa_max_procs + 3) * 128));

	spap = virt_to_phys(afu->spa) & CXL_PSL_SPAP_Addr;
	spap |= ((afu->spa_size >> (12 - CXL_PSL_SPAP_Size_Shift)) - 1) & CXL_PSL_SPAP_Size;
	spap |= CXL_PSL_SPAP_V;
	pr_devel("cxl: SPA allocated at 0x%p. Max processes: %i, sw_command_status: 0x%p CXL_PSL_SPAP_An=0x%016llx\n", afu->spa, afu->spa_max_procs, afu->sw_command_status, spap);
	cxl_p1n_write(afu, CXL_PSL_SPAP_An, spap);
}

static inline void detach_spa(struct cxl_afu *afu)
{
	cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0);
}

void cxl_release_spa(struct cxl_afu *afu)
{
	if (afu->spa) {
		free_pages((unsigned long) afu->spa, afu->spa_order);
		afu->spa = NULL;
	}
}

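/*
 * Adapter-wide TLB and SLB invalidation: select all AFUs, kick off a
 * TLBIA followed by an SLBIA, and poll the pending bit of each until
 * the PSL reports completion (or we time out / lose the link).
 */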
int cxl_tlb_slb_invalidate(struct cxl *adapter)
{
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);

	pr_devel("CXL adapter wide TLBIA & SLBIA\n");

	cxl_p1_write(adapter, CXL_PSL_AFUSEL, CXL_PSL_AFUSEL_A);

	cxl_p1_write(adapter, CXL_PSL_TLBIA, CXL_TLB_SLB_IQ_ALL);
	while (cxl_p1_read(adapter, CXL_PSL_TLBIA) & CXL_TLB_SLB_P) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&adapter->dev, "WARNING: CXL adapter wide TLBIA timed out!\n");
			return -EBUSY;
		}
		if (!cxl_ops->link_ok(adapter))
			return -EIO;
		cpu_relax();
	}

	cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_ALL);
	while (cxl_p1_read(adapter, CXL_PSL_SLBIA) & CXL_TLB_SLB_P) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&adapter->dev, "WARNING: CXL adapter wide SLBIA timed out!\n");
			return -EBUSY;
		}
		if (!cxl_ops->link_ok(adapter))
			return -EIO;
		cpu_relax();
	}
	return 0;
}

static int cxl_write_sstp(struct cxl_afu *afu, u64 sstp0, u64 sstp1)
{
	int rc;

	/* 1. Disable SSTP by writing 0 to SSTP1[V] */
	cxl_p2n_write(afu, CXL_SSTP1_An, 0);

	/* 2. Invalidate all SLB entries */
	if ((rc = cxl_afu_slbia(afu)))
		return rc;

	/* 3. Set SSTP0_An */
	cxl_p2n_write(afu, CXL_SSTP0_An, sstp0);

	/* 4. Set SSTP1_An */
	cxl_p2n_write(afu, CXL_SSTP1_An, sstp1);

	return 0;
}

/* Using per slice version may improve performance here. (ie. SLBIA_An) */
static void slb_invalid(struct cxl_context *ctx)
{
	struct cxl *adapter = ctx->afu->adapter;
	u64 slbia;

	WARN_ON(!mutex_is_locked(&ctx->afu->spa_mutex));

	cxl_p1_write(adapter, CXL_PSL_LBISEL,
		     ((u64)be32_to_cpu(ctx->elem->common.pid) << 32) |
		     be32_to_cpu(ctx->elem->lpid));
	cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_LPIDPID);

	while (1) {
		if (!cxl_ops->link_ok(adapter))
			break;
		slbia = cxl_p1_read(adapter, CXL_PSL_SLBIA);
		if (!(slbia & CXL_TLB_SLB_P))
			break;
		cpu_relax();
	}
}

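/*
 * Issue a linked-list command against a process element: update the PE
 * software state, seed sw_command_status with the command and PE handle,
 * write CXL_PSL_LLCMD_An, then poll sw_command_status until the PSL
 * echoes back the command, the expected state and the PE handle.
 */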
static int do_process_element_cmd(struct cxl_context *ctx,
				  u64 cmd, u64 pe_state)
{
	u64 state;
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
	int rc = 0;

	trace_cxl_llcmd(ctx, cmd);

	WARN_ON(!ctx->afu->enabled);

	ctx->elem->software_state = cpu_to_be32(pe_state);
	smp_wmb();
	*(ctx->afu->sw_command_status) = cpu_to_be64(cmd | 0 | ctx->pe);
	smp_mb();
	cxl_p1n_write(ctx->afu, CXL_PSL_LLCMD_An, cmd | ctx->pe);
	while (1) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&ctx->afu->dev, "WARNING: Process Element Command timed out!\n");
			rc = -EBUSY;
			goto out;
		}
		if (!cxl_ops->link_ok(ctx->afu->adapter)) {
			dev_warn(&ctx->afu->dev, "WARNING: Device link down, aborting Process Element Command!\n");
			rc = -EIO;
			goto out;
		}
		state = be64_to_cpup(ctx->afu->sw_command_status);
		if (state == ~0ULL) {
			pr_err("cxl: Error adding process element to AFU\n");
			rc = -1;
			goto out;
		}
		if ((state & (CXL_SPA_SW_CMD_MASK | CXL_SPA_SW_STATE_MASK | CXL_SPA_SW_LINK_MASK)) ==
		    (cmd | (cmd >> 16) | ctx->pe))
			break;
		/*
		 * The command won't finish in the PSL if there are
		 * outstanding DSIs. Hence we need to yield here in
		 * case there are outstanding DSIs that we need to
		 * service. Tuning possibility: we could wait for a
		 * while before sched
		 */
		schedule();
	}
out:
	trace_cxl_llcmd_done(ctx, cmd, rc);
	return rc;
}

static int add_process_element(struct cxl_context *ctx)
{
	int rc = 0;

	mutex_lock(&ctx->afu->spa_mutex);
	pr_devel("%s Adding pe: %i started\n", __func__, ctx->pe);
	if (!(rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_ADD, CXL_PE_SOFTWARE_STATE_V)))
		ctx->pe_inserted = true;
	pr_devel("%s Adding pe: %i finished\n", __func__, ctx->pe);
	mutex_unlock(&ctx->afu->spa_mutex);
	return rc;
}

static int terminate_process_element(struct cxl_context *ctx)
{
	int rc = 0;

	/* fast path terminate if it's already invalid */
	if (!(ctx->elem->software_state & cpu_to_be32(CXL_PE_SOFTWARE_STATE_V)))
		return rc;

	mutex_lock(&ctx->afu->spa_mutex);
	pr_devel("%s Terminate pe: %i started\n", __func__, ctx->pe);
	/* We could be asked to terminate when the hw is down. That
	 * should always succeed: it's not running if the hw has gone
	 * away and is being reset.
	 */
	if (cxl_ops->link_ok(ctx->afu->adapter))
		rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_TERMINATE,
					    CXL_PE_SOFTWARE_STATE_V | CXL_PE_SOFTWARE_STATE_T);
	ctx->elem->software_state = 0;	/* Remove Valid bit */
	pr_devel("%s Terminate pe: %i finished\n", __func__, ctx->pe);
	mutex_unlock(&ctx->afu->spa_mutex);
	return rc;
}

static int remove_process_element(struct cxl_context *ctx)
{
	int rc = 0;

	mutex_lock(&ctx->afu->spa_mutex);
	pr_devel("%s Remove pe: %i started\n", __func__, ctx->pe);

	/* We could be asked to remove when the hw is down. Again, if
	 * the hw is down, the PE is gone, so we succeed.
	 */
	if (cxl_ops->link_ok(ctx->afu->adapter))
		rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_REMOVE, 0);

	if (!rc)
		ctx->pe_inserted = false;
	slb_invalid(ctx);
	pr_devel("%s Remove pe: %i finished\n", __func__, ctx->pe);
	mutex_unlock(&ctx->afu->spa_mutex);

	return rc;
}


void cxl_assign_psn_space(struct cxl_context *ctx)
{
	if (!ctx->afu->pp_size || ctx->master) {
		ctx->psn_phys = ctx->afu->psn_phys;
		ctx->psn_size = ctx->afu->adapter->ps_size;
	} else {
		ctx->psn_phys = ctx->afu->psn_phys +
			(ctx->afu->pp_offset + ctx->afu->pp_size * ctx->pe);
		ctx->psn_size = ctx->afu->pp_size;
	}
}

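/*
 * AFU-directed mode: allocate (if needed) and attach the scheduled
 * process area, program the PSL for AFU-directed operation and create
 * the master/slave character devices for this AFU.
 */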
static int activate_afu_directed(struct cxl_afu *afu)
{
	int rc;

	dev_info(&afu->dev, "Activating AFU directed mode\n");

	afu->num_procs = afu->max_procs_virtualised;
	if (afu->spa == NULL) {
		if (cxl_alloc_spa(afu))
			return -ENOMEM;
	}
	attach_spa(afu);

	cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_AFU);
	cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL);
	cxl_p1n_write(afu, CXL_PSL_ID_An, CXL_PSL_ID_An_F | CXL_PSL_ID_An_L);

	afu->current_mode = CXL_MODE_DIRECTED;

	if ((rc = cxl_chardev_m_afu_add(afu)))
		return rc;

	if ((rc = cxl_sysfs_afu_m_add(afu)))
		goto err;

	if ((rc = cxl_chardev_s_afu_add(afu)))
		goto err1;

	return 0;
err1:
	cxl_sysfs_afu_m_remove(afu);
err:
	cxl_chardev_afu_remove(afu);
	return rc;
}

#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define set_endian(sr) ((sr) |= CXL_PSL_SR_An_LE)
#else
#define set_endian(sr) ((sr) &= ~(CXL_PSL_SR_An_LE))
#endif

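/*
 * Build the PSL State Register value for a context: endianness, master
 * (MP), tags-active (TC), and kernel vs. user privilege (HV/R vs. PR/R),
 * with SF set for 64-bit contexts.
 */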
static u64 calculate_sr(struct cxl_context *ctx)
{
	u64 sr = 0;

	set_endian(sr);
	if (ctx->master)
		sr |= CXL_PSL_SR_An_MP;
	if (mfspr(SPRN_LPCR) & LPCR_TC)
		sr |= CXL_PSL_SR_An_TC;
	if (ctx->kernel) {
		sr |= CXL_PSL_SR_An_R | (mfmsr() & MSR_SF);
		sr |= CXL_PSL_SR_An_HV;
	} else {
		sr |= CXL_PSL_SR_An_PR | CXL_PSL_SR_An_R;
		sr &= ~(CXL_PSL_SR_An_HV);
		if (!test_tsk_thread_flag(current, TIF_32BIT))
			sr |= CXL_PSL_SR_An_SF;
	}
	return sr;
}

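/*
 * Fill in this context's process element (PID, state register, segment
 * table, interrupt ranges, AMR and WED), make sure the AFU is enabled,
 * then ask the PSL to add the element to the scheduled process area.
 */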
static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr)
{
	u32 pid;
	int r, result;

	cxl_assign_psn_space(ctx);

	ctx->elem->ctxtime = 0; /* disable */
	ctx->elem->lpid = cpu_to_be32(mfspr(SPRN_LPID));
	ctx->elem->haurp = 0; /* disable */
	ctx->elem->sdr = cpu_to_be64(mfspr(SPRN_SDR1));

	pid = current->pid;
	if (ctx->kernel)
		pid = 0;
	ctx->elem->common.tid = 0;
	ctx->elem->common.pid = cpu_to_be32(pid);

	ctx->elem->sr = cpu_to_be64(calculate_sr(ctx));

	ctx->elem->common.csrp = 0; /* disable */
	ctx->elem->common.aurp0 = 0; /* disable */
	ctx->elem->common.aurp1 = 0; /* disable */

	cxl_prefault(ctx, wed);

	ctx->elem->common.sstp0 = cpu_to_be64(ctx->sstp0);
	ctx->elem->common.sstp1 = cpu_to_be64(ctx->sstp1);

	for (r = 0; r < CXL_IRQ_RANGES; r++) {
		ctx->elem->ivte_offsets[r] = cpu_to_be16(ctx->irqs.offset[r]);
		ctx->elem->ivte_ranges[r] = cpu_to_be16(ctx->irqs.range[r]);
	}

	ctx->elem->common.amr = cpu_to_be64(amr);
	ctx->elem->common.wed = cpu_to_be64(wed);

	/* first guy needs to enable */
	if ((result = cxl_ops->afu_check_and_enable(ctx->afu)))
		return result;

	return add_process_element(ctx);
}

static int deactivate_afu_directed(struct cxl_afu *afu)
{
	dev_info(&afu->dev, "Deactivating AFU directed mode\n");

	afu->current_mode = 0;
	afu->num_procs = 0;

	cxl_sysfs_afu_m_remove(afu);
	cxl_chardev_afu_remove(afu);

	cxl_ops->afu_reset(afu);
	cxl_afu_disable(afu);
	cxl_psl_purge(afu);

	return 0;
}

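/*
 * Dedicated-process mode: a single context owns the AFU, so the
 * per-process state is programmed straight into the slice registers
 * rather than through a scheduled process area.
 */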
static int activate_dedicated_process(struct cxl_afu *afu)
{
	dev_info(&afu->dev, "Activating dedicated process mode\n");

	cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_Process);

	cxl_p1n_write(afu, CXL_PSL_CtxTime_An, 0); /* disable */
	cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0);    /* disable */
	cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL);
	cxl_p1n_write(afu, CXL_PSL_LPID_An, mfspr(SPRN_LPID));
	cxl_p1n_write(afu, CXL_HAURP_An, 0);       /* disable */
	cxl_p1n_write(afu, CXL_PSL_SDR_An, mfspr(SPRN_SDR1));

	cxl_p2n_write(afu, CXL_CSRP_An, 0);        /* disable */
	cxl_p2n_write(afu, CXL_AURP0_An, 0);       /* disable */
	cxl_p2n_write(afu, CXL_AURP1_An, 0);       /* disable */

	afu->current_mode = CXL_MODE_DEDICATED;
	afu->num_procs = 1;

	return cxl_chardev_d_afu_add(afu);
}

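/*
 * The dedicated-process analogue of attach_afu_directed(): write the
 * PID/TID, state register, segment table and IVTE ranges directly to
 * the slice, reset the AFU, hand it the WED and enable it.
 */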
static int attach_dedicated(struct cxl_context *ctx, u64 wed, u64 amr)
{
	struct cxl_afu *afu = ctx->afu;
	u64 pid;
	int rc;

	pid = (u64)current->pid << 32;
	if (ctx->kernel)
		pid = 0;
	cxl_p2n_write(afu, CXL_PSL_PID_TID_An, pid);

	cxl_p1n_write(afu, CXL_PSL_SR_An, calculate_sr(ctx));

	if ((rc = cxl_write_sstp(afu, ctx->sstp0, ctx->sstp1)))
		return rc;

	cxl_prefault(ctx, wed);

	cxl_p1n_write(afu, CXL_PSL_IVTE_Offset_An,
		      (((u64)ctx->irqs.offset[0] & 0xffff) << 48) |
		      (((u64)ctx->irqs.offset[1] & 0xffff) << 32) |
		      (((u64)ctx->irqs.offset[2] & 0xffff) << 16) |
		       ((u64)ctx->irqs.offset[3] & 0xffff));
	cxl_p1n_write(afu, CXL_PSL_IVTE_Limit_An, (u64)
		      (((u64)ctx->irqs.range[0] & 0xffff) << 48) |
		      (((u64)ctx->irqs.range[1] & 0xffff) << 32) |
		      (((u64)ctx->irqs.range[2] & 0xffff) << 16) |
		       ((u64)ctx->irqs.range[3] & 0xffff));

	cxl_p2n_write(afu, CXL_PSL_AMR_An, amr);

	/* master only context for dedicated */
	cxl_assign_psn_space(ctx);

	if ((rc = cxl_ops->afu_reset(afu)))
		return rc;

	cxl_p2n_write(afu, CXL_PSL_WED_An, wed);

	return afu_enable(afu);
}

static int deactivate_dedicated_process(struct cxl_afu *afu)
{
	dev_info(&afu->dev, "Deactivating dedicated process mode\n");

	afu->current_mode = 0;
	afu->num_procs = 0;

	cxl_chardev_afu_remove(afu);

	return 0;
}

static int cxl_afu_deactivate_mode(struct cxl_afu *afu, int mode)
{
	if (mode == CXL_MODE_DIRECTED)
		return deactivate_afu_directed(afu);
	if (mode == CXL_MODE_DEDICATED)
		return deactivate_dedicated_process(afu);
	return 0;
}

static int cxl_afu_activate_mode(struct cxl_afu *afu, int mode)
{
	if (!mode)
		return 0;
	if (!(mode & afu->modes_supported))
		return -EINVAL;

	if (!cxl_ops->link_ok(afu->adapter)) {
		WARN(1, "Device link is down, refusing to activate!\n");
		return -EIO;
	}

	if (mode == CXL_MODE_DIRECTED)
		return activate_afu_directed(afu);
	if (mode == CXL_MODE_DEDICATED)
		return activate_dedicated_process(afu);

	return -EINVAL;
}

static int cxl_attach_process(struct cxl_context *ctx, bool kernel, u64 wed, u64 amr)
{
	if (!cxl_ops->link_ok(ctx->afu->adapter)) {
		WARN(1, "Device link is down, refusing to attach process!\n");
		return -EIO;
	}

	ctx->kernel = kernel;
	if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
		return attach_afu_directed(ctx, wed, amr);

	if (ctx->afu->current_mode == CXL_MODE_DEDICATED)
		return attach_dedicated(ctx, wed, amr);

	return -EINVAL;
}

static inline int detach_process_native_dedicated(struct cxl_context *ctx)
{
	cxl_ops->afu_reset(ctx->afu);
	cxl_afu_disable(ctx->afu);
	cxl_psl_purge(ctx->afu);
	return 0;
}

static inline int detach_process_native_afu_directed(struct cxl_context *ctx)
{
	if (!ctx->pe_inserted)
		return 0;
	if (terminate_process_element(ctx))
		return -1;
	if (remove_process_element(ctx))
		return -1;

	return 0;
}

static int cxl_detach_process(struct cxl_context *ctx)
{
	trace_cxl_detach(ctx);

	if (ctx->afu->current_mode == CXL_MODE_DEDICATED)
		return detach_process_native_dedicated(ctx);

	return detach_process_native_afu_directed(ctx);
}

static int cxl_get_irq(struct cxl_afu *afu, struct cxl_irq_info *info)
{
	u64 pidtid;

	/* If the adapter has gone away, we can't get any meaningful
	 * information.
	 */
	if (!cxl_ops->link_ok(afu->adapter))
		return -EIO;

	info->dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
	info->dar = cxl_p2n_read(afu, CXL_PSL_DAR_An);
	info->dsr = cxl_p2n_read(afu, CXL_PSL_DSR_An);
	pidtid = cxl_p2n_read(afu, CXL_PSL_PID_TID_An);
	info->pid = pidtid >> 32;
	info->tid = pidtid & 0xffffffff;
	info->afu_err = cxl_p2n_read(afu, CXL_AFU_ERR_An);
	info->errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);

	return 0;
}

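/*
 * A PSL slice error: dump the relevant FIR/SERR/debug registers, stop
 * the trace arrays so their state is preserved for debugging, then ack
 * the interrupt with the error status so the PSL can recover.
 */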
static irqreturn_t handle_psl_slice_error(struct cxl_context *ctx, u64 dsisr, u64 errstat)
{
	u64 fir1, fir2, fir_slice, serr, afu_debug;

	fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR1);
	fir2 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR2);
	fir_slice = cxl_p1n_read(ctx->afu, CXL_PSL_FIR_SLICE_An);
	serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An);
	afu_debug = cxl_p1n_read(ctx->afu, CXL_AFU_DEBUG_An);

	dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%016llx\n", errstat);
	dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1);
	dev_crit(&ctx->afu->dev, "PSL_FIR2: 0x%016llx\n", fir2);
	dev_crit(&ctx->afu->dev, "PSL_SERR_An: 0x%016llx\n", serr);
	dev_crit(&ctx->afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
	dev_crit(&ctx->afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);

	dev_crit(&ctx->afu->dev, "STOPPING CXL TRACE\n");
	cxl_stop_trace(ctx->afu->adapter);

	return cxl_ops->ack_irq(ctx, 0, errstat);
}

static irqreturn_t fail_psl_irq(struct cxl_afu *afu, struct cxl_irq_info *irq_info)
{
	if (irq_info->dsisr & CXL_PSL_DSISR_TRANS)
		cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
	else
		cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);

	return IRQ_HANDLED;
}

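/*
 * All AFU-directed interrupts for a slice arrive here: read the PE
 * handle from CXL_PSL_PEHandle_An, look up the owning context in the
 * IDR under RCU, and hand the interrupt off to cxl_irq() for it.
 */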
static irqreturn_t cxl_irq_multiplexed(int irq, void *data)
{
	struct cxl_afu *afu = data;
	struct cxl_context *ctx;
	struct cxl_irq_info irq_info;
	int ph = cxl_p2n_read(afu, CXL_PSL_PEHandle_An) & 0xffff;
	int ret;

	if ((ret = cxl_get_irq(afu, &irq_info))) {
		WARN(1, "Unable to get CXL IRQ Info: %i\n", ret);
		return fail_psl_irq(afu, &irq_info);
	}

	rcu_read_lock();
	ctx = idr_find(&afu->contexts_idr, ph);
	if (ctx) {
		ret = cxl_irq(irq, ctx, &irq_info);
		rcu_read_unlock();
		return ret;
	}
	rcu_read_unlock();

	WARN(1, "Unable to demultiplex CXL PSL IRQ for PE %i DSISR %016llx DAR"
		" %016llx\n(Possible AFU HW issue - was a term/remove acked"
		" with outstanding transactions?)\n", ph, irq_info.dsisr,
		irq_info.dar);
	return fail_psl_irq(afu, &irq_info);
}

static irqreturn_t cxl_slice_irq_err(int irq, void *data)
{
	struct cxl_afu *afu = data;
	u64 fir_slice, errstat, serr, afu_debug;

	WARN(irq, "CXL SLICE ERROR interrupt %i\n", irq);

	serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
	fir_slice = cxl_p1n_read(afu, CXL_PSL_FIR_SLICE_An);
	errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
	afu_debug = cxl_p1n_read(afu, CXL_AFU_DEBUG_An);
	dev_crit(&afu->dev, "PSL_SERR_An: 0x%016llx\n", serr);
	dev_crit(&afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
	dev_crit(&afu->dev, "CXL_PSL_ErrStat_An: 0x%016llx\n", errstat);
	dev_crit(&afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);

	cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);

	return IRQ_HANDLED;
}

static irqreturn_t cxl_irq_err(int irq, void *data)
{
	struct cxl *adapter = data;
	u64 fir1, fir2, err_ivte;

	WARN(1, "CXL ERROR interrupt %i\n", irq);

	err_ivte = cxl_p1_read(adapter, CXL_PSL_ErrIVTE);
	dev_crit(&adapter->dev, "PSL_ErrIVTE: 0x%016llx\n", err_ivte);

	dev_crit(&adapter->dev, "STOPPING CXL TRACE\n");
	cxl_stop_trace(adapter);

	fir1 = cxl_p1_read(adapter, CXL_PSL_FIR1);
	fir2 = cxl_p1_read(adapter, CXL_PSL_FIR2);

	dev_crit(&adapter->dev, "PSL_FIR1: 0x%016llx\nPSL_FIR2: 0x%016llx\n", fir1, fir2);

	return IRQ_HANDLED;
}

int cxl_register_psl_err_irq(struct cxl *adapter)
{
	int rc;

	adapter->irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
				      dev_name(&adapter->dev));
	if (!adapter->irq_name)
		return -ENOMEM;

	if ((rc = cxl_register_one_irq(adapter, cxl_irq_err, adapter,
				       &adapter->err_hwirq,
				       &adapter->err_virq,
				       adapter->irq_name))) {
		kfree(adapter->irq_name);
		adapter->irq_name = NULL;
		return rc;
	}

	cxl_p1_write(adapter, CXL_PSL_ErrIVTE, adapter->err_hwirq & 0xffff);

	return 0;
}

void cxl_release_psl_err_irq(struct cxl *adapter)
{
	if (adapter->err_virq != irq_find_mapping(NULL, adapter->err_hwirq))
		return;

	cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
	cxl_unmap_irq(adapter->err_virq, adapter);
	cxl_ops->release_one_irq(adapter, adapter->err_hwirq);
	kfree(adapter->irq_name);
}

int cxl_register_serr_irq(struct cxl_afu *afu)
{
	u64 serr;
	int rc;

	afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
				      dev_name(&afu->dev));
	if (!afu->err_irq_name)
		return -ENOMEM;

	if ((rc = cxl_register_one_irq(afu->adapter, cxl_slice_irq_err, afu,
				       &afu->serr_hwirq,
				       &afu->serr_virq, afu->err_irq_name))) {
		kfree(afu->err_irq_name);
		afu->err_irq_name = NULL;
		return rc;
	}

	serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
	serr = (serr & 0x00ffffffffff0000ULL) | (afu->serr_hwirq & 0xffff);
	cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);

	return 0;
}

void cxl_release_serr_irq(struct cxl_afu *afu)
{
	if (afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq))
		return;

	cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000);
	cxl_unmap_irq(afu->serr_virq, afu);
	cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq);
	kfree(afu->err_irq_name);
}

int cxl_register_psl_irq(struct cxl_afu *afu)
{
	int rc;

	afu->psl_irq_name = kasprintf(GFP_KERNEL, "cxl-%s",
				      dev_name(&afu->dev));
	if (!afu->psl_irq_name)
		return -ENOMEM;

	if ((rc = cxl_register_one_irq(afu->adapter, cxl_irq_multiplexed, afu,
				       &afu->psl_hwirq, &afu->psl_virq,
				       afu->psl_irq_name))) {
		kfree(afu->psl_irq_name);
		afu->psl_irq_name = NULL;
	}
	return rc;
}

void cxl_release_psl_irq(struct cxl_afu *afu)
{
	if (afu->psl_virq != irq_find_mapping(NULL, afu->psl_hwirq))
		return;

	cxl_unmap_irq(afu->psl_virq, afu);
	cxl_ops->release_one_irq(afu->adapter, afu->psl_hwirq);
	kfree(afu->psl_irq_name);
}

static void recover_psl_err(struct cxl_afu *afu, u64 errstat)
{
	u64 dsisr;

	pr_devel("RECOVERING FROM PSL ERROR... (0x%016llx)\n", errstat);

	/* Clear PSL_DSISR[PE] */
	dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
	cxl_p2n_write(afu, CXL_PSL_DSISR_An, dsisr & ~CXL_PSL_DSISR_An_PE);

	/* Write 1s to clear error status bits */
	cxl_p2n_write(afu, CXL_PSL_ErrStat_An, errstat);
}

static int cxl_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask)
{
	trace_cxl_psl_irq_ack(ctx, tfc);
	if (tfc)
		cxl_p2n_write(ctx->afu, CXL_PSL_TFC_An, tfc);
	if (psl_reset_mask)
		recover_psl_err(ctx->afu, psl_reset_mask);

	return 0;
}

int cxl_check_error(struct cxl_afu *afu)
{
	return (cxl_p1n_read(afu, CXL_PSL_SCNTL_An) == ~0ULL);
}

static int cxl_afu_cr_read64(struct cxl_afu *afu, int cr, u64 off, u64 *out)
{
	if (unlikely(!cxl_ops->link_ok(afu->adapter)))
		return -EIO;
	if (unlikely(off >= afu->crs_len))
		return -ERANGE;
	*out = in_le64(afu->afu_desc_mmio + afu->crs_offset +
		       (cr * afu->crs_len) + off);
	return 0;
}

static int cxl_afu_cr_read32(struct cxl_afu *afu, int cr, u64 off, u32 *out)
{
	if (unlikely(!cxl_ops->link_ok(afu->adapter)))
		return -EIO;
	if (unlikely(off >= afu->crs_len))
		return -ERANGE;
	*out = in_le32(afu->afu_desc_mmio + afu->crs_offset +
		       (cr * afu->crs_len) + off);
	return 0;
}

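/*
 * The 16- and 8-bit config record accessors are built on the 32-bit
 * one: read the containing aligned 32-bit word, then shift and mask
 * out the requested bytes.
 */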
static int cxl_afu_cr_read16(struct cxl_afu *afu, int cr, u64 off, u16 *out)
{
	u64 aligned_off = off & ~0x3L;
	u32 val;
	int rc;

	rc = cxl_afu_cr_read32(afu, cr, aligned_off, &val);
	if (!rc)
		*out = (val >> ((off & 0x3) * 8)) & 0xffff;
	return rc;
}

static int cxl_afu_cr_read8(struct cxl_afu *afu, int cr, u64 off, u8 *out)
{
	u64 aligned_off = off & ~0x3L;
	u32 val;
	int rc;

	rc = cxl_afu_cr_read32(afu, cr, aligned_off, &val);
	if (!rc)
		*out = (val >> ((off & 0x3) * 8)) & 0xff;
	return rc;
}

const struct cxl_backend_ops cxl_native_ops = {
	.module = THIS_MODULE,
	.adapter_reset = cxl_reset,
	.alloc_one_irq = cxl_alloc_one_irq,
	.release_one_irq = cxl_release_one_irq,
	.alloc_irq_ranges = cxl_alloc_irq_ranges,
	.release_irq_ranges = cxl_release_irq_ranges,
	.setup_irq = cxl_setup_irq,
	.handle_psl_slice_error = handle_psl_slice_error,
	.psl_interrupt = NULL,
	.ack_irq = cxl_ack_irq,
	.attach_process = cxl_attach_process,
	.detach_process = cxl_detach_process,
	.link_ok = cxl_adapter_link_ok,
	.release_afu = cxl_release_afu,
	.afu_read_err_buffer = cxl_afu_read_err_buffer,
	.afu_check_and_enable = cxl_afu_check_and_enable,
	.afu_activate_mode = cxl_afu_activate_mode,
	.afu_deactivate_mode = cxl_afu_deactivate_mode,
	.afu_reset = __cxl_afu_reset,
	.afu_cr_read8 = cxl_afu_cr_read8,
	.afu_cr_read16 = cxl_afu_cr_read16,
	.afu_cr_read32 = cxl_afu_cr_read32,
	.afu_cr_read64 = cxl_afu_cr_read64,
};