/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/pid.h>
#include <asm/cputable.h>
#include <misc/cxl-base.h>

#include "cxl.h"
#include "trace.h"

/* XXX: This is implementation specific */
static irqreturn_t handle_psl_slice_error(struct cxl_context *ctx, u64 dsisr, u64 errstat)
{
	u64 fir1, fir2, fir_slice, serr, afu_debug;

	fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR1);
	fir2 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR2);
	fir_slice = cxl_p1n_read(ctx->afu, CXL_PSL_FIR_SLICE_An);
	serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An);
	afu_debug = cxl_p1n_read(ctx->afu, CXL_AFU_DEBUG_An);

	dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%016llx\n", errstat);
	dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1);
	dev_crit(&ctx->afu->dev, "PSL_FIR2: 0x%016llx\n", fir2);
	dev_crit(&ctx->afu->dev, "PSL_SERR_An: 0x%016llx\n", serr);
	dev_crit(&ctx->afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
	dev_crit(&ctx->afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);

	dev_crit(&ctx->afu->dev, "STOPPING CXL TRACE\n");
	cxl_stop_trace(ctx->afu->adapter);

	return cxl_ack_irq(ctx, 0, errstat);
}

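/*
 * Slice error interrupt: dump the per-slice error and debug registers
 * for diagnosis, then write PSL_SERR_An back to itself to clear the
 * sticky error bits so further slice errors can be reported.
 */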
irqreturn_t cxl_slice_irq_err(int irq, void *data)
{
	struct cxl_afu *afu = data;
	u64 fir_slice, errstat, serr, afu_debug;

	WARN(irq, "CXL SLICE ERROR interrupt %i\n", irq);

	serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
	fir_slice = cxl_p1n_read(afu, CXL_PSL_FIR_SLICE_An);
	errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
	afu_debug = cxl_p1n_read(afu, CXL_AFU_DEBUG_An);
	dev_crit(&afu->dev, "PSL_SERR_An: 0x%016llx\n", serr);
	dev_crit(&afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
	dev_crit(&afu->dev, "CXL_PSL_ErrStat_An: 0x%016llx\n", errstat);
	dev_crit(&afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);

	cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);

	return IRQ_HANDLED;
}

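/*
 * Adapter-wide error interrupt: log the error IVTE and FIR registers
 * and stop the CXL trace so the hardware state at the time of the
 * error is preserved for debugging.
 */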
static irqreturn_t cxl_irq_err(int irq, void *data)
{
	struct cxl *adapter = data;
	u64 fir1, fir2, err_ivte;

	WARN(1, "CXL ERROR interrupt %i\n", irq);

	err_ivte = cxl_p1_read(adapter, CXL_PSL_ErrIVTE);
	dev_crit(&adapter->dev, "PSL_ErrIVTE: 0x%016llx\n", err_ivte);

	dev_crit(&adapter->dev, "STOPPING CXL TRACE\n");
	cxl_stop_trace(adapter);

	fir1 = cxl_p1_read(adapter, CXL_PSL_FIR1);
	fir2 = cxl_p1_read(adapter, CXL_PSL_FIR2);

	dev_crit(&adapter->dev, "PSL_FIR1: 0x%016llx\nPSL_FIR2: 0x%016llx\n", fir1, fir2);

	return IRQ_HANDLED;
}

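/*
 * Stash the fault state in the context and punt the actual handling to
 * the fault_work workqueue item: resolving the fault needs the task's
 * mm and may sleep, neither of which is possible in hard IRQ context.
 */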
static irqreturn_t schedule_cxl_fault(struct cxl_context *ctx, u64 dsisr, u64 dar)
{
	ctx->dsisr = dsisr;
	ctx->dar = dar;
	schedule_work(&ctx->fault_work);
	return IRQ_HANDLED;
}

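/*
 * Main PSL interrupt decoder for a context: inspect DSISR bit by bit,
 * deferring segment and page faults to the fault worker, reporting AFU
 * errors to any waiters on the context, and escalating PSL errors to
 * handle_psl_slice_error().
 */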
static irqreturn_t cxl_irq(int irq, void *data, struct cxl_irq_info *irq_info)
{
	struct cxl_context *ctx = data;
	u64 dsisr, dar;

	dsisr = irq_info->dsisr;
	dar = irq_info->dar;

	trace_cxl_psl_irq(ctx, irq, dsisr, dar);

	pr_devel("CXL interrupt %i for afu pe: %i DSISR: %#llx DAR: %#llx\n", irq, ctx->pe, dsisr, dar);

	if (dsisr & CXL_PSL_DSISR_An_DS) {
		/*
		 * We don't inherently need to sleep to handle this, but we do
		 * need to get a ref to the task's mm, which we can't do from
		 * irq context without the potential for a deadlock since it
		 * takes the task_lock. An alternate option would be to keep a
		 * reference to the task's mm the entire time it has cxl open,
		 * but to do that we need to solve the issue where we hold a
		 * ref to the mm, but the mm can hold a ref to the fd after an
		 * mmap preventing anything from being cleaned up.
		 */
		pr_devel("Scheduling segment miss handling for later pe: %i\n", ctx->pe);
		return schedule_cxl_fault(ctx, dsisr, dar);
	}

	if (dsisr & CXL_PSL_DSISR_An_M)
		pr_devel("CXL interrupt: PTE not found\n");
	if (dsisr & CXL_PSL_DSISR_An_P)
		pr_devel("CXL interrupt: Storage protection violation\n");
	if (dsisr & CXL_PSL_DSISR_An_A)
		pr_devel("CXL interrupt: AFU lock access to write through or cache inhibited storage\n");
	if (dsisr & CXL_PSL_DSISR_An_S)
		pr_devel("CXL interrupt: Access was afu_wr or afu_zero\n");
	if (dsisr & CXL_PSL_DSISR_An_K)
		pr_devel("CXL interrupt: Access not permitted by virtual page class key protection\n");

	if (dsisr & CXL_PSL_DSISR_An_DM) {
		/*
		 * In some cases we might be able to handle the fault
		 * immediately if hash_page would succeed, but we still need
		 * the task's mm, which as above we can't get without a lock.
		 */
		pr_devel("Scheduling page fault handling for later pe: %i\n", ctx->pe);
		return schedule_cxl_fault(ctx, dsisr, dar);
	}
	if (dsisr & CXL_PSL_DSISR_An_ST)
		WARN(1, "CXL interrupt: Segment Table PTE not found\n");
	if (dsisr & CXL_PSL_DSISR_An_UR)
		pr_devel("CXL interrupt: AURP PTE not found\n");
	if (dsisr & CXL_PSL_DSISR_An_PE)
		return handle_psl_slice_error(ctx, dsisr, irq_info->errstat);
	if (dsisr & CXL_PSL_DSISR_An_AE) {
		pr_devel("CXL interrupt: AFU Error 0x%016llx\n", irq_info->afu_err);

		if (ctx->pending_afu_err) {
			/*
			 * This shouldn't happen - the PSL treats these errors
			 * as fatal and will have reset the AFU, so there's not
			 * much point buffering multiple AFU errors.
			 * OTOH if we DO ever see a storm of these come in it's
			 * probably best that we log them somewhere:
			 */
			dev_err_ratelimited(&ctx->afu->dev, "CXL AFU Error "
					    "undelivered to pe %i: 0x%016llx\n",
					    ctx->pe, irq_info->afu_err);
		} else {
			spin_lock(&ctx->lock);
			ctx->afu_err = irq_info->afu_err;
			ctx->pending_afu_err = 1;
			spin_unlock(&ctx->lock);

			wake_up_all(&ctx->wq);
		}

		cxl_ack_irq(ctx, CXL_PSL_TFC_An_A, 0);
		return IRQ_HANDLED;
	}
	if (dsisr & CXL_PSL_DSISR_An_OC)
		pr_devel("CXL interrupt: OS Context Warning\n");

	WARN(1, "Unhandled CXL PSL IRQ\n");
	return IRQ_HANDLED;
}

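/*
 * Last-resort ack for an interrupt we could not route to a context:
 * signal an address error (TFC_An_AE) if the DSISR indicates a
 * translation fault, otherwise a plain ack (TFC_An_A), presumably so
 * the PSL can make forward progress.
 */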
static irqreturn_t fail_psl_irq(struct cxl_afu *afu, struct cxl_irq_info *irq_info)
{
	if (irq_info->dsisr & CXL_PSL_DSISR_TRANS)
		cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
	else
		cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);

	return IRQ_HANDLED;
}

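/*
 * All contexts on an AFU share one PSL interrupt. Read the PE handle to
 * work out which context the interrupt belongs to, then dispatch it to
 * cxl_irq() under rcu_read_lock() so the context can't be freed under us.
 */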
static irqreturn_t cxl_irq_multiplexed(int irq, void *data)
{
	struct cxl_afu *afu = data;
	struct cxl_context *ctx;
	struct cxl_irq_info irq_info;
	int ph = cxl_p2n_read(afu, CXL_PSL_PEHandle_An) & 0xffff;
	int ret;

	if ((ret = cxl_get_irq(afu, &irq_info))) {
		WARN(1, "Unable to get CXL IRQ Info: %i\n", ret);
		return fail_psl_irq(afu, &irq_info);
	}

	rcu_read_lock();
	ctx = idr_find(&afu->contexts_idr, ph);
	if (ctx) {
		ret = cxl_irq(irq, ctx, &irq_info);
		rcu_read_unlock();
		return ret;
	}
	rcu_read_unlock();

	WARN(1, "Unable to demultiplex CXL PSL IRQ for PE %i DSISR %016llx DAR"
		" %016llx\n(Possible AFU HW issue - was a term/remove acked"
		" with outstanding transactions?)\n", ph, irq_info.dsisr,
		irq_info.dar);
	return fail_psl_irq(afu, &irq_info);
}

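/*
 * AFU interrupt: translate the hardware IRQ back into the AFU's 1-based
 * interrupt number by walking the per-context IRQ ranges, set the
 * matching bit in the context's irq_bitmap, and wake up any waiters.
 */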
static irqreturn_t cxl_irq_afu(int irq, void *data)
{
	struct cxl_context *ctx = data;
	irq_hw_number_t hwirq = irqd_to_hwirq(irq_get_irq_data(irq));
	int irq_off, afu_irq = 1;
	__u16 range;
	int r;

	for (r = 1; r < CXL_IRQ_RANGES; r++) {
		irq_off = hwirq - ctx->irqs.offset[r];
		range = ctx->irqs.range[r];
		if (irq_off >= 0 && irq_off < range) {
			afu_irq += irq_off;
			break;
		}
		afu_irq += range;
	}
	if (unlikely(r >= CXL_IRQ_RANGES)) {
		WARN(1, "Received AFU IRQ out of range for pe %i (virq %i hwirq %lx)\n",
		     ctx->pe, irq, hwirq);
		return IRQ_HANDLED;
	}

	trace_cxl_afu_irq(ctx, afu_irq, irq, hwirq);
	pr_devel("Received AFU interrupt %i for pe: %i (virq %i hwirq %lx)\n",
	       afu_irq, ctx->pe, irq, hwirq);

	if (unlikely(!ctx->irq_bitmap)) {
		WARN(1, "Received AFU IRQ for context with no IRQ bitmap\n");
		return IRQ_HANDLED;
	}
	spin_lock(&ctx->lock);
	set_bit(afu_irq - 1, ctx->irq_bitmap);
	ctx->pending_irq = true;
	spin_unlock(&ctx->lock);

	wake_up_all(&ctx->wq);

	return IRQ_HANDLED;
}

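/*
 * Map a hardware IRQ to a Linux virtual IRQ and install the given
 * handler on it. Returns the virq on success, or 0 on failure.
 */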
unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
			 irq_handler_t handler, void *cookie, const char *name)
{
	unsigned int virq;
	int result;

	/* IRQ Domain? */
	virq = irq_create_mapping(NULL, hwirq);
	if (!virq) {
		dev_warn(&adapter->dev, "cxl_map_irq: irq_create_mapping failed\n");
		return 0;
	}

	cxl_setup_irq(adapter, hwirq, virq);

	pr_devel("hwirq %#lx mapped to virq %u\n", hwirq, virq);

	result = request_irq(virq, handler, 0, name, cookie);
	if (result) {
		dev_warn(&adapter->dev, "cxl_map_irq: request_irq failed: %i\n", result);
		return 0;
	}

	return virq;
}

void cxl_unmap_irq(unsigned int virq, void *cookie)
{
	free_irq(virq, cookie);
	irq_dispose_mapping(virq);
}

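/*
 * Allocate a single hardware IRQ from the adapter and map it through
 * cxl_map_irq(), returning both the hwirq and virq to the caller.
 */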
static int cxl_register_one_irq(struct cxl *adapter,
				irq_handler_t handler,
				void *cookie,
				irq_hw_number_t *dest_hwirq,
				unsigned int *dest_virq,
				const char *name)
{
	int hwirq, virq;

	if ((hwirq = cxl_alloc_one_irq(adapter)) < 0)
		return hwirq;

	if (!(virq = cxl_map_irq(adapter, hwirq, handler, cookie, name)))
		goto err;

	*dest_hwirq = hwirq;
	*dest_virq = virq;

	return 0;

err:
	cxl_release_one_irq(adapter, hwirq);
	return -ENOMEM;
}

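/*
 * Register the adapter-wide error interrupt and point the PSL error
 * IVTE at it so hardware errors are delivered to cxl_irq_err().
 */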
int cxl_register_psl_err_irq(struct cxl *adapter)
{
	int rc;

	adapter->irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
				      dev_name(&adapter->dev));
	if (!adapter->irq_name)
		return -ENOMEM;

	if ((rc = cxl_register_one_irq(adapter, cxl_irq_err, adapter,
				       &adapter->err_hwirq,
				       &adapter->err_virq,
				       adapter->irq_name))) {
		kfree(adapter->irq_name);
		adapter->irq_name = NULL;
		return rc;
	}

	cxl_p1_write(adapter, CXL_PSL_ErrIVTE, adapter->err_hwirq & 0xffff);

	return 0;
}

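/*
 * Only tear the interrupt down if the virq still maps to our hwirq; if
 * it doesn't, registration never completed (or was already undone) and
 * there is nothing to release.
 */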
void cxl_release_psl_err_irq(struct cxl *adapter)
{
	if (adapter->err_virq != irq_find_mapping(NULL, adapter->err_hwirq))
		return;

	cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
	cxl_unmap_irq(adapter->err_virq, adapter);
	cxl_release_one_irq(adapter, adapter->err_hwirq);
	kfree(adapter->irq_name);
}

int cxl_register_serr_irq(struct cxl_afu *afu)
{
	u64 serr;
	int rc;

	afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
				      dev_name(&afu->dev));
	if (!afu->err_irq_name)
		return -ENOMEM;

	if ((rc = cxl_register_one_irq(afu->adapter, cxl_slice_irq_err, afu,
				       &afu->serr_hwirq,
				       &afu->serr_virq, afu->err_irq_name))) {
		kfree(afu->err_irq_name);
		afu->err_irq_name = NULL;
		return rc;
	}

	serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
	serr = (serr & 0x00ffffffffff0000ULL) | (afu->serr_hwirq & 0xffff);
	cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);

	return 0;
}

void cxl_release_serr_irq(struct cxl_afu *afu)
{
	if (afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq))
		return;

	cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000);
	cxl_unmap_irq(afu->serr_virq, afu);
	cxl_release_one_irq(afu->adapter, afu->serr_hwirq);
	kfree(afu->err_irq_name);
}

int cxl_register_psl_irq(struct cxl_afu *afu)
{
	int rc;

	afu->psl_irq_name = kasprintf(GFP_KERNEL, "cxl-%s",
				      dev_name(&afu->dev));
	if (!afu->psl_irq_name)
		return -ENOMEM;

	if ((rc = cxl_register_one_irq(afu->adapter, cxl_irq_multiplexed, afu,
				       &afu->psl_hwirq, &afu->psl_virq,
				       afu->psl_irq_name))) {
		kfree(afu->psl_irq_name);
		afu->psl_irq_name = NULL;
	}
	return rc;
}

void cxl_release_psl_irq(struct cxl_afu *afu)
{
	if (afu->psl_virq != irq_find_mapping(NULL, afu->psl_hwirq))
		return;

	cxl_unmap_irq(afu->psl_virq, afu);
	cxl_release_one_irq(afu->adapter, afu->psl_hwirq);
	kfree(afu->psl_irq_name);
}

void afu_irq_name_free(struct cxl_context *ctx)
{
	struct cxl_irq_name *irq_name, *tmp;

	list_for_each_entry_safe(irq_name, tmp, &ctx->irq_names, list) {
		kfree(irq_name->name);
		list_del(&irq_name->list);
		kfree(irq_name);
	}
}

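/*
 * Allocate the hardware IRQ ranges and interrupt names for a context.
 * Range 0 is reserved for the multiplexed PSL interrupt; the AFU's own
 * interrupts start in range 1. All memory is allocated up front so the
 * later hardware registration step cannot fail on allocation.
 */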
int afu_allocate_irqs(struct cxl_context *ctx, u32 count)
{
	int rc, r, i, j = 1;
	struct cxl_irq_name *irq_name;

	/* Initialize the list head to hold irq names */
	INIT_LIST_HEAD(&ctx->irq_names);

	if ((rc = cxl_alloc_irq_ranges(&ctx->irqs, ctx->afu->adapter, count)))
		return rc;

	/* Multiplexed PSL Interrupt */
	ctx->irqs.offset[0] = ctx->afu->psl_hwirq;
	ctx->irqs.range[0] = 1;

	ctx->irq_count = count;
	ctx->irq_bitmap = kcalloc(BITS_TO_LONGS(count),
				  sizeof(*ctx->irq_bitmap), GFP_KERNEL);
	if (!ctx->irq_bitmap)
		goto out;

	/*
	 * Allocate names first. If any fail, bail out before allocating
	 * actual hardware IRQs.
	 */
	for (r = 1; r < CXL_IRQ_RANGES; r++) {
		for (i = 0; i < ctx->irqs.range[r]; i++) {
			irq_name = kmalloc(sizeof(struct cxl_irq_name),
					   GFP_KERNEL);
			if (!irq_name)
				goto out;
			irq_name->name = kasprintf(GFP_KERNEL, "cxl-%s-pe%i-%i",
						   dev_name(&ctx->afu->dev),
						   ctx->pe, j);
			if (!irq_name->name) {
				kfree(irq_name);
				goto out;
			}
			/* Add to tail so the next loop gets the correct order */
			list_add_tail(&irq_name->list, &ctx->irq_names);
			j++;
		}
	}
	return 0;

out:
	cxl_release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
	afu_irq_name_free(ctx);
	return -ENOMEM;
}

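/*
 * Map and request each hardware IRQ in the context's ranges, pairing it
 * with the name allocated in afu_allocate_irqs(). The names were added
 * to the list in order, so we just walk the list as we go.
 */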
static void afu_register_hwirqs(struct cxl_context *ctx)
{
	irq_hw_number_t hwirq;
	struct cxl_irq_name *irq_name;
	int r, i;

	/* We've allocated all memory now, so let's do the irq allocations */
	irq_name = list_first_entry(&ctx->irq_names, struct cxl_irq_name, list);
	for (r = 1; r < CXL_IRQ_RANGES; r++) {
		hwirq = ctx->irqs.offset[r];
		for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
			cxl_map_irq(ctx->afu->adapter, hwirq,
				    cxl_irq_afu, ctx, irq_name->name);
			irq_name = list_next_entry(irq_name, list);
		}
	}
}

int afu_register_irqs(struct cxl_context *ctx, u32 count)
{
	int rc;

	rc = afu_allocate_irqs(ctx, count);
	if (rc)
		return rc;

	afu_register_hwirqs(ctx);
	return 0;
}

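/*
 * Undo afu_register_irqs(): unmap every AFU interrupt that is still
 * mapped, free the interrupt names, and release the hardware IRQ
 * ranges back to the adapter.
 */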
void afu_release_irqs(struct cxl_context *ctx, void *cookie)
{
	irq_hw_number_t hwirq;
	unsigned int virq;
	int r, i;

	for (r = 1; r < CXL_IRQ_RANGES; r++) {
		hwirq = ctx->irqs.offset[r];
		for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
			virq = irq_find_mapping(NULL, hwirq);
			if (virq)
				cxl_unmap_irq(virq, cookie);
		}
	}

	afu_irq_name_free(ctx);
	cxl_release_irq_ranges(&ctx->irqs, ctx->afu->adapter);

	ctx->irq_count = 0;
}