/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <misc/cxl.h>

#include "cxl.h"
17
18struct cxl_context *cxl_dev_context_init(struct pci_dev *dev)
19{
20 struct cxl_afu *afu;
21 struct cxl_context *ctx;
22 int rc;
23
24 afu = cxl_pci_to_afu(dev);
25
26 ctx = cxl_context_alloc();
27 if (IS_ERR(ctx))
28 return ctx;
29
30 /* Make it a slave context. We can promote it later? */
31 rc = cxl_context_init(ctx, afu, false, NULL);
32 if (rc) {
33 kfree(ctx);
34 return ERR_PTR(-ENOMEM);
35 }
36 cxl_assign_psn_space(ctx);
37
38 return ctx;
39}
40EXPORT_SYMBOL_GPL(cxl_dev_context_init);
41
/*
 * Return the kernel-API context attached to @dev (stashed in the
 * device's archdata), or NULL if none has been set up.  No reference
 * is taken -- NOTE(review): caller lifetime rules are not visible in
 * this file; confirm against whoever sets archdata.cxl_ctx.
 */
struct cxl_context *cxl_get_context(struct pci_dev *dev)
{
	return dev->dev.archdata.cxl_ctx;
}
EXPORT_SYMBOL_GPL(cxl_get_context);
47
/* Return the physical (parent) device of the cxl adapter behind @dev. */
struct device *cxl_get_phys_dev(struct pci_dev *dev)
{
	struct cxl_afu *afu;

	afu = cxl_pci_to_afu(dev);

	return afu->adapter->dev.parent;
}
EXPORT_SYMBOL_GPL(cxl_get_phys_dev);
57
58int cxl_release_context(struct cxl_context *ctx)
59{
60 if (ctx->status != CLOSED)
61 return -EBUSY;
62
63 cxl_context_free(ctx);
64
65 return 0;
66}
67EXPORT_SYMBOL_GPL(cxl_release_context);
68
69int cxl_allocate_afu_irqs(struct cxl_context *ctx, int num)
70{
71 if (num == 0)
72 num = ctx->afu->pp_irqs;
73 return afu_allocate_irqs(ctx, num);
74}
75EXPORT_SYMBOL_GPL(cxl_allocate_afu_irqs);
76
/* Release the AFU interrupt ranges previously allocated for @ctx. */
void cxl_free_afu_irqs(struct cxl_context *ctx)
{
	cxl_release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
}
EXPORT_SYMBOL_GPL(cxl_free_afu_irqs);
82
/*
 * Translate an AFU interrupt number into its hardware IRQ number.
 *
 * The context's interrupts live in up to CXL_IRQ_RANGES contiguous
 * ranges described by ctx->irqs (offset[r] / range[r]).  Walk the
 * ranges counting off @num and return the matching hw number, or 0 if
 * @num falls beyond the interrupts allocated to this context.
 *
 * NOTE(review): @num appears to be 1-based (0 is rejected below, and
 * offset[0]+0 is never returned) -- confirm against the allocator.
 */
static irq_hw_number_t cxl_find_afu_irq(struct cxl_context *ctx, int num)
{
	__u16 range;
	int r;

	WARN_ON(num == 0);

	for (r = 0; r < CXL_IRQ_RANGES; r++) {
		range = ctx->irqs.range[r];
		if (num < range) {
			return ctx->irqs.offset[r] + num;
		}
		num -= range;
	}
	/* 0 doubles as "not found"; callers treat it as invalid. */
	return 0;
}
99
/*
 * Register @handler (with @cookie and @name) for AFU interrupt @num on
 * @ctx.  Returns -ENOENT if @num does not map to one of the context's
 * interrupts, otherwise the result of cxl_map_irq().
 */
int cxl_map_afu_irq(struct cxl_context *ctx, int num,
		    irq_handler_t handler, void *cookie, char *name)
{
	irq_hw_number_t hwirq;

	/*
	 * Find interrupt we are to register.
	 */
	hwirq = cxl_find_afu_irq(ctx, num);
	if (!hwirq)
		return -ENOENT;

	return cxl_map_irq(ctx->afu->adapter, hwirq, handler, cookie, name);
}
EXPORT_SYMBOL_GPL(cxl_map_afu_irq);
115
/*
 * Unregister the handler for AFU interrupt @num on @ctx.  Silently a
 * no-op if @num is not one of the context's interrupts or no virq
 * mapping exists.
 */
void cxl_unmap_afu_irq(struct cxl_context *ctx, int num, void *cookie)
{
	irq_hw_number_t hwirq;
	unsigned int virq;

	hwirq = cxl_find_afu_irq(ctx, num);
	if (!hwirq)
		return;

	/* NULL domain selects the default IRQ domain. */
	virq = irq_find_mapping(NULL, hwirq);
	if (virq)
		cxl_unmap_irq(virq, cookie);
}
EXPORT_SYMBOL_GPL(cxl_unmap_afu_irq);
130
/*
 * Start a context
 * Code here similar to afu_ioctl_start_work().
 *
 * Attaches the process element (for @task, or as a kernel context when
 * @task is NULL) with work element descriptor @wed.  Returns 0 on
 * success (or if already started), otherwise the error from
 * cxl_attach_process().
 */
int cxl_start_context(struct cxl_context *ctx, u64 wed,
		      struct task_struct *task)
{
	int rc = 0;
	bool kernel = true;

	pr_devel("%s: pe: %i\n", __func__, ctx->pe);

	mutex_lock(&ctx->status_mutex);
	if (ctx->status == STARTED)
		goto out; /* already started */

	if (task) {
		ctx->pid = get_task_pid(task, PIDTYPE_PID);
		/*
		 * NOTE(review): get_task_pid() already returns a referenced
		 * pid, so this get_pid() takes a second reference while the
		 * failure path below puts only once -- confirm the teardown
		 * path drops the remaining reference, else this leaks.
		 */
		get_pid(ctx->pid);
		kernel = false;
	}

	cxl_ctx_get();

	if ((rc = cxl_attach_process(ctx, kernel, wed , 0))) {
		/* NOTE(review): ctx->pid is left pointing at the put pid. */
		put_pid(ctx->pid);
		cxl_ctx_put();
		goto out;
	}

	ctx->status = STARTED;
	/* Pin the AFU device while the context runs; dropped on stop. */
	get_device(&ctx->afu->dev);
out:
	mutex_unlock(&ctx->status_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(cxl_start_context);
168
/* Return the context's process element (PE) handle. */
int cxl_process_element(struct cxl_context *ctx)
{
	return ctx->pe;
}
EXPORT_SYMBOL_GPL(cxl_process_element);
174
175/* Stop a context. Returns 0 on success, otherwise -Errno */
176int cxl_stop_context(struct cxl_context *ctx)
177{
178 int rc;
179
180 rc = __detach_context(ctx);
181 if (!rc)
182 put_device(&ctx->afu->dev);
183 return rc;
184}
185EXPORT_SYMBOL_GPL(cxl_stop_context);
186
/* Promote @ctx to a master context and (re)assign its PSN space. */
void cxl_set_master(struct cxl_context *ctx)
{
	ctx->master = true;
	cxl_assign_psn_space(ctx);
}
EXPORT_SYMBOL_GPL(cxl_set_master);
193
/*
 * wrappers around afu_* file ops which are EXPORTED
 *
 * Each forwards directly to the corresponding static afu_* op so that
 * drivers layered on the kernel API (e.g. via cxl_get_fd()) can reuse
 * or wrap the default file operations.
 */
int cxl_fd_open(struct inode *inode, struct file *file)
{
	return afu_open(inode, file);
}
EXPORT_SYMBOL_GPL(cxl_fd_open);
int cxl_fd_release(struct inode *inode, struct file *file)
{
	return afu_release(inode, file);
}
EXPORT_SYMBOL_GPL(cxl_fd_release);
long cxl_fd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	return afu_ioctl(file, cmd, arg);
}
EXPORT_SYMBOL_GPL(cxl_fd_ioctl);
int cxl_fd_mmap(struct file *file, struct vm_area_struct *vm)
{
	return afu_mmap(file, vm);
}
EXPORT_SYMBOL_GPL(cxl_fd_mmap);
unsigned int cxl_fd_poll(struct file *file, struct poll_table_struct *poll)
{
	return afu_poll(file, poll);
}
EXPORT_SYMBOL_GPL(cxl_fd_poll);
ssize_t cxl_fd_read(struct file *file, char __user *buf, size_t count,
		    loff_t *off)
{
	return afu_read(file, buf, count, off);
}
EXPORT_SYMBOL_GPL(cxl_fd_read);
226
227#define PATCH_FOPS(NAME) if (!fops->NAME) fops->NAME = afu_fops.NAME
228
229/* Get a struct file and fd for a context and attach the ops */
230struct file *cxl_get_fd(struct cxl_context *ctx, struct file_operations *fops,
231 int *fd)
232{
233 struct file *file;
234 int rc, flags, fdtmp;
235
236 flags = O_RDWR | O_CLOEXEC;
237
238 /* This code is similar to anon_inode_getfd() */
239 rc = get_unused_fd_flags(flags);
240 if (rc < 0)
241 return ERR_PTR(rc);
242 fdtmp = rc;
243
244 /*
245 * Patch the file ops. Needs to be careful that this is rentrant safe.
246 */
247 if (fops) {
248 PATCH_FOPS(open);
249 PATCH_FOPS(poll);
250 PATCH_FOPS(read);
251 PATCH_FOPS(release);
252 PATCH_FOPS(unlocked_ioctl);
253 PATCH_FOPS(compat_ioctl);
254 PATCH_FOPS(mmap);
255 } else /* use default ops */
256 fops = (struct file_operations *)&afu_fops;
257
258 file = anon_inode_getfile("cxl", fops, ctx, flags);
259 if (IS_ERR(file))
260 put_unused_fd(fdtmp);
261 *fd = fdtmp;
262 return file;
263}
264EXPORT_SYMBOL_GPL(cxl_get_fd);
265
/* Retrieve the cxl context stashed in a cxl fd's private_data. */
struct cxl_context *cxl_fops_get_context(struct file *file)
{
	return file->private_data;
}
EXPORT_SYMBOL_GPL(cxl_fops_get_context);
271
/*
 * Register the requested AFU interrupts and start @ctx running @work
 * on behalf of the current task.
 *
 * Without CXL_START_WORK_NUM_IRQS the AFU's per-process default
 * (pp_irqs) is used; otherwise the request must lie within
 * [pp_irqs, irqs_max] or -EINVAL is returned.  If starting the
 * context fails, the just-registered IRQs are released again.
 */
int cxl_start_work(struct cxl_context *ctx,
		   struct cxl_ioctl_start_work *work)
{
	int rc;

	/* code taken from afu_ioctl_start_work */
	if (!(work->flags & CXL_START_WORK_NUM_IRQS))
		work->num_interrupts = ctx->afu->pp_irqs;
	else if ((work->num_interrupts < ctx->afu->pp_irqs) ||
		 (work->num_interrupts > ctx->afu->irqs_max)) {
		return -EINVAL;
	}

	rc = afu_register_irqs(ctx, work->num_interrupts);
	if (rc)
		return rc;

	rc = cxl_start_context(ctx, work->work_element_descriptor, current);
	if (rc < 0) {
		/* ctx doubles as the irq cookie here. */
		afu_release_irqs(ctx, ctx);
		return rc;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(cxl_start_work);
298
299void __iomem *cxl_psa_map(struct cxl_context *ctx)
300{
301 struct cxl_afu *afu = ctx->afu;
302 int rc;
303
304 rc = cxl_afu_check_and_enable(afu);
305 if (rc)
306 return NULL;
307
308 pr_devel("%s: psn_phys%llx size:%llx\n",
309 __func__, afu->psn_phys, afu->adapter->ps_size);
310 return ioremap(ctx->psn_phys, ctx->psn_size);
311}
312EXPORT_SYMBOL_GPL(cxl_psa_map);
313
/* Undo a mapping obtained from cxl_psa_map(). */
void cxl_psa_unmap(void __iomem *addr)
{
	iounmap(addr);
}
EXPORT_SYMBOL_GPL(cxl_psa_unmap);
319
320int cxl_afu_reset(struct cxl_context *ctx)
321{
322 struct cxl_afu *afu = ctx->afu;
323 int rc;
324
325 rc = __cxl_afu_reset(afu);
326 if (rc)
327 return rc;
328
329 return cxl_afu_check_and_enable(afu);
330}
331EXPORT_SYMBOL_GPL(cxl_afu_reset);