/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <misc/cxl.h>
#include <linux/fs.h>

#include "cxl.h"

struct cxl_context *cxl_dev_context_init(struct pci_dev *dev)
{
	struct address_space *mapping;
	struct cxl_afu *afu;
	struct cxl_context *ctx;
	int rc;

	afu = cxl_pci_to_afu(dev);

	ctx = cxl_context_alloc();
	if (IS_ERR(ctx)) {
		rc = PTR_ERR(ctx);
		goto err_dev;
	}

	ctx->kernelapi = true;

	/*
	 * Make our own address space since we won't have one from the
	 * filesystem like the user api has, and even if we do associate a file
	 * with this context we don't want to use the global anonymous inode's
	 * address space as that can invalidate unrelated users:
	 */
	mapping = kmalloc(sizeof(struct address_space), GFP_KERNEL);
	if (!mapping) {
		rc = -ENOMEM;
		goto err_ctx;
	}
	address_space_init_once(mapping);

	/* Make it a slave context. We can promote it later? */
	rc = cxl_context_init(ctx, afu, false, mapping);
	if (rc)
		goto err_mapping;

	return ctx;

err_mapping:
	kfree(mapping);
err_ctx:
	kfree(ctx);
err_dev:
	return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(cxl_dev_context_init);

struct cxl_context *cxl_get_context(struct pci_dev *dev)
{
	return dev->dev.archdata.cxl_ctx;
}
EXPORT_SYMBOL_GPL(cxl_get_context);

int cxl_release_context(struct cxl_context *ctx)
{
	if (ctx->status >= STARTED)
		return -EBUSY;

	cxl_context_free(ctx);

	return 0;
}
EXPORT_SYMBOL_GPL(cxl_release_context);

static irq_hw_number_t cxl_find_afu_irq(struct cxl_context *ctx, int num)
{
	__u16 range;
	int r;

	for (r = 0; r < CXL_IRQ_RANGES; r++) {
		range = ctx->irqs.range[r];
		if (num < range) {
			return ctx->irqs.offset[r] + num;
		}
		num -= range;
	}
	return 0;
}

int cxl_allocate_afu_irqs(struct cxl_context *ctx, int num)
{
	int res;
	irq_hw_number_t hwirq;

	if (num == 0)
		num = ctx->afu->pp_irqs;
	res = afu_allocate_irqs(ctx, num);
	if (!res && !cpu_has_feature(CPU_FTR_HVMODE)) {
		/* In a guest, the PSL interrupt is not multiplexed. It was
		 * allocated above, and we need to set its handler
		 */
		hwirq = cxl_find_afu_irq(ctx, 0);
		if (hwirq)
			cxl_map_irq(ctx->afu->adapter, hwirq, cxl_ops->psl_interrupt, ctx, "psl");
	}
	return res;
}
EXPORT_SYMBOL_GPL(cxl_allocate_afu_irqs);

void cxl_free_afu_irqs(struct cxl_context *ctx)
{
	irq_hw_number_t hwirq;
	unsigned int virq;

	if (!cpu_has_feature(CPU_FTR_HVMODE)) {
		hwirq = cxl_find_afu_irq(ctx, 0);
		if (hwirq) {
			virq = irq_find_mapping(NULL, hwirq);
			if (virq)
				cxl_unmap_irq(virq, ctx);
		}
	}
	afu_irq_name_free(ctx);
	cxl_ops->release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
}
EXPORT_SYMBOL_GPL(cxl_free_afu_irqs);

int cxl_map_afu_irq(struct cxl_context *ctx, int num,
		    irq_handler_t handler, void *cookie, char *name)
{
	irq_hw_number_t hwirq;

	/*
	 * Find interrupt we are to register.
	 */
	hwirq = cxl_find_afu_irq(ctx, num);
	if (!hwirq)
		return -ENOENT;

	return cxl_map_irq(ctx->afu->adapter, hwirq, handler, cookie, name);
}
EXPORT_SYMBOL_GPL(cxl_map_afu_irq);

void cxl_unmap_afu_irq(struct cxl_context *ctx, int num, void *cookie)
{
	irq_hw_number_t hwirq;
	unsigned int virq;

	hwirq = cxl_find_afu_irq(ctx, num);
	if (!hwirq)
		return;

	virq = irq_find_mapping(NULL, hwirq);
	if (virq)
		cxl_unmap_irq(virq, cookie);
}
EXPORT_SYMBOL_GPL(cxl_unmap_afu_irq);
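
/*
 * Example usage (illustrative sketch, not from an in-tree driver): an AFU
 * driver would typically allocate the per-process interrupts and attach a
 * handler to one of them before starting the context.  "my_afu_irq" and
 * "my_data" are hypothetical names used only for illustration, and error
 * handling is simplified:
 *
 *	static irqreturn_t my_afu_irq(int irq, void *data)
 *	{
 *		// handle/acknowledge the AFU interrupt here
 *		return IRQ_HANDLED;
 *	}
 *
 *	rc = cxl_allocate_afu_irqs(ctx, 0);	// 0 => use afu->pp_irqs
 *	if (rc)
 *		return rc;
 *	rc = cxl_map_afu_irq(ctx, 1, my_afu_irq, my_data, "my_afu");
 *	if (rc < 0)
 *		goto err_free;
 *
 * Teardown mirrors the setup:
 *
 *	cxl_unmap_afu_irq(ctx, 1, my_data);
 *	cxl_free_afu_irqs(ctx);
 */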

/*
 * Start a context
 * Code here similar to afu_ioctl_start_work().
 */
int cxl_start_context(struct cxl_context *ctx, u64 wed,
		      struct task_struct *task)
{
	int rc = 0;
	bool kernel = true;

	pr_devel("%s: pe: %i\n", __func__, ctx->pe);

	mutex_lock(&ctx->status_mutex);
	if (ctx->status == STARTED)
		goto out; /* already started */

	if (task) {
		ctx->pid = get_task_pid(task, PIDTYPE_PID);
		ctx->glpid = get_task_pid(task->group_leader, PIDTYPE_PID);
		kernel = false;
	}

	cxl_ctx_get();

	if ((rc = cxl_ops->attach_process(ctx, kernel, wed, 0))) {
		put_pid(ctx->pid);
		cxl_ctx_put();
		goto out;
	}

	ctx->status = STARTED;
out:
	mutex_unlock(&ctx->status_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(cxl_start_context);
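
/*
 * Example usage (illustrative sketch): the usual kernel-API life cycle for a
 * CAPI-aware driver is to derive a context from its struct pci_dev, start it
 * with a work element descriptor, and stop/release it on teardown.  "pdev"
 * and "wed" are hypothetical; passing a NULL task starts the context as a
 * kernel context:
 *
 *	ctx = cxl_dev_context_init(pdev);
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 *
 *	rc = cxl_start_context(ctx, wed, NULL);
 *	if (rc) {
 *		cxl_release_context(ctx);
 *		return rc;
 *	}
 *
 *	... use the AFU: map the problem state area, take interrupts, ...
 *
 *	cxl_stop_context(ctx);
 *	cxl_release_context(ctx);
 */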

int cxl_process_element(struct cxl_context *ctx)
{
	return ctx->external_pe;
}
EXPORT_SYMBOL_GPL(cxl_process_element);

/* Stop a context. Returns 0 on success, otherwise -Errno */
int cxl_stop_context(struct cxl_context *ctx)
{
	return __detach_context(ctx);
}
EXPORT_SYMBOL_GPL(cxl_stop_context);

void cxl_set_master(struct cxl_context *ctx)
{
	ctx->master = true;
}
EXPORT_SYMBOL_GPL(cxl_set_master);

/* wrappers around afu_* file ops which are EXPORTED */
int cxl_fd_open(struct inode *inode, struct file *file)
{
	return afu_open(inode, file);
}
EXPORT_SYMBOL_GPL(cxl_fd_open);
int cxl_fd_release(struct inode *inode, struct file *file)
{
	return afu_release(inode, file);
}
EXPORT_SYMBOL_GPL(cxl_fd_release);
long cxl_fd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	return afu_ioctl(file, cmd, arg);
}
EXPORT_SYMBOL_GPL(cxl_fd_ioctl);
int cxl_fd_mmap(struct file *file, struct vm_area_struct *vm)
{
	return afu_mmap(file, vm);
}
EXPORT_SYMBOL_GPL(cxl_fd_mmap);
unsigned int cxl_fd_poll(struct file *file, struct poll_table_struct *poll)
{
	return afu_poll(file, poll);
}
EXPORT_SYMBOL_GPL(cxl_fd_poll);
ssize_t cxl_fd_read(struct file *file, char __user *buf, size_t count,
		    loff_t *off)
{
	return afu_read(file, buf, count, off);
}
EXPORT_SYMBOL_GPL(cxl_fd_read);

#define PATCH_FOPS(NAME) if (!fops->NAME) fops->NAME = afu_fops.NAME

/* Get a struct file and fd for a context and attach the ops */
struct file *cxl_get_fd(struct cxl_context *ctx, struct file_operations *fops,
			int *fd)
{
	struct file *file;
	int rc, flags, fdtmp;

	flags = O_RDWR | O_CLOEXEC;

	/* This code is similar to anon_inode_getfd() */
	rc = get_unused_fd_flags(flags);
	if (rc < 0)
		return ERR_PTR(rc);
	fdtmp = rc;
	/*
	 * Patch the file ops. Care must be taken that this is reentrant safe.
	 */
	if (fops) {
		PATCH_FOPS(open);
		PATCH_FOPS(poll);
		PATCH_FOPS(read);
		PATCH_FOPS(release);
		PATCH_FOPS(unlocked_ioctl);
		PATCH_FOPS(compat_ioctl);
		PATCH_FOPS(mmap);
	} else /* use default ops */
		fops = (struct file_operations *)&afu_fops;

	file = anon_inode_getfile("cxl", fops, ctx, flags);
	if (IS_ERR(file))
		goto err_fd;

	file->f_mapping = ctx->mapping;

	*fd = fdtmp;
	return file;

err_fd:
	put_unused_fd(fdtmp);
	return NULL;
}
EXPORT_SYMBOL_GPL(cxl_get_fd);
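
/*
 * Example usage (illustrative sketch): a driver that wants to expose a
 * context to userspace can wrap the default afu file ops, overriding only
 * what it needs; anything left NULL is filled in from afu_fops by
 * PATCH_FOPS() above.  "my_fops" and "my_ioctl" are hypothetical, and error
 * handling is simplified:
 *
 *	static struct file_operations my_fops = {
 *		.unlocked_ioctl	= my_ioctl,
 *	};
 *
 *	file = cxl_get_fd(ctx, &my_fops, &fd);
 *	if (IS_ERR_OR_NULL(file))
 *		return -ENOMEM;
 *	fd_install(fd, file);
 *
 * Inside my_ioctl(), the originating context can be recovered with
 * cxl_fops_get_context(file).
 */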

struct cxl_context *cxl_fops_get_context(struct file *file)
{
	return file->private_data;
}
EXPORT_SYMBOL_GPL(cxl_fops_get_context);

int cxl_start_work(struct cxl_context *ctx,
		   struct cxl_ioctl_start_work *work)
{
	int rc;

	/* code taken from afu_ioctl_start_work */
	if (!(work->flags & CXL_START_WORK_NUM_IRQS))
		work->num_interrupts = ctx->afu->pp_irqs;
	else if ((work->num_interrupts < ctx->afu->pp_irqs) ||
		 (work->num_interrupts > ctx->afu->irqs_max)) {
		return -EINVAL;
	}

	rc = afu_register_irqs(ctx, work->num_interrupts);
	if (rc)
		return rc;

	rc = cxl_start_context(ctx, work->work_element_descriptor, current);
	if (rc < 0) {
		afu_release_irqs(ctx, ctx);
		return rc;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(cxl_start_work);

void __iomem *cxl_psa_map(struct cxl_context *ctx)
{
	if (ctx->status != STARTED)
		return NULL;

	pr_devel("%s: psn_phys%llx size:%llx\n",
		 __func__, ctx->psn_phys, ctx->psn_size);
	return ioremap(ctx->psn_phys, ctx->psn_size);
}
EXPORT_SYMBOL_GPL(cxl_psa_map);

void cxl_psa_unmap(void __iomem *addr)
{
	iounmap(addr);
}
EXPORT_SYMBOL_GPL(cxl_psa_unmap);
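
/*
 * Example usage (illustrative sketch): once a context is STARTED, its problem
 * state area can be mapped for MMIO access to the AFU:
 *
 *	void __iomem *psa = cxl_psa_map(ctx);
 *	if (!psa)
 *		return -EIO;
 *	// ... MMIO to AFU registers via psa (e.g. in_be64()/out_be64()) ...
 *	cxl_psa_unmap(psa);
 */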

int cxl_afu_reset(struct cxl_context *ctx)
{
	struct cxl_afu *afu = ctx->afu;
	int rc;

	rc = cxl_ops->afu_reset(afu);
	if (rc)
		return rc;

	return cxl_ops->afu_check_and_enable(afu);
}
EXPORT_SYMBOL_GPL(cxl_afu_reset);

void cxl_perst_reloads_same_image(struct cxl_afu *afu,
				  bool perst_reloads_same_image)
{
	afu->adapter->perst_same_image = perst_reloads_same_image;
}
EXPORT_SYMBOL_GPL(cxl_perst_reloads_same_image);

ssize_t cxl_read_adapter_vpd(struct pci_dev *dev, void *buf, size_t count)
{
	struct cxl_afu *afu = cxl_pci_to_afu(dev);

	return cxl_ops->read_adapter_vpd(afu->adapter, buf, count);
}
EXPORT_SYMBOL_GPL(cxl_read_adapter_vpd);