/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <misc/cxl.h>
#include <linux/fs.h>
#include <asm/pnv-pci.h>
#include <linux/msi.h>

#include "cxl.h"

struct cxl_context *cxl_dev_context_init(struct pci_dev *dev)
{
        struct address_space *mapping;
        struct cxl_afu *afu;
        struct cxl_context *ctx;
        int rc;

        afu = cxl_pci_to_afu(dev);
        if (IS_ERR(afu))
                return ERR_CAST(afu);

        ctx = cxl_context_alloc();
        if (IS_ERR(ctx)) {
                rc = PTR_ERR(ctx);
                goto err_dev;
        }

        ctx->kernelapi = true;

        /*
         * Make our own address space since we won't have one from the
         * filesystem like the user api has, and even if we do associate a file
         * with this context we don't want to use the global anonymous inode's
         * address space as that can invalidate unrelated users:
         */
        mapping = kmalloc(sizeof(struct address_space), GFP_KERNEL);
        if (!mapping) {
                rc = -ENOMEM;
                goto err_ctx;
        }
        address_space_init_once(mapping);

        /* Make it a slave context.  We can promote it later? */
        rc = cxl_context_init(ctx, afu, false, mapping);
        if (rc)
                goto err_mapping;

        return ctx;

err_mapping:
        kfree(mapping);
err_ctx:
        kfree(ctx);
err_dev:
        return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(cxl_dev_context_init);

struct cxl_context *cxl_get_context(struct pci_dev *dev)
{
        return dev->dev.archdata.cxl_ctx;
}
EXPORT_SYMBOL_GPL(cxl_get_context);

int cxl_release_context(struct cxl_context *ctx)
{
        if (ctx->status >= STARTED)
                return -EBUSY;

        cxl_context_free(ctx);

        return 0;
}
EXPORT_SYMBOL_GPL(cxl_release_context);
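
/*
 * Example (illustrative sketch only, not part of this API): a hypothetical
 * AFU driver pairs the calls above roughly as follows; error handling is
 * abbreviated and "afu_pdev" is a placeholder for a cxl-attached pci_dev:
 *
 *      struct cxl_context *ctx;
 *
 *      ctx = cxl_dev_context_init(afu_pdev);
 *      if (IS_ERR(ctx))
 *              return PTR_ERR(ctx);
 *      // ... allocate IRQs and start the context, see below ...
 *      cxl_release_context(ctx);       // fails with -EBUSY while started
 */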

static irq_hw_number_t cxl_find_afu_irq(struct cxl_context *ctx, int num)
{
        __u16 range;
        int r;

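        /*
         * Walk the context's IRQ ranges; 'num' indexes the interrupts across
         * all ranges. Worked example with hypothetical ranges
         * {offset 0x100, range 1} and {offset 0x200, range 4}: num 0 falls in
         * the first range and resolves to hwirq 0x100 (interrupt 0 is the PSL
         * interrupt, see cxl_allocate_afu_irqs() below), while num 3 resolves
         * to 0x200 + 2 = 0x202. A 'num' beyond all ranges returns 0.
         */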
        for (r = 0; r < CXL_IRQ_RANGES; r++) {
                range = ctx->irqs.range[r];
                if (num < range) {
                        return ctx->irqs.offset[r] + num;
                }
                num -= range;
        }
        return 0;
}

int _cxl_next_msi_hwirq(struct pci_dev *pdev, struct cxl_context **ctx, int *afu_irq)
{
        if (*ctx == NULL || *afu_irq == 0) {
                *afu_irq = 1;
                *ctx = cxl_get_context(pdev);
        } else {
                (*afu_irq)++;
                if (*afu_irq > cxl_get_max_irqs_per_process(pdev)) {
                        *ctx = list_next_entry(*ctx, extra_irq_contexts);
                        *afu_irq = 1;
                }
        }
        return cxl_find_afu_irq(*ctx, *afu_irq);
}
/* Exported via cxl_base */

int cxl_set_priv(struct cxl_context *ctx, void *priv)
{
        if (!ctx)
                return -EINVAL;

        ctx->priv = priv;

        return 0;
}
EXPORT_SYMBOL_GPL(cxl_set_priv);

void *cxl_get_priv(struct cxl_context *ctx)
{
        if (!ctx)
                return ERR_PTR(-EINVAL);

        return ctx->priv;
}
EXPORT_SYMBOL_GPL(cxl_get_priv);

int cxl_allocate_afu_irqs(struct cxl_context *ctx, int num)
{
        int res;
        irq_hw_number_t hwirq;

        if (num == 0)
                num = ctx->afu->pp_irqs;
        res = afu_allocate_irqs(ctx, num);
        if (res)
                return res;

        if (!cpu_has_feature(CPU_FTR_HVMODE)) {
                /* In a guest, the PSL interrupt is not multiplexed. It was
                 * allocated above, and we need to set its handler
                 */
                hwirq = cxl_find_afu_irq(ctx, 0);
                if (hwirq)
                        cxl_map_irq(ctx->afu->adapter, hwirq, cxl_ops->psl_interrupt, ctx, "psl");
        }

        if (ctx->status == STARTED) {
                if (cxl_ops->update_ivtes)
                        cxl_ops->update_ivtes(ctx);
                else WARN(1, "BUG: cxl_allocate_afu_irqs must be called prior to starting the context on this platform\n");
        }

        return res;
}
EXPORT_SYMBOL_GPL(cxl_allocate_afu_irqs);

void cxl_free_afu_irqs(struct cxl_context *ctx)
{
        irq_hw_number_t hwirq;
        unsigned int virq;

        if (!cpu_has_feature(CPU_FTR_HVMODE)) {
                hwirq = cxl_find_afu_irq(ctx, 0);
                if (hwirq) {
                        virq = irq_find_mapping(NULL, hwirq);
                        if (virq)
                                cxl_unmap_irq(virq, ctx);
                }
        }
        afu_irq_name_free(ctx);
        cxl_ops->release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
}
EXPORT_SYMBOL_GPL(cxl_free_afu_irqs);

int cxl_map_afu_irq(struct cxl_context *ctx, int num,
                    irq_handler_t handler, void *cookie, char *name)
{
        irq_hw_number_t hwirq;

        /*
         * Find interrupt we are to register.
         */
        hwirq = cxl_find_afu_irq(ctx, num);
        if (!hwirq)
                return -ENOENT;

        return cxl_map_irq(ctx->afu->adapter, hwirq, handler, cookie, name);
}
EXPORT_SYMBOL_GPL(cxl_map_afu_irq);

void cxl_unmap_afu_irq(struct cxl_context *ctx, int num, void *cookie)
{
        irq_hw_number_t hwirq;
        unsigned int virq;

        hwirq = cxl_find_afu_irq(ctx, num);
        if (!hwirq)
                return;

        virq = irq_find_mapping(NULL, hwirq);
        if (virq)
                cxl_unmap_irq(virq, cookie);
}
EXPORT_SYMBOL_GPL(cxl_unmap_afu_irq);
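
/*
 * Example (illustrative sketch only): wiring up the first AFU interrupt of a
 * context; "my_handler" and "my_data" are placeholders supplied by the
 * calling driver (AFU interrupts are numbered from 1; 0 is the PSL interrupt):
 *
 *      rc = cxl_allocate_afu_irqs(ctx, 1);
 *      if (rc)
 *              return rc;
 *      virq = cxl_map_afu_irq(ctx, 1, my_handler, my_data, "my_afu_irq");
 *      // ... handle interrupts ...
 *      cxl_unmap_afu_irq(ctx, 1, my_data);
 *      cxl_free_afu_irqs(ctx);
 */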

/*
 * Start a context
 * Code here similar to afu_ioctl_start_work().
 */
int cxl_start_context(struct cxl_context *ctx, u64 wed,
                      struct task_struct *task)
{
        int rc = 0;
        bool kernel = true;

        pr_devel("%s: pe: %i\n", __func__, ctx->pe);

        mutex_lock(&ctx->status_mutex);
        if (ctx->status == STARTED)
                goto out; /* already started */

        /*
         * Increment the mapped context count for adapter. This also checks
         * if adapter_context_lock is taken.
         */
        rc = cxl_adapter_context_get(ctx->afu->adapter);
        if (rc)
                goto out;

        if (task) {
                ctx->pid = get_task_pid(task, PIDTYPE_PID);
                ctx->glpid = get_task_pid(task->group_leader, PIDTYPE_PID);
                kernel = false;
                ctx->real_mode = false;
        }

        cxl_ctx_get();

        if ((rc = cxl_ops->attach_process(ctx, kernel, wed, 0))) {
                put_pid(ctx->glpid);
                put_pid(ctx->pid);
                ctx->glpid = ctx->pid = NULL;
                cxl_adapter_context_put(ctx->afu->adapter);
                cxl_ctx_put();
                goto out;
        }

        ctx->status = STARTED;
out:
        mutex_unlock(&ctx->status_mutex);
        return rc;
}
EXPORT_SYMBOL_GPL(cxl_start_context);
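
/*
 * Example (illustrative sketch only): starting a kernel context with a work
 * element descriptor; "wed" here is a placeholder for whatever the AFU
 * expects (passing a NULL task makes this a kernel context):
 *
 *      rc = cxl_start_context(ctx, wed, NULL);
 *      if (rc)
 *              return rc;
 *      // ... AFU is processing ...
 *      cxl_stop_context(ctx);
 */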

int cxl_process_element(struct cxl_context *ctx)
{
        return ctx->external_pe;
}
EXPORT_SYMBOL_GPL(cxl_process_element);

/* Stop a context.  Returns 0 on success, otherwise -Errno */
int cxl_stop_context(struct cxl_context *ctx)
{
        return __detach_context(ctx);
}
EXPORT_SYMBOL_GPL(cxl_stop_context);

void cxl_set_master(struct cxl_context *ctx)
{
        ctx->master = true;
}
EXPORT_SYMBOL_GPL(cxl_set_master);

int cxl_set_translation_mode(struct cxl_context *ctx, bool real_mode)
{
        if (ctx->status == STARTED) {
                /*
                 * We could potentially update the PE and issue an update LLCMD
                 * to support this, but it doesn't seem to have a good use case
                 * since it's trivial to just create a second kernel context
                 * with different translation modes, so until someone convinces
                 * me otherwise:
                 */
                return -EBUSY;
        }

        ctx->real_mode = real_mode;
        return 0;
}
EXPORT_SYMBOL_GPL(cxl_set_translation_mode);

/* wrappers around afu_* file ops which are EXPORTED */
int cxl_fd_open(struct inode *inode, struct file *file)
{
        return afu_open(inode, file);
}
EXPORT_SYMBOL_GPL(cxl_fd_open);
int cxl_fd_release(struct inode *inode, struct file *file)
{
        return afu_release(inode, file);
}
EXPORT_SYMBOL_GPL(cxl_fd_release);
long cxl_fd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        return afu_ioctl(file, cmd, arg);
}
EXPORT_SYMBOL_GPL(cxl_fd_ioctl);
int cxl_fd_mmap(struct file *file, struct vm_area_struct *vm)
{
        return afu_mmap(file, vm);
}
EXPORT_SYMBOL_GPL(cxl_fd_mmap);
unsigned int cxl_fd_poll(struct file *file, struct poll_table_struct *poll)
{
        return afu_poll(file, poll);
}
EXPORT_SYMBOL_GPL(cxl_fd_poll);
ssize_t cxl_fd_read(struct file *file, char __user *buf, size_t count,
                    loff_t *off)
{
        return afu_read(file, buf, count, off);
}
EXPORT_SYMBOL_GPL(cxl_fd_read);

#define PATCH_FOPS(NAME) if (!fops->NAME) fops->NAME = afu_fops.NAME

/* Get a struct file and fd for a context and attach the ops */
struct file *cxl_get_fd(struct cxl_context *ctx, struct file_operations *fops,
                        int *fd)
{
        struct file *file;
        int rc, flags, fdtmp;

        flags = O_RDWR | O_CLOEXEC;

        /* This code is similar to anon_inode_getfd() */
        rc = get_unused_fd_flags(flags);
        if (rc < 0)
                return ERR_PTR(rc);
        fdtmp = rc;

        /*
         * Patch the file ops.  This needs to be reentrant safe.
         */
        if (fops) {
                PATCH_FOPS(open);
                PATCH_FOPS(poll);
                PATCH_FOPS(read);
                PATCH_FOPS(release);
                PATCH_FOPS(unlocked_ioctl);
                PATCH_FOPS(compat_ioctl);
                PATCH_FOPS(mmap);
        } else /* use default ops */
                fops = (struct file_operations *)&afu_fops;

        file = anon_inode_getfile("cxl", fops, ctx, flags);
        if (IS_ERR(file))
                goto err_fd;

        file->f_mapping = ctx->mapping;

        *fd = fdtmp;
        return file;

err_fd:
        put_unused_fd(fdtmp);
        return NULL;
}
EXPORT_SYMBOL_GPL(cxl_get_fd);
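
/*
 * Example (illustrative sketch only): exporting a context to userspace;
 * "my_fops" is a placeholder file_operations that may be partially filled,
 * since PATCH_FOPS() above falls back to afu_fops for any ops left NULL
 * (pass NULL instead of &my_fops to use the defaults unchanged):
 *
 *      struct file *file;
 *      int fd;
 *
 *      file = cxl_get_fd(ctx, &my_fops, &fd);
 *      if (IS_ERR_OR_NULL(file))
 *              return -ENFILE;
 *      fd_install(fd, file);
 */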

struct cxl_context *cxl_fops_get_context(struct file *file)
{
        return file->private_data;
}
EXPORT_SYMBOL_GPL(cxl_fops_get_context);

void cxl_set_driver_ops(struct cxl_context *ctx,
                        struct cxl_afu_driver_ops *ops)
{
        WARN_ON(!ops->fetch_event || !ops->event_delivered);
        atomic_set(&ctx->afu_driver_events, 0);
        ctx->afu_driver_ops = ops;
}
EXPORT_SYMBOL_GPL(cxl_set_driver_ops);

void cxl_context_events_pending(struct cxl_context *ctx,
                                unsigned int new_events)
{
        atomic_add(new_events, &ctx->afu_driver_events);
        wake_up_all(&ctx->wq);
}
EXPORT_SYMBOL_GPL(cxl_context_events_pending);
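
/*
 * Example (illustrative sketch only): a hypothetical AFU driver feeding
 * events into the common read()/poll() path; "my_fetch_event" and
 * "my_event_delivered" are placeholder callbacks implemented by the driver:
 *
 *      static struct cxl_afu_driver_ops my_ops = {
 *              .fetch_event     = my_fetch_event,
 *              .event_delivered = my_event_delivered,
 *      };
 *
 *      cxl_set_driver_ops(ctx, &my_ops);
 *      // ... later, when the driver queues an event ...
 *      cxl_context_events_pending(ctx, 1);     // wakes poll()/read() waiters
 */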

int cxl_start_work(struct cxl_context *ctx,
                   struct cxl_ioctl_start_work *work)
{
        int rc;

        /* code taken from afu_ioctl_start_work */
        if (!(work->flags & CXL_START_WORK_NUM_IRQS))
                work->num_interrupts = ctx->afu->pp_irqs;
        else if ((work->num_interrupts < ctx->afu->pp_irqs) ||
                 (work->num_interrupts > ctx->afu->irqs_max)) {
                return -EINVAL;
        }

        rc = afu_register_irqs(ctx, work->num_interrupts);
        if (rc)
                return rc;

        rc = cxl_start_context(ctx, work->work_element_descriptor, current);
        if (rc < 0) {
                afu_release_irqs(ctx, ctx);
                return rc;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(cxl_start_work);

void __iomem *cxl_psa_map(struct cxl_context *ctx)
{
        if (ctx->status != STARTED)
                return NULL;

        pr_devel("%s: psn_phys:%llx size:%llx\n",
                 __func__, ctx->psn_phys, ctx->psn_size);
        return ioremap(ctx->psn_phys, ctx->psn_size);
}
EXPORT_SYMBOL_GPL(cxl_psa_map);

void cxl_psa_unmap(void __iomem *addr)
{
        iounmap(addr);
}
EXPORT_SYMBOL_GPL(cxl_psa_unmap);
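
/*
 * Example (illustrative sketch only): accessing the problem state area MMIO
 * once the context has started; the register offset and out_be64() usage are
 * placeholders for AFU-specific MMIO accesses:
 *
 *      void __iomem *psa = cxl_psa_map(ctx);
 *      if (!psa)
 *              return -EIO;
 *      out_be64(psa + 0x20, cmd);
 *      cxl_psa_unmap(psa);
 */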

int cxl_afu_reset(struct cxl_context *ctx)
{
        struct cxl_afu *afu = ctx->afu;
        int rc;

        rc = cxl_ops->afu_reset(afu);
        if (rc)
                return rc;

        return cxl_ops->afu_check_and_enable(afu);
}
EXPORT_SYMBOL_GPL(cxl_afu_reset);

void cxl_perst_reloads_same_image(struct cxl_afu *afu,
                                  bool perst_reloads_same_image)
{
        afu->adapter->perst_same_image = perst_reloads_same_image;
}
EXPORT_SYMBOL_GPL(cxl_perst_reloads_same_image);

ssize_t cxl_read_adapter_vpd(struct pci_dev *dev, void *buf, size_t count)
{
        struct cxl_afu *afu = cxl_pci_to_afu(dev);
        if (IS_ERR(afu))
                return -ENODEV;

        return cxl_ops->read_adapter_vpd(afu->adapter, buf, count);
}
EXPORT_SYMBOL_GPL(cxl_read_adapter_vpd);

int cxl_set_max_irqs_per_process(struct pci_dev *dev, int irqs)
{
        struct cxl_afu *afu = cxl_pci_to_afu(dev);
        if (IS_ERR(afu))
                return -ENODEV;

        if (irqs > afu->adapter->user_irqs)
                return -EINVAL;

        /* Limit user_irqs to prevent the user increasing this via sysfs */
        afu->adapter->user_irqs = irqs;
        afu->irqs_max = irqs;

        return 0;
}
EXPORT_SYMBOL_GPL(cxl_set_max_irqs_per_process);

int cxl_get_max_irqs_per_process(struct pci_dev *dev)
{
        struct cxl_afu *afu = cxl_pci_to_afu(dev);
        if (IS_ERR(afu))
                return -ENODEV;

        return afu->irqs_max;
}
EXPORT_SYMBOL_GPL(cxl_get_max_irqs_per_process);

/*
 * This is a special interrupt allocation routine called from the PHB's MSI
 * setup function. When capi interrupts are allocated in this manner they must
 * still be associated with a running context, but since the MSI APIs have no
 * way to specify this we use the default context associated with the device.
 *
 * The Mellanox CX4 has a hardware limitation that restricts the maximum AFU
 * interrupt number, so in order to overcome this their driver informs us of
 * the restriction by setting the maximum interrupts per context, and we
 * allocate additional contexts as necessary so that we can keep the AFU
 * interrupt number within the supported range.
 */
int _cxl_cx4_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
        struct cxl_context *ctx, *new_ctx, *default_ctx;
        int remaining;
        int rc;

        ctx = default_ctx = cxl_get_context(pdev);
        if (WARN_ON(!default_ctx))
                return -ENODEV;

        remaining = nvec;
        while (remaining > 0) {
                rc = cxl_allocate_afu_irqs(ctx, min(remaining, ctx->afu->irqs_max));
                if (rc) {
                        pr_warn("%s: Failed to find enough free MSIs\n", pci_name(pdev));
                        return rc;
                }
                remaining -= ctx->afu->irqs_max;

                if (ctx != default_ctx && default_ctx->status == STARTED) {
                        WARN_ON(cxl_start_context(ctx,
                                be64_to_cpu(default_ctx->elem->common.wed),
                                NULL));
                }

                if (remaining > 0) {
                        new_ctx = cxl_dev_context_init(pdev);
                        if (IS_ERR(new_ctx)) {
                                pr_warn("%s: Failed to allocate enough contexts for MSIs\n", pci_name(pdev));
                                return -ENOSPC;
                        }
                        list_add(&new_ctx->extra_irq_contexts, &ctx->extra_irq_contexts);
                        ctx = new_ctx;
                }
        }

        return 0;
}
/* Exported via cxl_base */

void _cxl_cx4_teardown_msi_irqs(struct pci_dev *pdev)
{
        struct cxl_context *ctx, *pos, *tmp;

        ctx = cxl_get_context(pdev);
        if (WARN_ON(!ctx))
                return;

        cxl_free_afu_irqs(ctx);
        list_for_each_entry_safe(pos, tmp, &ctx->extra_irq_contexts, extra_irq_contexts) {
                cxl_stop_context(pos);
                cxl_free_afu_irqs(pos);
                list_del(&pos->extra_irq_contexts);
                cxl_release_context(pos);
        }
}
/* Exported via cxl_base */