/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/pci.h>
#include <asm/cputable.h>
#include <misc/cxl-base.h>

#include "cxl.h"
#include "trace.h"

static DEFINE_SPINLOCK(adapter_idr_lock);
static DEFINE_IDR(cxl_adapter_idr);

uint cxl_verbose;
module_param_named(verbose, cxl_verbose, uint, 0600);
MODULE_PARM_DESC(verbose, "Enable verbose dmesg output");

const struct cxl_backend_ops *cxl_ops;

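/*
 * Issue an SLB Invalidate All to the AFU and spin (bounded by CXL_TIMEOUT)
 * until the hardware no longer reports the invalidation as pending.
 */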
int cxl_afu_slbia(struct cxl_afu *afu)
{
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);

	pr_devel("cxl_afu_slbia issuing SLBIA command\n");
	cxl_p2n_write(afu, CXL_SLBIA_An, CXL_TLB_SLB_IQ_ALL);
	while (cxl_p2n_read(afu, CXL_SLBIA_An) & CXL_TLB_SLB_P) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&afu->dev, "WARNING: CXL AFU SLBIA timed out!\n");
			return -EBUSY;
		}
		/* If the adapter has gone down, we can assume that we
		 * will PERST it and that will invalidate everything.
		 */
		if (!cxl_ops->link_ok(afu->adapter))
			return -EIO;
		cpu_relax();
	}
	return 0;
}

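/*
 * If this context belongs to the mm being invalidated, wipe its software
 * segment table and invalidate the AFU's SLB so no stale segments remain.
 */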
static inline void _cxl_slbia(struct cxl_context *ctx, struct mm_struct *mm)
{
	struct task_struct *task;
	unsigned long flags;
	if (!(task = get_pid_task(ctx->pid, PIDTYPE_PID))) {
		pr_devel("%s unable to get task %i\n",
			 __func__, pid_nr(ctx->pid));
		return;
	}

	if (task->mm != mm)
		goto out_put;

	pr_devel("%s matched mm - card: %i afu: %i pe: %i\n", __func__,
		 ctx->afu->adapter->adapter_num, ctx->afu->slice, ctx->pe);

	spin_lock_irqsave(&ctx->sste_lock, flags);
	trace_cxl_slbia(ctx);
	memset(ctx->sstp, 0, ctx->sst_size);
	spin_unlock_irqrestore(&ctx->sste_lock, flags);
	mb();
	cxl_afu_slbia(ctx->afu);
out_put:
	put_task_struct(task);
}

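/*
 * Walk every context on every enabled AFU of every adapter and apply
 * _cxl_slbia() for the given mm.
 */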
static inline void cxl_slbia_core(struct mm_struct *mm)
{
	struct cxl *adapter;
	struct cxl_afu *afu;
	struct cxl_context *ctx;
	int card, slice, id;

	pr_devel("%s called\n", __func__);

	spin_lock(&adapter_idr_lock);
	idr_for_each_entry(&cxl_adapter_idr, adapter, card) {
		/* XXX: Make this lookup faster with link from mm to ctx */
		spin_lock(&adapter->afu_list_lock);
		for (slice = 0; slice < adapter->slices; slice++) {
			afu = adapter->afu[slice];
			if (!afu || !afu->enabled)
				continue;
			rcu_read_lock();
			idr_for_each_entry(&afu->contexts_idr, ctx, id)
				_cxl_slbia(ctx, mm);
			rcu_read_unlock();
		}
		spin_unlock(&adapter->afu_list_lock);
	}
	spin_unlock(&adapter_idr_lock);
}

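/* Callbacks registered in init_cxl() so core SLB invalidations reach active contexts */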
static struct cxl_calls cxl_calls = {
	.cxl_slbia = cxl_slbia_core,
	.owner = THIS_MODULE,
};

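/*
 * Allocate the per-context segment table (one zeroed page) and derive the
 * SSTP0/SSTP1 register values that tell the hardware where to find it.
 */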
int cxl_alloc_sst(struct cxl_context *ctx)
{
	unsigned long vsid;
	u64 ea_mask, size, sstp0, sstp1;

	sstp0 = 0;
	sstp1 = 0;

	ctx->sst_size = PAGE_SIZE;
	ctx->sst_lru = 0;
	ctx->sstp = (struct cxl_sste *)get_zeroed_page(GFP_KERNEL);
	if (!ctx->sstp) {
		pr_err("cxl_alloc_sst: Unable to allocate segment table\n");
		return -ENOMEM;
	}
	pr_devel("SSTP allocated at 0x%p\n", ctx->sstp);

	vsid = get_kernel_vsid((u64)ctx->sstp, mmu_kernel_ssize) << 12;

	sstp0 |= (u64)mmu_kernel_ssize << CXL_SSTP0_An_B_SHIFT;
	sstp0 |= (SLB_VSID_KERNEL | mmu_psize_defs[mmu_linear_psize].sllp) << 50;

	size = (((u64)ctx->sst_size >> 8) - 1) << CXL_SSTP0_An_SegTableSize_SHIFT;
	if (unlikely(size & ~CXL_SSTP0_An_SegTableSize_MASK)) {
		WARN(1, "Impossible segment table size\n");
		return -EINVAL;
	}
	sstp0 |= size;

	if (mmu_kernel_ssize == MMU_SEGSIZE_256M)
		ea_mask = 0xfffff00ULL;
	else
		ea_mask = 0xffffffff00ULL;

	sstp0 |= vsid >> (50-14);	/* Top 14 bits of VSID */
	sstp1 |= (vsid << (64-(50-14))) & ~ea_mask;
	sstp1 |= (u64)ctx->sstp & ea_mask;
	sstp1 |= CXL_SSTP1_An_V;

	pr_devel("Looked up %#llx: slbfee. %#llx (ssize: %x, vsid: %#lx), copied to SSTP0: %#llx, SSTP1: %#llx\n",
		 (u64)ctx->sstp, (u64)ctx->sstp & ESID_MASK, mmu_kernel_ssize, vsid, sstp0, sstp1);

	/* Store the calculated sstp hardware pointers for later use */
	ctx->sstp0 = sstp0;
	ctx->sstp1 = sstp1;

	return 0;
}

/* Find a CXL adapter by its number and increase its refcount */
struct cxl *get_cxl_adapter(int num)
{
	struct cxl *adapter;

	spin_lock(&adapter_idr_lock);
	if ((adapter = idr_find(&cxl_adapter_idr, num)))
		get_device(&adapter->dev);
	spin_unlock(&adapter_idr_lock);

	return adapter;
}

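/* Allocate a unique adapter number from the IDR; paired with cxl_remove_adapter_nr() */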
static int cxl_alloc_adapter_nr(struct cxl *adapter)
{
	int i;

	idr_preload(GFP_KERNEL);
	spin_lock(&adapter_idr_lock);
	i = idr_alloc(&cxl_adapter_idr, adapter, 0, 0, GFP_NOWAIT);
	spin_unlock(&adapter_idr_lock);
	idr_preload_end();
	if (i < 0)
		return i;

	adapter->adapter_num = i;

	return 0;
}

void cxl_remove_adapter_nr(struct cxl *adapter)
{
	idr_remove(&cxl_adapter_idr, adapter->adapter_num);
}

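/* Allocate a struct cxl, assign it an adapter number and a "cardN" device name */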
struct cxl *cxl_alloc_adapter(void)
{
	struct cxl *adapter;

	if (!(adapter = kzalloc(sizeof(struct cxl), GFP_KERNEL)))
		return NULL;

	spin_lock_init(&adapter->afu_list_lock);

	if (cxl_alloc_adapter_nr(adapter))
		goto err1;

	if (dev_set_name(&adapter->dev, "card%i", adapter->adapter_num))
		goto err2;

	return adapter;

err2:
	cxl_remove_adapter_nr(adapter);
err1:
	kfree(adapter);
	return NULL;
}

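/* Allocate and minimally initialise one AFU (accelerator slice) of an adapter */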
struct cxl_afu *cxl_alloc_afu(struct cxl *adapter, int slice)
{
	struct cxl_afu *afu;

	if (!(afu = kzalloc(sizeof(struct cxl_afu), GFP_KERNEL)))
		return NULL;

	afu->adapter = adapter;
	afu->dev.parent = &adapter->dev;
	afu->dev.release = cxl_ops->release_afu;
	afu->slice = slice;
	idr_init(&afu->contexts_idr);
	mutex_init(&afu->contexts_lock);
	spin_lock_init(&afu->afu_cntl_lock);
	mutex_init(&afu->spa_mutex);

	afu->prefault_mode = CXL_PREFAULT_NONE;
	afu->irqs_max = afu->adapter->user_irqs;

	return afu;
}

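/* Prefer AFU directed mode (multiple contexts) over dedicated process mode */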
int cxl_afu_select_best_mode(struct cxl_afu *afu)
{
	if (afu->modes_supported & CXL_MODE_DIRECTED)
		return cxl_ops->afu_activate_mode(afu, CXL_MODE_DIRECTED);

	if (afu->modes_supported & CXL_MODE_DEDICATED)
		return cxl_ops->afu_activate_mode(afu, CXL_MODE_DEDICATED);

	dev_warn(&afu->dev, "No supported programming modes available\n");
	/* We don't fail this so the user can inspect sysfs */
	return 0;
}

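/*
 * Module init: set up the file interface and debugfs, register the cxl_calls
 * hooks, select the native backend ops and register the PCI driver. Unwound
 * in reverse order on failure and in exit_cxl().
 */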
static int __init init_cxl(void)
{
	int rc = 0;

	if (!cpu_has_feature(CPU_FTR_HVMODE))
		return -EPERM;

	if ((rc = cxl_file_init()))
		return rc;

	cxl_debugfs_init();

	if ((rc = register_cxl_calls(&cxl_calls)))
		goto err;

	cxl_ops = &cxl_native_ops;
	if ((rc = pci_register_driver(&cxl_pci_driver)))
		goto err1;

	return 0;
err1:
	unregister_cxl_calls(&cxl_calls);
err:
	cxl_debugfs_exit();
	cxl_file_exit();

	return rc;
}

static void exit_cxl(void)
{
	pci_unregister_driver(&cxl_pci_driver);

	cxl_debugfs_exit();
	cxl_file_exit();
	unregister_cxl_calls(&cxl_calls);
	idr_destroy(&cxl_adapter_idr);
}

module_init(init_cxl);
module_exit(exit_cxl);

MODULE_DESCRIPTION("IBM Coherent Accelerator");
MODULE_AUTHOR("Ian Munsie <imunsie@au1.ibm.com>");
MODULE_LICENSE("GPL");