/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

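/*
 * Translation fault handling for cxl contexts: loading segment table
 * entries and resolving page faults on behalf of an AFU.
 */
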
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/pid.h>
#include <linux/mm.h>
#include <linux/moduleparam.h>

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "cxl" "."
#include <asm/current.h>
#include <asm/copro.h>
#include <asm/mmu.h>

#include "cxl.h"
#include "trace.h"

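/* SSTEs are stored big-endian, so byte-swap the SLB values for comparison. */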
static bool sste_matches(struct cxl_sste *sste, struct copro_slb *slb)
{
	return ((sste->vsid_data == cpu_to_be64(slb->vsid)) &&
		(sste->esid_data == cpu_to_be64(slb->esid)));
}

/*
 * This finds a free SSTE for the given SLB, or returns NULL if it's already in
 * the segment table.
 */
static struct cxl_sste *find_free_sste(struct cxl_context *ctx,
				       struct copro_slb *slb)
{
	struct cxl_sste *primary, *sste, *ret = NULL;
	unsigned int mask = (ctx->sst_size >> 7) - 1; /* SSTP0[SegTableSize] */
	unsigned int entry;
	unsigned int hash;

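	/* Hash the ESID, using the segment size named by the SLB entry. */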
	if (slb->vsid & SLB_VSID_B_1T)
		hash = (slb->esid >> SID_SHIFT_1T) & mask;
	else /* 256M */
		hash = (slb->esid >> SID_SHIFT) & mask;

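	/* Each hash bucket is a group of eight consecutive SSTEs. */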
	primary = ctx->sstp + (hash << 3);

	for (entry = 0, sste = primary; entry < 8; entry++, sste++) {
		if (!ret && !(be64_to_cpu(sste->esid_data) & SLB_ESID_V))
			ret = sste;
		if (sste_matches(sste, slb))
			return NULL;
	}
	if (ret)
		return ret;

	/* Nothing free, select an entry to cast out */
	ret = primary + ctx->sst_lru;
	ctx->sst_lru = (ctx->sst_lru + 1) & 0x7;

	return ret;
}

static void cxl_load_segment(struct cxl_context *ctx, struct copro_slb *slb)
{
	/*
	 * find_free_sste() hashes into the primary SSTE group for this
	 * segment and returns a free slot, or one to cast out.
	 */
	struct cxl_sste *sste;
	unsigned long flags;

	spin_lock_irqsave(&ctx->sste_lock, flags);
	sste = find_free_sste(ctx, slb);
	if (!sste)
		goto out_unlock;

	pr_devel("CXL Populating SST[%li]: %#llx %#llx\n",
			sste - ctx->sstp, slb->vsid, slb->esid);
	trace_cxl_ste_write(ctx, sste - ctx->sstp, slb->esid, slb->vsid);

	sste->vsid_data = cpu_to_be64(slb->vsid);
	sste->esid_data = cpu_to_be64(slb->esid);
out_unlock:
	spin_unlock_irqrestore(&ctx->sste_lock, flags);
}

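/*
 * Calculate the SLB entry for an effective address in the given mm and
 * install it in the context's segment table.
 */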
static int cxl_fault_segment(struct cxl_context *ctx, struct mm_struct *mm,
			     u64 ea)
{
	struct copro_slb slb = {0,0};
	int rc;

	if (!(rc = copro_calculate_slb(mm, ea, &slb))) {
		cxl_load_segment(ctx, &slb);
	}

	return rc;
}

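/*
 * Acknowledge the fault with an Address Error and record it on the
 * context so anyone waiting on the context's wait queue can report it.
 */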
static void cxl_ack_ae(struct cxl_context *ctx)
{
	unsigned long flags;

	cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_AE, 0);

	spin_lock_irqsave(&ctx->lock, flags);
	ctx->pending_fault = true;
	ctx->fault_addr = ctx->dar;
	ctx->fault_dsisr = ctx->dsisr;
	spin_unlock_irqrestore(&ctx->lock, flags);

	wake_up_all(&ctx->wq);
}

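/*
 * Handle a segment table miss: fault the segment in, then restart the
 * translation (TFC_An_R), or signal an address error on failure.
 */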
static int cxl_handle_segment_miss(struct cxl_context *ctx,
				   struct mm_struct *mm, u64 ea)
{
	int rc;

	pr_devel("CXL interrupt: Segment fault pe: %i ea: %#llx\n", ctx->pe, ea);
	trace_cxl_ste_miss(ctx, ea);

	if ((rc = cxl_fault_segment(ctx, mm, ea)))
		cxl_ack_ae(ctx);
	else {
		mb(); /* Order seg table write to TFC MMIO write */
		cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
	}

	return IRQ_HANDLED;
}

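/*
 * Handle a page fault: fault the page in through the mm, then preload
 * the hash page table entry before restarting the translation.
 */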
static void cxl_handle_page_fault(struct cxl_context *ctx,
				  struct mm_struct *mm, u64 dsisr, u64 dar)
{
	unsigned flt = 0;
	int result;
	unsigned long access, flags, inv_flags = 0;

	trace_cxl_pte_miss(ctx, dsisr, dar);

	if ((result = copro_handle_mm_fault(mm, dar, dsisr, &flt))) {
		pr_devel("copro_handle_mm_fault failed: %#x\n", result);
		return cxl_ack_ae(ctx);
	}

	/*
	 * update_mmu_cache() will not have loaded the hash since current->trap
	 * is not a 0x400 or 0x300, so just call hash_page_mm() here.
	 */
	access = _PAGE_PRESENT | _PAGE_READ;
	if (dsisr & CXL_PSL_DSISR_An_S)
		access |= _PAGE_WRITE;

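	/* Kernel contexts fault at privileged level, except for user-region EAs. */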
	access |= _PAGE_PRIVILEGED;
	if ((!ctx->kernel) || (REGION_ID(dar) == USER_REGION_ID))
		access &= ~_PAGE_PRIVILEGED;

	if (dsisr & DSISR_NOHPTE)
		inv_flags |= HPTE_NOHPTE_UPDATE;

	local_irq_save(flags);
	hash_page_mm(mm, dar, access, 0x300, inv_flags);
	local_irq_restore(flags);

	pr_devel("Page fault successfully handled for pe: %i!\n", ctx->pe);
	cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
}

/*
 * Returns the mm_struct corresponding to the context ctx via ctx->pid.
 * If that task has exited, use the thread group leader (ctx->glpid) to
 * find the next task in the thread group that still has a valid
 * mm_struct. When one is found, ctx->pid is updated to point at that
 * task for subsequent translations. If no task in the group has a
 * valid mm_struct to service the fault, NULL is returned.
 */
static struct mm_struct *get_mem_context(struct cxl_context *ctx)
{
	struct task_struct *task = NULL;
	struct mm_struct *mm = NULL;
	struct pid *old_pid = ctx->pid;

	if (old_pid == NULL) {
		pr_warn("%s: Invalid context for pe=%d\n",
			__func__, ctx->pe);
		return NULL;
	}

	task = get_pid_task(old_pid, PIDTYPE_PID);

	/*
	 * pid_alive() may look racy, but it saves us a costly get_task_mm()
	 * when the task is a zombie. In the worst case we may think a task
	 * that is about to die is still alive, but then get_task_mm() will
	 * return NULL.
	 */
	if (task != NULL && pid_alive(task))
		mm = get_task_mm(task);

	/* release the task struct that was taken earlier */
	if (task)
		put_task_struct(task);
	else
		pr_devel("%s: Context owning pid=%i for pe=%i dead\n",
			__func__, pid_nr(old_pid), ctx->pe);

	/*
	 * If we couldn't find the mm context then use the group
	 * leader to iterate over the task group and find a task
	 * that gives us an mm_struct.
	 */
	if (unlikely(mm == NULL && ctx->glpid != NULL)) {
		rcu_read_lock();
		task = pid_task(ctx->glpid, PIDTYPE_PID);
		if (task)
			do {
				mm = get_task_mm(task);
				if (mm) {
					ctx->pid = get_task_pid(task,
								PIDTYPE_PID);
					break;
				}
				task = next_thread(task);
			} while (task && !thread_group_leader(task));
		rcu_read_unlock();

		/* check if we switched pid */
		if (ctx->pid != old_pid) {
			if (mm)
				pr_devel("%s:pe=%i switch pid %i->%i\n",
					 __func__, ctx->pe, pid_nr(old_pid),
					 pid_nr(ctx->pid));
			else
				pr_devel("%s:Cannot find mm for pid=%i\n",
					 __func__, pid_nr(old_pid));

			/* drop the reference to the older pid */
			put_pid(old_pid);
		}
	}

	return mm;
}

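/* Bottom half of the fault handler, run from the context's fault_work. */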
void cxl_handle_fault(struct work_struct *fault_work)
{
	struct cxl_context *ctx =
		container_of(fault_work, struct cxl_context, fault_work);
	u64 dsisr = ctx->dsisr;
	u64 dar = ctx->dar;
	struct mm_struct *mm = NULL;

	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		if (cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An) != dsisr ||
		    cxl_p2n_read(ctx->afu, CXL_PSL_DAR_An) != dar ||
		    cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) != ctx->pe) {
			/* Most likely explanation is harmless - a dedicated
			 * process has detached and these were cleared by the
			 * PSL purge, but warn about it just in case
			 */
			dev_notice(&ctx->afu->dev, "cxl_handle_fault: Translation fault regs changed\n");
			return;
		}
	}

	/* Early return if the context is being / has been detached */
	if (ctx->status == CLOSED) {
		cxl_ack_ae(ctx);
		return;
	}

	pr_devel("CXL BOTTOM HALF handling fault for afu pe: %i. "
		"DSISR: %#llx DAR: %#llx\n", ctx->pe, dsisr, dar);

	if (!ctx->kernel) {
		mm = get_mem_context(ctx);
		/* indicates all the threads in the task group have exited */
		if (mm == NULL) {
			pr_devel("%s: unable to get mm for pe=%d pid=%i\n",
				 __func__, ctx->pe, pid_nr(ctx->pid));
			cxl_ack_ae(ctx);
			return;
		} else {
			pr_devel("Handling page fault for pe=%d pid=%i\n",
				 ctx->pe, pid_nr(ctx->pid));
		}
	}

	if (dsisr & CXL_PSL_DSISR_An_DS)
		cxl_handle_segment_miss(ctx, mm, dar);
	else if (dsisr & CXL_PSL_DSISR_An_DM)
		cxl_handle_page_fault(ctx, mm, dsisr, dar);
	else
		WARN(1, "cxl_handle_fault has nothing to handle\n");

	if (mm)
		mmput(mm);
}

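/* Pre-load the segment covering a single EA (used for the WED below). */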
static void cxl_prefault_one(struct cxl_context *ctx, u64 ea)
{
	struct mm_struct *mm;

	mm = get_mem_context(ctx);
	if (mm == NULL) {
		pr_devel("cxl_prefault_one unable to get mm %i\n",
			 pid_nr(ctx->pid));
		return;
	}

	cxl_fault_segment(ctx, mm, ea);

	mmput(mm);
}

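/* Round an EA up to the first address of the next 256M or 1T segment. */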
static u64 next_segment(u64 ea, u64 vsid)
{
	if (vsid & SLB_VSID_B_1T)
		ea |= (1ULL << 40) - 1;
	else
		ea |= (1ULL << 28) - 1;

	return ea + 1;
}

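/*
 * Walk every VMA in the context's mm and load one SSTE per segment
 * touched, skipping repeats of the same ESID.
 */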
static void cxl_prefault_vma(struct cxl_context *ctx)
{
	u64 ea, last_esid = 0;
	struct copro_slb slb;
	struct vm_area_struct *vma;
	int rc;
	struct mm_struct *mm;

	mm = get_mem_context(ctx);
	if (mm == NULL) {
		pr_devel("cxl_prefault_vm unable to get mm %i\n",
			 pid_nr(ctx->pid));
		return;
	}

	down_read(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		for (ea = vma->vm_start; ea < vma->vm_end;
				ea = next_segment(ea, slb.vsid)) {
			rc = copro_calculate_slb(mm, ea, &slb);
			if (rc)
				continue;

			if (last_esid == slb.esid)
				continue;

			cxl_load_segment(ctx, &slb);
			last_esid = slb.esid;
		}
	}
	up_read(&mm->mmap_sem);

	mmput(mm);
}

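/*
 * Optionally warm up the segment table at context start: prefault just
 * the segment containing the WED, every segment mapped by the mm, or
 * nothing, depending on the AFU's prefault_mode.
 */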
void cxl_prefault(struct cxl_context *ctx, u64 wed)
{
	switch (ctx->afu->prefault_mode) {
	case CXL_PREFAULT_WED:
		cxl_prefault_one(ctx, wed);
		break;
	case CXL_PREFAULT_ALL:
		cxl_prefault_vma(ctx);
		break;
	default:
		break;
	}
}