/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/workqueue.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/pid.h>
#include <linux/mm.h>
#include <linux/moduleparam.h>

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "cxl" "."
#include <asm/current.h>
#include <asm/copro.h>
#include <asm/mmu.h>

#include "cxl.h"
#include "trace.h"

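/* Return true if the SSTE already maps the same segment as the SLB. */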
static bool sste_matches(struct cxl_sste *sste, struct copro_slb *slb)
{
        return ((sste->vsid_data == cpu_to_be64(slb->vsid)) &&
                (sste->esid_data == cpu_to_be64(slb->esid)));
}

/*
 * This finds a free SSTE for the given SLB, or returns NULL if it's already in
 * the segment table.
 */
static struct cxl_sste *find_free_sste(struct cxl_context *ctx,
                                       struct copro_slb *slb)
{
        struct cxl_sste *primary, *sste, *ret = NULL;
        unsigned int mask = (ctx->sst_size >> 7) - 1; /* SSTP0[SegTableSize] */
        unsigned int entry;
        unsigned int hash;

        if (slb->vsid & SLB_VSID_B_1T)
                hash = (slb->esid >> SID_SHIFT_1T) & mask;
        else /* 256M */
                hash = (slb->esid >> SID_SHIFT) & mask;

        primary = ctx->sstp + (hash << 3);

        for (entry = 0, sste = primary; entry < 8; entry++, sste++) {
                if (!ret && !(be64_to_cpu(sste->esid_data) & SLB_ESID_V))
                        ret = sste;
                if (sste_matches(sste, slb))
                        return NULL;
        }
        if (ret)
                return ret;

        /* Nothing free, select an entry to cast out */
        ret = primary + ctx->sst_lru;
        ctx->sst_lru = (ctx->sst_lru + 1) & 0x7;

        return ret;
}

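/*
 * Write an entry for the given SLB into the context's segment table,
 * casting out an existing entry if the group is full.
 */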
static void cxl_load_segment(struct cxl_context *ctx, struct copro_slb *slb)
{
        /* mask is the group index, we search primary and secondary here. */
        struct cxl_sste *sste;
        unsigned long flags;

        spin_lock_irqsave(&ctx->sste_lock, flags);
        sste = find_free_sste(ctx, slb);
        if (!sste)
                goto out_unlock;

        pr_devel("CXL Populating SST[%li]: %#llx %#llx\n",
                        sste - ctx->sstp, slb->vsid, slb->esid);
        trace_cxl_ste_write(ctx, sste - ctx->sstp, slb->esid, slb->vsid);

        sste->vsid_data = cpu_to_be64(slb->vsid);
        sste->esid_data = cpu_to_be64(slb->esid);
out_unlock:
        spin_unlock_irqrestore(&ctx->sste_lock, flags);
}

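/*
 * Calculate the SLB for an effective address and, if that succeeds, load the
 * corresponding segment into the context's segment table.
 */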
static int cxl_fault_segment(struct cxl_context *ctx, struct mm_struct *mm,
                             u64 ea)
{
        struct copro_slb slb = {0, 0};
        int rc;

        if (!(rc = copro_calculate_slb(mm, ea, &slb)))
                cxl_load_segment(ctx, &slb);

        return rc;
}

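/*
 * Acknowledge the fault towards the PSL with an Address Error, then record
 * the fault details on the context and wake up anyone waiting on it.
 */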
static void cxl_ack_ae(struct cxl_context *ctx)
{
        unsigned long flags;

        cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_AE, 0);

        spin_lock_irqsave(&ctx->lock, flags);
        ctx->pending_fault = true;
        ctx->fault_addr = ctx->dar;
        ctx->fault_dsisr = ctx->dsisr;
        spin_unlock_irqrestore(&ctx->lock, flags);

        wake_up_all(&ctx->wq);
}

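/*
 * Handle a segment miss: fault the segment in, then either restart the
 * translation (on success) or report an Address Error (on failure).
 */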
static int cxl_handle_segment_miss(struct cxl_context *ctx,
                                   struct mm_struct *mm, u64 ea)
{
        int rc;

        pr_devel("CXL interrupt: Segment fault pe: %i ea: %#llx\n", ctx->pe, ea);
        trace_cxl_ste_miss(ctx, ea);

        if ((rc = cxl_fault_segment(ctx, mm, ea)))
                cxl_ack_ae(ctx);
        else {
                mb(); /* Order seg table write to TFC MMIO write */
                cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
        }

        return IRQ_HANDLED;
}

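/*
 * Handle a page fault: resolve the faulting page via the mm, and on hash
 * MMUs preload the hash page table entry before restarting the translation.
 */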
static void cxl_handle_page_fault(struct cxl_context *ctx,
                                  struct mm_struct *mm, u64 dsisr, u64 dar)
{
        unsigned flt = 0;
        int result;
        unsigned long access, flags, inv_flags = 0;

        trace_cxl_pte_miss(ctx, dsisr, dar);

        if ((result = copro_handle_mm_fault(mm, dar, dsisr, &flt))) {
                pr_devel("copro_handle_mm_fault failed: %#x\n", result);
                return cxl_ack_ae(ctx);
        }

        if (!radix_enabled()) {
                /*
                 * update_mmu_cache() will not have loaded the hash since
                 * current->trap is not a 0x400 or 0x300, so just call
                 * hash_page_mm() here.
                 */
                access = _PAGE_PRESENT | _PAGE_READ;
                if (dsisr & CXL_PSL_DSISR_An_S)
                        access |= _PAGE_WRITE;

                access |= _PAGE_PRIVILEGED;
                if ((!ctx->kernel) || (REGION_ID(dar) == USER_REGION_ID))
                        access &= ~_PAGE_PRIVILEGED;

                if (dsisr & DSISR_NOHPTE)
                        inv_flags |= HPTE_NOHPTE_UPDATE;

                local_irq_save(flags);
                hash_page_mm(mm, dar, access, 0x300, inv_flags);
                local_irq_restore(flags);
        }
        pr_devel("Page fault successfully handled for pe: %i!\n", ctx->pe);
        cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
}

/*
 * Returns the mm_struct corresponding to the context ctx.
 * If mm_users is already 0, the context may be in the process of being
 * closed, so return NULL instead of taking a new reference.
 */
static struct mm_struct *get_mem_context(struct cxl_context *ctx)
{
        if (ctx->mm == NULL)
                return NULL;

        if (!atomic_inc_not_zero(&ctx->mm->mm_users))
                return NULL;

        return ctx->mm;
}

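/*
 * Only the PSL on POWER8 reports segment misses (DSISR[DS]); faults on
 * POWER9 are never treated as segment misses.
 */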
static bool cxl_is_segment_miss(struct cxl_context *ctx, u64 dsisr)
{
        if (cxl_is_power8() && (dsisr & CXL_PSL_DSISR_An_DS))
                return true;

        return false;
}

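/*
 * On POWER8 a page fault is flagged via DSISR[DM]. On POWER9 the PSL
 * reports a Translation Checkout Response Status instead, and any of the
 * statuses below is handled as a page fault.
 */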
static bool cxl_is_page_fault(struct cxl_context *ctx, u64 dsisr)
{
        u64 crs; /* Translation Checkout Response Status */

        if (cxl_is_power8() && (dsisr & CXL_PSL_DSISR_An_DM))
                return true;

        if (cxl_is_power9()) {
                crs = (dsisr & CXL_PSL9_DSISR_An_CO_MASK);
                if ((crs == CXL_PSL9_DSISR_An_PF_SLR) ||
                    (crs == CXL_PSL9_DSISR_An_PF_RGC) ||
                    (crs == CXL_PSL9_DSISR_An_PF_RGP) ||
                    (crs == CXL_PSL9_DSISR_An_PF_HRH) ||
                    (crs == CXL_PSL9_DSISR_An_PF_STEG) ||
                    (crs == CXL_PSL9_DSISR_An_URTCH))
                        return true;
        }

        return false;
}

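/*
 * Bottom half for translation faults: validates that the fault is still
 * current, grabs the mm for user contexts, and dispatches to the segment
 * miss or page fault handler.
 */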
void cxl_handle_fault(struct work_struct *fault_work)
{
        struct cxl_context *ctx =
                container_of(fault_work, struct cxl_context, fault_work);
        u64 dsisr = ctx->dsisr;
        u64 dar = ctx->dar;
        struct mm_struct *mm = NULL;

        if (cpu_has_feature(CPU_FTR_HVMODE)) {
                if (cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An) != dsisr ||
                    cxl_p2n_read(ctx->afu, CXL_PSL_DAR_An) != dar ||
                    cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) != ctx->pe) {
                        /*
                         * Most likely explanation is harmless - a dedicated
                         * process has detached and these were cleared by the
                         * PSL purge, but warn about it just in case.
                         */
                        dev_notice(&ctx->afu->dev, "cxl_handle_fault: Translation fault regs changed\n");
                        return;
                }
        }

        /* Early return if the context is being / has been detached */
        if (ctx->status == CLOSED) {
                cxl_ack_ae(ctx);
                return;
        }

        pr_devel("CXL BOTTOM HALF handling fault for afu pe: %i. "
                "DSISR: %#llx DAR: %#llx\n", ctx->pe, dsisr, dar);

        if (!ctx->kernel) {
                mm = get_mem_context(ctx);
                if (mm == NULL) {
                        pr_devel("%s: unable to get mm for pe=%d pid=%i\n",
                                 __func__, ctx->pe, pid_nr(ctx->pid));
                        cxl_ack_ae(ctx);
                        return;
                } else {
                        pr_devel("Handling page fault for pe=%d pid=%i\n",
                                 ctx->pe, pid_nr(ctx->pid));
                }
        }

        if (cxl_is_segment_miss(ctx, dsisr))
                cxl_handle_segment_miss(ctx, mm, dar);
        else if (cxl_is_page_fault(ctx, dsisr))
                cxl_handle_page_fault(ctx, mm, dsisr, dar);
        else
                WARN(1, "cxl_handle_fault has nothing to handle\n");

        if (mm)
                mmput(mm);
}

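/* Pre-load the segment table entry for a single effective address. */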
static void cxl_prefault_one(struct cxl_context *ctx, u64 ea)
{
        struct mm_struct *mm;

        mm = get_mem_context(ctx);
        if (mm == NULL) {
                pr_devel("cxl_prefault_one unable to get mm %i\n",
                         pid_nr(ctx->pid));
                return;
        }

        cxl_fault_segment(ctx, mm, ea);

        mmput(mm);
}

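/*
 * Return the first effective address of the segment following the one that
 * contains ea, for either 1T or 256M segments.
 */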
static u64 next_segment(u64 ea, u64 vsid)
{
        if (vsid & SLB_VSID_B_1T)
                ea |= (1ULL << 40) - 1;
        else
                ea |= (1ULL << 28) - 1;

        return ea + 1;
}

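/*
 * Walk every VMA of the context's mm and pre-load a segment table entry for
 * each segment the VMAs cover.
 */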
static void cxl_prefault_vma(struct cxl_context *ctx)
{
        u64 ea, last_esid = 0;
        struct copro_slb slb;
        struct vm_area_struct *vma;
        int rc;
        struct mm_struct *mm;

        mm = get_mem_context(ctx);
        if (mm == NULL) {
                pr_devel("cxl_prefault_vma unable to get mm %i\n",
                         pid_nr(ctx->pid));
                return;
        }

        down_read(&mm->mmap_sem);
        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                for (ea = vma->vm_start; ea < vma->vm_end;
                                ea = next_segment(ea, slb.vsid)) {
                        rc = copro_calculate_slb(mm, ea, &slb);
                        if (rc)
                                continue;

                        if (last_esid == slb.esid)
                                continue;

                        cxl_load_segment(ctx, &slb);
                        last_esid = slb.esid;
                }
        }
        up_read(&mm->mmap_sem);

        mmput(mm);
}

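/* Pre-fault segments at context attach time, per the AFU's prefault_mode. */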
void cxl_prefault(struct cxl_context *ctx, u64 wed)
{
        switch (ctx->afu->prefault_mode) {
        case CXL_PREFAULT_WED:
                cxl_prefault_one(ctx, wed);
                break;
        case CXL_PREFAULT_ALL:
                cxl_prefault_vma(ctx);
                break;
        default:
                break;
        }
}