/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/workqueue.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/pid.h>
#include <linux/mm.h>
#include <linux/moduleparam.h>

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "cxl" "."
#include <asm/current.h>
#include <asm/copro.h>
#include <asm/mmu.h>

#include "cxl.h"
#include "trace.h"

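/*
 * Handling of translation faults (segment misses and page faults) reported
 * by the PSL on behalf of an AFU, plus optional prefaulting of the segment
 * table when a context is started.
 */
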
static bool sste_matches(struct cxl_sste *sste, struct copro_slb *slb)
{
        return ((sste->vsid_data == cpu_to_be64(slb->vsid)) &&
                (sste->esid_data == cpu_to_be64(slb->esid)));
}

/*
 * This finds a free SSTE for the given SLB, or returns NULL if it's already in
 * the segment table.
 */
static struct cxl_sste *find_free_sste(struct cxl_context *ctx,
                                       struct copro_slb *slb)
{
        struct cxl_sste *primary, *sste, *ret = NULL;
        unsigned int mask = (ctx->sst_size >> 7) - 1; /* SSTP0[SegTableSize] */
        unsigned int entry;
        unsigned int hash;

        if (slb->vsid & SLB_VSID_B_1T)
                hash = (slb->esid >> SID_SHIFT_1T) & mask;
        else /* 256M */
                hash = (slb->esid >> SID_SHIFT) & mask;

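        /* Each hash group holds 8 STEs; the hash selects the group. */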
        primary = ctx->sstp + (hash << 3);

        for (entry = 0, sste = primary; entry < 8; entry++, sste++) {
                if (!ret && !(be64_to_cpu(sste->esid_data) & SLB_ESID_V))
                        ret = sste;
                if (sste_matches(sste, slb))
                        return NULL;
        }
        if (ret)
                return ret;

        /* Nothing free, select an entry to cast out */
        ret = primary + ctx->sst_lru;
        ctx->sst_lru = (ctx->sst_lru + 1) & 0x7;

        return ret;
}

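/*
 * Write an SLB entry into the context's segment table so the AFU can
 * translate within that segment. Serialised by ctx->sste_lock.
 */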
static void cxl_load_segment(struct cxl_context *ctx, struct copro_slb *slb)
{
        struct cxl_sste *sste;
        unsigned long flags;

        spin_lock_irqsave(&ctx->sste_lock, flags);
        sste = find_free_sste(ctx, slb);
        if (!sste)
                goto out_unlock;

        pr_devel("CXL Populating SST[%li]: %#llx %#llx\n",
                 sste - ctx->sstp, slb->vsid, slb->esid);
        trace_cxl_ste_write(ctx, sste - ctx->sstp, slb->esid, slb->vsid);

        sste->vsid_data = cpu_to_be64(slb->vsid);
        sste->esid_data = cpu_to_be64(slb->esid);
out_unlock:
        spin_unlock_irqrestore(&ctx->sste_lock, flags);
}

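/*
 * Handle a segment miss for effective address ea: calculate the SLB and,
 * on success, load it into the segment table. Returns non-zero if no
 * valid translation could be calculated.
 */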
static int cxl_fault_segment(struct cxl_context *ctx, struct mm_struct *mm,
                             u64 ea)
{
        struct copro_slb slb = {0, 0};
        int rc;

        if (!(rc = copro_calculate_slb(mm, ea, &slb))) {
                cxl_load_segment(ctx, &slb);
        }

        return rc;
}

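/*
 * Acknowledge the fault as an address error and record the details so
 * that anyone waiting on the context can see what went wrong.
 */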
static void cxl_ack_ae(struct cxl_context *ctx)
{
        unsigned long flags;

        cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_AE, 0);

        spin_lock_irqsave(&ctx->lock, flags);
        ctx->pending_fault = true;
        ctx->fault_addr = ctx->dar;
        ctx->fault_dsisr = ctx->dsisr;
        spin_unlock_irqrestore(&ctx->lock, flags);

        wake_up_all(&ctx->wq);
}

static int cxl_handle_segment_miss(struct cxl_context *ctx,
                                   struct mm_struct *mm, u64 ea)
{
        int rc;

        pr_devel("CXL interrupt: Segment fault pe: %i ea: %#llx\n", ctx->pe, ea);
        trace_cxl_ste_miss(ctx, ea);

        if ((rc = cxl_fault_segment(ctx, mm, ea)))
                cxl_ack_ae(ctx);
        else {
                mb(); /* Order seg table write to TFC MMIO write */
                cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
        }

        return IRQ_HANDLED;
}

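/*
 * Resolve a fault against the given mm. On a hash MMU we also preload
 * the HPT here, since the fault was not taken through the normal 0x300
 * or 0x400 exception path and update_mmu_cache() will therefore not
 * have done it for us.
 */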
int cxl_handle_mm_fault(struct mm_struct *mm, u64 dsisr, u64 dar)
{
        unsigned flt = 0;
        int result;
        unsigned long access, flags, inv_flags = 0;

        if ((result = copro_handle_mm_fault(mm, dar, dsisr, &flt))) {
                pr_devel("copro_handle_mm_fault failed: %#x\n", result);
                return result;
        }

        if (!radix_enabled()) {
                /*
                 * update_mmu_cache() will not have loaded the hash since
                 * current->trap is not a 0x400 or 0x300, so just call
                 * hash_page_mm() here.
                 */
                access = _PAGE_PRESENT | _PAGE_READ;
                if (dsisr & CXL_PSL_DSISR_An_S)
                        access |= _PAGE_WRITE;

                if (!mm && (REGION_ID(dar) != USER_REGION_ID))
                        access |= _PAGE_PRIVILEGED;

                if (dsisr & DSISR_NOHPTE)
                        inv_flags |= HPTE_NOHPTE_UPDATE;

                local_irq_save(flags);
                hash_page_mm(mm, dar, access, 0x300, inv_flags);
                local_irq_restore(flags);
        }
        return 0;
}

static void cxl_handle_page_fault(struct cxl_context *ctx,
                                  struct mm_struct *mm,
                                  u64 dsisr, u64 dar)
{
        trace_cxl_pte_miss(ctx, dsisr, dar);

        if (cxl_handle_mm_fault(mm, dsisr, dar)) {
                cxl_ack_ae(ctx);
        } else {
                pr_devel("Page fault successfully handled for pe: %i!\n", ctx->pe);
                cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
        }
}

/*
 * Returns the mm_struct corresponding to the context ctx, taking a
 * reference on it, or NULL if there is none. If mm_users has already
 * dropped to zero, the context may be in the process of being closed,
 * so refuse to pin the mm in that case.
 */
static struct mm_struct *get_mem_context(struct cxl_context *ctx)
{
        if (ctx->mm == NULL)
                return NULL;

        if (!atomic_inc_not_zero(&ctx->mm->mm_users))
                return NULL;

        return ctx->mm;
}

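/*
 * Only the POWER8 PSL reports segment misses (DSISR[DS]); PSL9 on POWER9
 * reports translation faults through the checkout response status, so
 * every fault there is treated as a page fault.
 */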
static bool cxl_is_segment_miss(struct cxl_context *ctx, u64 dsisr)
{
        if (cxl_is_power8() && (dsisr & CXL_PSL_DSISR_An_DS))
                return true;

        return false;
}

static bool cxl_is_page_fault(struct cxl_context *ctx, u64 dsisr)
{
        u64 crs; /* Translation Checkout Response Status */

        if (cxl_is_power8() && (dsisr & CXL_PSL_DSISR_An_DM))
                return true;

        if (cxl_is_power9()) {
                crs = (dsisr & CXL_PSL9_DSISR_An_CO_MASK);
                if ((crs == CXL_PSL9_DSISR_An_PF_SLR) ||
                    (crs == CXL_PSL9_DSISR_An_PF_RGC) ||
                    (crs == CXL_PSL9_DSISR_An_PF_RGP) ||
                    (crs == CXL_PSL9_DSISR_An_PF_HRH) ||
                    (crs == CXL_PSL9_DSISR_An_PF_STEG) ||
                    (crs == CXL_PSL9_DSISR_An_URTCH)) {
                        return true;
                }
        }

        return false;
}

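/*
 * Bottom half of the translation fault interrupt, run from the context's
 * fault_work work queue item so that it can sleep while faulting pages in.
 */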
void cxl_handle_fault(struct work_struct *fault_work)
{
        struct cxl_context *ctx =
                container_of(fault_work, struct cxl_context, fault_work);
        u64 dsisr = ctx->dsisr;
        u64 dar = ctx->dar;
        struct mm_struct *mm = NULL;

        if (cpu_has_feature(CPU_FTR_HVMODE)) {
                if (cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An) != dsisr ||
                    cxl_p2n_read(ctx->afu, CXL_PSL_DAR_An) != dar ||
                    cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) != ctx->pe) {
                        /*
                         * Most likely explanation is harmless - a dedicated
                         * process has detached and these were cleared by the
                         * PSL purge, but warn about it just in case.
                         */
                        dev_notice(&ctx->afu->dev, "cxl_handle_fault: Translation fault regs changed\n");
                        return;
                }
        }

        /* Early return if the context is being / has been detached */
        if (ctx->status == CLOSED) {
                cxl_ack_ae(ctx);
                return;
        }

        pr_devel("CXL BOTTOM HALF handling fault for afu pe: %i. "
                 "DSISR: %#llx DAR: %#llx\n", ctx->pe, dsisr, dar);

        if (!ctx->kernel) {
                mm = get_mem_context(ctx);
                if (mm == NULL) {
                        pr_devel("%s: unable to get mm for pe=%d pid=%i\n",
                                 __func__, ctx->pe, pid_nr(ctx->pid));
                        cxl_ack_ae(ctx);
                        return;
                }

                pr_devel("Handling page fault for pe=%d pid=%i\n",
                         ctx->pe, pid_nr(ctx->pid));
        }

        if (cxl_is_segment_miss(ctx, dsisr))
                cxl_handle_segment_miss(ctx, mm, dar);
        else if (cxl_is_page_fault(ctx, dsisr))
                cxl_handle_page_fault(ctx, mm, dsisr, dar);
        else
                WARN(1, "cxl_handle_fault has nothing to handle\n");

        if (mm)
                mmput(mm);
}

static void cxl_prefault_one(struct cxl_context *ctx, u64 ea)
{
        struct mm_struct *mm;

        mm = get_mem_context(ctx);
        if (mm == NULL) {
                pr_devel("cxl_prefault_one unable to get mm %i\n",
                         pid_nr(ctx->pid));
                return;
        }

        cxl_fault_segment(ctx, mm, ea);

        mmput(mm);
}

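/*
 * Returns the first effective address of the segment following ea, for
 * either a 256M or a 1T segment as indicated by the vsid.
 */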
static u64 next_segment(u64 ea, u64 vsid)
{
        if (vsid & SLB_VSID_B_1T)
                ea |= (1ULL << 40) - 1;
        else
                ea |= (1ULL << 28) - 1;

        return ea + 1;
}

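/*
 * Walk every VMA in the context's address space and load one STE per
 * segment touched, so the AFU starts with a warm segment table.
 */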
static void cxl_prefault_vma(struct cxl_context *ctx)
{
        u64 ea, last_esid = 0;
        struct copro_slb slb;
        struct vm_area_struct *vma;
        int rc;
        struct mm_struct *mm;

        mm = get_mem_context(ctx);
        if (mm == NULL) {
                pr_devel("cxl_prefault_vm unable to get mm %i\n",
                         pid_nr(ctx->pid));
                return;
        }

        down_read(&mm->mmap_sem);
        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                for (ea = vma->vm_start; ea < vma->vm_end;
                     ea = next_segment(ea, slb.vsid)) {
                        rc = copro_calculate_slb(mm, ea, &slb);
                        if (rc)
                                continue;

                        if (last_esid == slb.esid)
                                continue;

                        cxl_load_segment(ctx, &slb);
                        last_esid = slb.esid;
                }
        }
        up_read(&mm->mmap_sem);

        mmput(mm);
}

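/*
 * Optionally warm up the segment table when a context is started,
 * depending on the AFU's prefault_mode: just the segment containing the
 * work element descriptor (WED), or every segment in the address space.
 */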
void cxl_prefault(struct cxl_context *ctx, u64 wed)
{
        switch (ctx->afu->prefault_mode) {
        case CXL_PREFAULT_WED:
                cxl_prefault_one(ctx, wed);
                break;
        case CXL_PREFAULT_ALL:
                cxl_prefault_vma(ctx);
                break;
        default:
                break;
        }
}