powerpc/mm/cxl: Add the fault handling cpu to mm cpumask
[linux-2.6-block.git] drivers/misc/cxl/fault.c
/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/workqueue.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/pid.h>
#include <linux/mm.h>
#include <linux/moduleparam.h>

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "cxl" "."
#include <asm/current.h>
#include <asm/copro.h>
#include <asm/mmu.h>

#include "cxl.h"
#include "trace.h"
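
/* Returns true if @sste already holds the ESID/VSID pair for @slb */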
static bool sste_matches(struct cxl_sste *sste, struct copro_slb *slb)
{
	return ((sste->vsid_data == cpu_to_be64(slb->vsid)) &&
		(sste->esid_data == cpu_to_be64(slb->esid)));
}

/*
 * This finds a free SSTE for the given SLB, or returns NULL if it's already in
 * the segment table.
 */
static struct cxl_sste *find_free_sste(struct cxl_context *ctx,
				       struct copro_slb *slb)
{
	struct cxl_sste *primary, *sste, *ret = NULL;
	unsigned int mask = (ctx->sst_size >> 7) - 1; /* SSTP0[SegTableSize] */
	unsigned int entry;
	unsigned int hash;

	if (slb->vsid & SLB_VSID_B_1T)
		hash = (slb->esid >> SID_SHIFT_1T) & mask;
	else /* 256M */
		hash = (slb->esid >> SID_SHIFT) & mask;

	primary = ctx->sstp + (hash << 3);

	for (entry = 0, sste = primary; entry < 8; entry++, sste++) {
		if (!ret && !(be64_to_cpu(sste->esid_data) & SLB_ESID_V))
			ret = sste;
		if (sste_matches(sste, slb))
			return NULL;
	}
	if (ret)
		return ret;

	/* Nothing free, select an entry to cast out */
	ret = primary + ctx->sst_lru;
	ctx->sst_lru = (ctx->sst_lru + 1) & 0x7;

	return ret;
}
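
/*
 * Insert @slb into the context's segment table: pick a free (or cast-out)
 * entry under sste_lock and write the VSID/ESID pair, unless the segment
 * is already present.
 */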
static void cxl_load_segment(struct cxl_context *ctx, struct copro_slb *slb)
{
	/* mask is the group index, we search primary and secondary here. */
	struct cxl_sste *sste;
	unsigned long flags;

	spin_lock_irqsave(&ctx->sste_lock, flags);
	sste = find_free_sste(ctx, slb);
	if (!sste)
		goto out_unlock;

	pr_devel("CXL Populating SST[%li]: %#llx %#llx\n",
		 sste - ctx->sstp, slb->vsid, slb->esid);
	trace_cxl_ste_write(ctx, sste - ctx->sstp, slb->esid, slb->vsid);

	sste->vsid_data = cpu_to_be64(slb->vsid);
	sste->esid_data = cpu_to_be64(slb->esid);
out_unlock:
	spin_unlock_irqrestore(&ctx->sste_lock, flags);
}
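
/*
 * Calculate the SLB for effective address @ea and, if that succeeds, load
 * it into the context's segment table.
 */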
static int cxl_fault_segment(struct cxl_context *ctx, struct mm_struct *mm,
			     u64 ea)
{
	struct copro_slb slb = {0, 0};
	int rc;

	if (!(rc = copro_calculate_slb(mm, ea, &slb))) {
		cxl_load_segment(ctx, &slb);
	}

	return rc;
}
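
/*
 * Acknowledge the translation fault as an address error (TFC AE) and
 * record it on the context so waiters on ctx->wq can report it.
 */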
static void cxl_ack_ae(struct cxl_context *ctx)
{
	unsigned long flags;

	cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_AE, 0);

	spin_lock_irqsave(&ctx->lock, flags);
	ctx->pending_fault = true;
	ctx->fault_addr = ctx->dar;
	ctx->fault_dsisr = ctx->dsisr;
	spin_unlock_irqrestore(&ctx->lock, flags);

	wake_up_all(&ctx->wq);
}
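
/*
 * Handle a segment table miss: fault the segment in and restart the
 * translation (TFC R), or report an address error if that fails.
 */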
static int cxl_handle_segment_miss(struct cxl_context *ctx,
				   struct mm_struct *mm, u64 ea)
{
	int rc;

	pr_devel("CXL interrupt: Segment fault pe: %i ea: %#llx\n", ctx->pe, ea);
	trace_cxl_ste_miss(ctx, ea);

	if ((rc = cxl_fault_segment(ctx, mm, ea)))
		cxl_ack_ae(ctx);
	else {
		mb(); /* Order seg table write to TFC MMIO write */
		cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
	}

	return IRQ_HANDLED;
}
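
/*
 * Fault in the page at @dar on behalf of the accelerator. On hash MMUs we
 * also insert the HPTE here, since update_mmu_cache() will not have done
 * it for this access.
 */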
int cxl_handle_mm_fault(struct mm_struct *mm, u64 dsisr, u64 dar)
{
	unsigned flt = 0;
	int result;
	unsigned long access, flags, inv_flags = 0;

	/*
	 * Add the fault handling cpu to task mm cpumask so that we
	 * can do a safe lockless page table walk when inserting the
	 * hash page table entry.
	 */
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
	if ((result = copro_handle_mm_fault(mm, dar, dsisr, &flt))) {
		pr_devel("copro_handle_mm_fault failed: %#x\n", result);
		return result;
	}

	if (!radix_enabled()) {
		/*
		 * update_mmu_cache() will not have loaded the hash since
		 * current->trap is not a 0x400 or 0x300, so just call
		 * hash_page_mm() here.
		 */
		access = _PAGE_PRESENT | _PAGE_READ;
		if (dsisr & CXL_PSL_DSISR_An_S)
			access |= _PAGE_WRITE;

		if (!mm && (REGION_ID(dar) != USER_REGION_ID))
			access |= _PAGE_PRIVILEGED;

		if (dsisr & DSISR_NOHPTE)
			inv_flags |= HPTE_NOHPTE_UPDATE;

		local_irq_save(flags);
		hash_page_mm(mm, dar, access, 0x300, inv_flags);
		local_irq_restore(flags);
	}
	return 0;
}
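
/*
 * Resolve a data storage fault via the mm and restart the translation
 * (TFC R), or report an address error if the fault cannot be handled.
 */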
static void cxl_handle_page_fault(struct cxl_context *ctx,
				  struct mm_struct *mm,
				  u64 dsisr, u64 dar)
{
	trace_cxl_pte_miss(ctx, dsisr, dar);

	if (cxl_handle_mm_fault(mm, dsisr, dar)) {
		cxl_ack_ae(ctx);
	} else {
		pr_devel("Page fault successfully handled for pe: %i!\n", ctx->pe);
		cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
	}
}

/*
 * Returns the mm_struct corresponding to the context ctx.
 * If mm_users == 0, the context may be in the process of being closed.
 */
static struct mm_struct *get_mem_context(struct cxl_context *ctx)
{
	if (ctx->mm == NULL)
		return NULL;

	if (!atomic_inc_not_zero(&ctx->mm->mm_users))
		return NULL;

	return ctx->mm;
}
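
/* Segment misses are only flagged on POWER8, via the DSISR DS bit */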
static bool cxl_is_segment_miss(struct cxl_context *ctx, u64 dsisr)
{
	if (cxl_is_power8() && (dsisr & CXL_PSL_DSISR_An_DS))
		return true;

	return false;
}
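
/*
 * POWER8 flags page faults via the DSISR DM bit; POWER9 encodes the fault
 * type in the translation checkout response status field of the DSISR.
 */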
static bool cxl_is_page_fault(struct cxl_context *ctx, u64 dsisr)
{
	u64 crs; /* Translation Checkout Response Status */

	if (cxl_is_power8() && (dsisr & CXL_PSL_DSISR_An_DM))
		return true;

	if (cxl_is_power9()) {
		crs = (dsisr & CXL_PSL9_DSISR_An_CO_MASK);
		if ((crs == CXL_PSL9_DSISR_An_PF_SLR) ||
		    (crs == CXL_PSL9_DSISR_An_PF_RGC) ||
		    (crs == CXL_PSL9_DSISR_An_PF_RGP) ||
		    (crs == CXL_PSL9_DSISR_An_PF_HRH) ||
		    (crs == CXL_PSL9_DSISR_An_PF_STEG) ||
		    (crs == CXL_PSL9_DSISR_An_URTCH)) {
			return true;
		}
	}

	return false;
}
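
/*
 * Bottom half of the translation fault handler, run from the context's
 * fault_work workqueue: validate the saved fault state, grab the mm for
 * user contexts and dispatch to the segment miss or page fault paths.
 */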
void cxl_handle_fault(struct work_struct *fault_work)
{
	struct cxl_context *ctx =
		container_of(fault_work, struct cxl_context, fault_work);
	u64 dsisr = ctx->dsisr;
	u64 dar = ctx->dar;
	struct mm_struct *mm = NULL;

	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		if (cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An) != dsisr ||
		    cxl_p2n_read(ctx->afu, CXL_PSL_DAR_An) != dar ||
		    cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) != ctx->pe) {
			/* Most likely explanation is harmless - a dedicated
			 * process has detached and these were cleared by the
			 * PSL purge, but warn about it just in case
			 */
			dev_notice(&ctx->afu->dev, "cxl_handle_fault: Translation fault regs changed\n");
			return;
		}
	}

	/* Early return if the context is being / has been detached */
	if (ctx->status == CLOSED) {
		cxl_ack_ae(ctx);
		return;
	}

	pr_devel("CXL BOTTOM HALF handling fault for afu pe: %i. "
		 "DSISR: %#llx DAR: %#llx\n", ctx->pe, dsisr, dar);

	if (!ctx->kernel) {
		mm = get_mem_context(ctx);
		if (mm == NULL) {
			pr_devel("%s: unable to get mm for pe=%d pid=%i\n",
				 __func__, ctx->pe, pid_nr(ctx->pid));
			cxl_ack_ae(ctx);
			return;
		} else {
			pr_devel("Handling page fault for pe=%d pid=%i\n",
				 ctx->pe, pid_nr(ctx->pid));
		}
	}

	if (cxl_is_segment_miss(ctx, dsisr))
		cxl_handle_segment_miss(ctx, mm, dar);
	else if (cxl_is_page_fault(ctx, dsisr))
		cxl_handle_page_fault(ctx, mm, dsisr, dar);
	else
		WARN(1, "cxl_handle_fault has nothing to handle\n");

	if (mm)
		mmput(mm);
}
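
/* Pre-load the segment table entry covering the effective address @ea */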
static void cxl_prefault_one(struct cxl_context *ctx, u64 ea)
{
	struct mm_struct *mm;

	mm = get_mem_context(ctx);
	if (mm == NULL) {
		pr_devel("cxl_prefault_one unable to get mm %i\n",
			 pid_nr(ctx->pid));
		return;
	}

	cxl_fault_segment(ctx, mm, ea);

	mmput(mm);
}
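
/* Returns the first EA of the segment following the one containing @ea */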
static u64 next_segment(u64 ea, u64 vsid)
{
	if (vsid & SLB_VSID_B_1T)
		ea |= (1ULL << 40) - 1;
	else
		ea |= (1ULL << 28) - 1;

	return ea + 1;
}
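
/*
 * Walk the mm's VMAs and load a segment table entry for each segment they
 * span, skipping consecutive addresses that share an ESID.
 */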
static void cxl_prefault_vma(struct cxl_context *ctx)
{
	u64 ea, last_esid = 0;
	struct copro_slb slb;
	struct vm_area_struct *vma;
	int rc;
	struct mm_struct *mm;

	mm = get_mem_context(ctx);
	if (mm == NULL) {
		pr_devel("cxl_prefault_vma unable to get mm %i\n",
			 pid_nr(ctx->pid));
		return;
	}

	down_read(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		for (ea = vma->vm_start; ea < vma->vm_end;
		     ea = next_segment(ea, slb.vsid)) {
			rc = copro_calculate_slb(mm, ea, &slb);
			if (rc)
				continue;

			if (last_esid == slb.esid)
				continue;

			cxl_load_segment(ctx, &slb);
			last_esid = slb.esid;
		}
	}
	up_read(&mm->mmap_sem);

	mmput(mm);
}
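
/*
 * Optionally warm the segment table when a context starts: fault in just
 * the segment of the work element descriptor, or every mapped segment,
 * depending on the AFU's prefault_mode.
 */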
void cxl_prefault(struct cxl_context *ctx, u64 wed)
{
	switch (ctx->afu->prefault_mode) {
	case CXL_PREFAULT_WED:
		cxl_prefault_one(ctx, wed);
		break;
	case CXL_PREFAULT_ALL:
		cxl_prefault_vma(ctx);
		break;
	default:
		break;
	}
}