/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/workqueue.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/pid.h>
#include <linux/mm.h>
#include <linux/moduleparam.h>

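/*
 * Make any module parameters in this file show up as "cxl.<param>"
 * rather than being prefixed with the object file name.
 */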
#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "cxl" "."
#include <asm/current.h>
#include <asm/copro.h>
#include <asm/mmu.h>

#include "cxl.h"
#include "trace.h"

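/* Does this segment table entry already map the given SLB? */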
static bool sste_matches(struct cxl_sste *sste, struct copro_slb *slb)
{
        return ((sste->vsid_data == cpu_to_be64(slb->vsid)) &&
                (sste->esid_data == cpu_to_be64(slb->esid)));
}

/*
 * This finds a free SSTE for the given SLB, or returns NULL if it's already in
 * the segment table.
 */
static struct cxl_sste *find_free_sste(struct cxl_context *ctx,
                                       struct copro_slb *slb)
{
        struct cxl_sste *primary, *sste, *ret = NULL;
        unsigned int mask = (ctx->sst_size >> 7) - 1; /* SSTP0[SegTableSize] */
        unsigned int entry, hash;

        if (slb->vsid & SLB_VSID_B_1T)
                hash = (slb->esid >> SID_SHIFT_1T) & mask;
        else /* 256M */
                hash = (slb->esid >> SID_SHIFT) & mask;

        primary = ctx->sstp + (hash << 3);

        for (entry = 0, sste = primary; entry < 8; entry++, sste++) {
                if (!ret && !(be64_to_cpu(sste->esid_data) & SLB_ESID_V))
                        ret = sste;
                if (sste_matches(sste, slb))
                        return NULL;
        }
        if (ret)
                return ret;

        /* Nothing free, select an entry to cast out */
        ret = primary + ctx->sst_lru;
        ctx->sst_lru = (ctx->sst_lru + 1) & 0x7;

        return ret;
}

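/*
 * Write the SLB into the context's segment table. sste_lock serialises
 * against concurrent faults updating the same table.
 */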
static void cxl_load_segment(struct cxl_context *ctx, struct copro_slb *slb)
{
        struct cxl_sste *sste;
        unsigned long flags;

        spin_lock_irqsave(&ctx->sste_lock, flags);
        sste = find_free_sste(ctx, slb);
        if (!sste)
                goto out_unlock;

        pr_devel("CXL Populating SST[%li]: %#llx %#llx\n",
                 sste - ctx->sstp, slb->vsid, slb->esid);
        trace_cxl_ste_write(ctx, sste - ctx->sstp, slb->esid, slb->vsid);

        sste->vsid_data = cpu_to_be64(slb->vsid);
        sste->esid_data = cpu_to_be64(slb->esid);
out_unlock:
        spin_unlock_irqrestore(&ctx->sste_lock, flags);
}

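/*
 * Resolve a segment fault on effective address @ea: calculate the SLB
 * entry for it and, on success, load it into the segment table.
 */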
static int cxl_fault_segment(struct cxl_context *ctx, struct mm_struct *mm,
                             u64 ea)
{
        struct copro_slb slb = {0, 0};
        int rc;

        if (!(rc = copro_calculate_slb(mm, ea, &slb)))
                cxl_load_segment(ctx, &slb);
        return rc;
}

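/*
 * Fault could not be handled: ack it with Address Error, record the
 * details on the context and wake up anyone waiting on it.
 */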
static void cxl_ack_ae(struct cxl_context *ctx)
{
        unsigned long flags;

        cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_AE, 0);

        spin_lock_irqsave(&ctx->lock, flags);
        ctx->pending_fault = true;
        ctx->fault_addr = ctx->dar;
        ctx->fault_dsisr = ctx->dsisr;
        spin_unlock_irqrestore(&ctx->lock, flags);

        wake_up_all(&ctx->wq);
}

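/*
 * Populate the segment table for @ea, then either restart the
 * translation (TFC_An_R) or signal the failure back to the AFU.
 */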
static int cxl_handle_segment_miss(struct cxl_context *ctx,
                                   struct mm_struct *mm, u64 ea)
{
        int rc;

        pr_devel("CXL interrupt: Segment fault pe: %i ea: %#llx\n", ctx->pe, ea);
        trace_cxl_ste_miss(ctx, ea);

        if ((rc = cxl_fault_segment(ctx, mm, ea)))
                cxl_ack_ae(ctx);
        else {
                mb(); /* Order seg table write to TFC MMIO write */
                cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
        }
        return IRQ_HANDLED;
}

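/*
 * Fault in the page backing @dar. On hash MMUs we must also insert the
 * HPTE ourselves, as explained in the comment below.
 */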
int cxl_handle_mm_fault(struct mm_struct *mm, u64 dsisr, u64 dar)
{
        unsigned flt = 0;
        int result;
        unsigned long access, flags, inv_flags = 0;

        if ((result = copro_handle_mm_fault(mm, dar, dsisr, &flt))) {
                pr_devel("copro_handle_mm_fault failed: %#x\n", result);
                return result;
        }

        if (!radix_enabled()) {
                /*
                 * update_mmu_cache() will not have loaded the hash since current->trap
                 * is not a 0x400 or 0x300, so just call hash_page_mm() here.
                 */
                access = _PAGE_PRESENT | _PAGE_READ;
                if (dsisr & CXL_PSL_DSISR_An_S)
                        access |= _PAGE_WRITE;

                if (!mm && (REGION_ID(dar) != USER_REGION_ID))
                        access |= _PAGE_PRIVILEGED;

                if (dsisr & DSISR_NOHPTE)
                        inv_flags |= HPTE_NOHPTE_UPDATE;

                local_irq_save(flags);
                hash_page_mm(mm, dar, access, 0x300, inv_flags);
                local_irq_restore(flags);
        }
        return 0;
}

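/*
 * Handle a data storage fault: fault the page in, then restart the
 * translation or report the failure back to the AFU.
 */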
static void cxl_handle_page_fault(struct cxl_context *ctx,
                                  struct mm_struct *mm,
                                  u64 dsisr, u64 dar)
{
        trace_cxl_pte_miss(ctx, dsisr, dar);

        if (cxl_handle_mm_fault(mm, dsisr, dar)) {
                cxl_ack_ae(ctx);
        } else {
                pr_devel("Page fault successfully handled for pe: %i!\n", ctx->pe);
                cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
        }
}

/*
 * Returns the mm_struct corresponding to the context ctx.
 * If mm_users is already 0, the context may be in the process of being
 * closed, so NULL is returned instead.
 */
static struct mm_struct *get_mem_context(struct cxl_context *ctx)
{
        if (ctx->mm == NULL)
                return NULL;

        if (!atomic_inc_not_zero(&ctx->mm->mm_users))
                return NULL;

        return ctx->mm;
}

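/* Segment misses are only reported on POWER8 (DSISR[DS]) */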
static bool cxl_is_segment_miss(struct cxl_context *ctx, u64 dsisr)
{
        if (cxl_is_power8() && (dsisr & CXL_PSL_DSISR_An_DS))
                return true;

        return false;
}

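/*
 * Decode DSISR: DSISR[DM] flags a page fault on POWER8; on POWER9 we
 * check the Translation Checkout Response Status against the handled
 * fault codes below.
 */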
static bool cxl_is_page_fault(struct cxl_context *ctx, u64 dsisr)
{
        u64 crs; /* Translation Checkout Response Status */

        if (cxl_is_power8() && (dsisr & CXL_PSL_DSISR_An_DM))
                return true;

        if (cxl_is_power9()) {
                crs = (dsisr & CXL_PSL9_DSISR_An_CO_MASK);
                if ((crs == CXL_PSL9_DSISR_An_PF_SLR) ||
                    (crs == CXL_PSL9_DSISR_An_PF_RGC) ||
                    (crs == CXL_PSL9_DSISR_An_PF_RGP) ||
                    (crs == CXL_PSL9_DSISR_An_PF_HRH) ||
                    (crs == CXL_PSL9_DSISR_An_PF_STEG) ||
                    (crs == CXL_PSL9_DSISR_An_URTCH))
                        return true;
        }

        return false;
}

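/*
 * Bottom half of the translation fault interrupt: runs from a work
 * queue so that handling the fault may sleep.
 */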
void cxl_handle_fault(struct work_struct *fault_work)
{
        struct cxl_context *ctx =
                container_of(fault_work, struct cxl_context, fault_work);
        u64 dsisr = ctx->dsisr;
        u64 dar = ctx->dar;
        struct mm_struct *mm = NULL;

        if (cpu_has_feature(CPU_FTR_HVMODE)) {
                if (cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An) != dsisr ||
                    cxl_p2n_read(ctx->afu, CXL_PSL_DAR_An) != dar ||
                    cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) != ctx->pe) {
                        /* Most likely explanation is harmless - a dedicated
                         * process has detached and these were cleared by the
                         * PSL purge, but warn about it just in case */
                        dev_notice(&ctx->afu->dev, "cxl_handle_fault: Translation fault regs changed\n");
                        return;
                }
        }

        /* Early return if the context is being / has been detached */
        if (ctx->status == CLOSED) {
                cxl_ack_ae(ctx);
                return;
        }

        pr_devel("CXL BOTTOM HALF handling fault for afu pe: %i. "
                "DSISR: %#llx DAR: %#llx\n", ctx->pe, dsisr, dar);

        /* Kernel contexts have no mm to take a reference on */
        if (!ctx->kernel) {
                mm = get_mem_context(ctx);
                if (mm == NULL) {
                        pr_devel("%s: unable to get mm for pe=%d pid=%i\n",
                                 __func__, ctx->pe, pid_nr(ctx->pid));
                        cxl_ack_ae(ctx);
                        return;
                } else {
                        pr_devel("Handling page fault for pe=%d pid=%i\n",
                                 ctx->pe, pid_nr(ctx->pid));
                }
        }

        if (cxl_is_segment_miss(ctx, dsisr))
                cxl_handle_segment_miss(ctx, mm, dar);
        else if (cxl_is_page_fault(ctx, dsisr))
                cxl_handle_page_fault(ctx, mm, dsisr, dar);
        else
                WARN(1, "cxl_handle_fault has nothing to handle\n");

        if (mm)
                mmput(mm);
}

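/* Pre-load the segment table entry covering a single effective address */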
static void cxl_prefault_one(struct cxl_context *ctx, u64 ea)
{
        struct mm_struct *mm;

        mm = get_mem_context(ctx);
        if (mm == NULL) {
                pr_devel("cxl_prefault_one unable to get mm %i\n",
                         pid_nr(ctx->pid));
                return;
        }

        cxl_fault_segment(ctx, mm, ea);
        mmput(mm);
}

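/*
 * Return the first effective address past the (256M or 1T) segment
 * containing @ea.
 */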
static u64 next_segment(u64 ea, u64 vsid)
{
        if (vsid & SLB_VSID_B_1T)
                ea |= (1ULL << 40) - 1;
        else
                ea |= (1ULL << 28) - 1;

        return ea + 1;
}

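/*
 * Walk every VMA of the task and pre-load a segment table entry for
 * each segment spanned, skipping a segment we have just loaded.
 */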
static void cxl_prefault_vma(struct cxl_context *ctx)
{
        u64 ea, last_esid = 0;
        struct copro_slb slb = {0, 0};
        struct vm_area_struct *vma;
        int rc;
        struct mm_struct *mm;

        mm = get_mem_context(ctx);
        if (mm == NULL) {
                pr_devel("cxl_prefault_vma unable to get mm %i\n",
                         pid_nr(ctx->pid));
                return;
        }

        down_read(&mm->mmap_sem);
        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                for (ea = vma->vm_start; ea < vma->vm_end;
                                ea = next_segment(ea, slb.vsid)) {
                        rc = copro_calculate_slb(mm, ea, &slb);
                        if (rc)
                                continue;

                        if (last_esid == slb.esid)
                                continue;

                        cxl_load_segment(ctx, &slb);
                        last_esid = slb.esid;
                }
        }
        up_read(&mm->mmap_sem);

        mmput(mm);
}

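/*
 * Optionally warm up the segment table before the AFU starts: just the
 * segment containing the work element descriptor, or every currently
 * mapped segment, depending on the AFU's prefault_mode setting.
 */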
void cxl_prefault(struct cxl_context *ctx, u64 wed)
{
        switch (ctx->afu->prefault_mode) {
        case CXL_PREFAULT_WED:
                cxl_prefault_one(ctx, wed);
                break;
        case CXL_PREFAULT_ALL:
                cxl_prefault_vma(ctx);
                break;
        default:
                break;
        }
}