cxl: Define process problem state area at attach time only
[linux-2.6-block.git] drivers/misc/cxl/irq.c
/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/pid.h>
#include <asm/cputable.h>
#include <misc/cxl-base.h>

#include "cxl.h"
#include "trace.h"
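
/*
 * Record the fault state reported by the PSL in the context and defer the
 * actual handling to the context's fault_work worker, which runs in process
 * context and so is allowed to take locks and sleep.
 */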
static irqreturn_t schedule_cxl_fault(struct cxl_context *ctx, u64 dsisr, u64 dar)
{
        ctx->dsisr = dsisr;
        ctx->dar = dar;
        schedule_work(&ctx->fault_work);
        return IRQ_HANDLED;
}
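
/*
 * Top half for PSL interrupts: decode the DSISR, punt segment misses and
 * page faults to the fault worker, latch AFU errors into the context for
 * anyone blocked on ctx->wq, and acknowledge or warn about the rest.
 */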
irqreturn_t cxl_irq(int irq, void *data, struct cxl_irq_info *irq_info)
{
        struct cxl_context *ctx = data;
        u64 dsisr, dar;

        dsisr = irq_info->dsisr;
        dar = irq_info->dar;

        trace_cxl_psl_irq(ctx, irq, dsisr, dar);

        pr_devel("CXL interrupt %i for afu pe: %i DSISR: %#llx DAR: %#llx\n", irq, ctx->pe, dsisr, dar);

        if (dsisr & CXL_PSL_DSISR_An_DS) {
                /*
                 * We don't inherently need to sleep to handle this, but we do
                 * need to get a ref to the task's mm, which we can't do from
                 * irq context without the potential for a deadlock since it
                 * takes the task_lock. An alternate option would be to keep a
                 * reference to the task's mm the entire time it has cxl open,
                 * but to do that we need to solve the issue where we hold a
                 * ref to the mm, but the mm can hold a ref to the fd after an
                 * mmap preventing anything from being cleaned up.
                 */
                pr_devel("Scheduling segment miss handling for later pe: %i\n", ctx->pe);
                return schedule_cxl_fault(ctx, dsisr, dar);
        }

        if (dsisr & CXL_PSL_DSISR_An_M)
                pr_devel("CXL interrupt: PTE not found\n");
        if (dsisr & CXL_PSL_DSISR_An_P)
                pr_devel("CXL interrupt: Storage protection violation\n");
        if (dsisr & CXL_PSL_DSISR_An_A)
                pr_devel("CXL interrupt: AFU lock access to write through or cache inhibited storage\n");
        if (dsisr & CXL_PSL_DSISR_An_S)
                pr_devel("CXL interrupt: Access was afu_wr or afu_zero\n");
        if (dsisr & CXL_PSL_DSISR_An_K)
                pr_devel("CXL interrupt: Access not permitted by virtual page class key protection\n");

        if (dsisr & CXL_PSL_DSISR_An_DM) {
                /*
                 * In some cases we might be able to handle the fault
                 * immediately if hash_page would succeed, but we still need
                 * the task's mm, which as above we can't get without a lock
                 */
                pr_devel("Scheduling page fault handling for later pe: %i\n", ctx->pe);
                return schedule_cxl_fault(ctx, dsisr, dar);
        }
        if (dsisr & CXL_PSL_DSISR_An_ST)
                WARN(1, "CXL interrupt: Segment Table PTE not found\n");
        if (dsisr & CXL_PSL_DSISR_An_UR)
                pr_devel("CXL interrupt: AURP PTE not found\n");
        if (dsisr & CXL_PSL_DSISR_An_PE)
                return handle_psl_slice_error(ctx, dsisr, irq_info->errstat);
        if (dsisr & CXL_PSL_DSISR_An_AE) {
                pr_devel("CXL interrupt: AFU Error 0x%016llx\n", irq_info->afu_err);

                if (ctx->pending_afu_err) {
                        /*
                         * This shouldn't happen - the PSL treats these errors
                         * as fatal and will have reset the AFU, so there's not
                         * much point buffering multiple AFU errors.
                         * OTOH if we DO ever see a storm of these come in it's
                         * probably best that we log them somewhere:
                         */
                        dev_err_ratelimited(&ctx->afu->dev, "CXL AFU Error "
                                            "undelivered to pe %i: 0x%016llx\n",
                                            ctx->pe, irq_info->afu_err);
                } else {
                        spin_lock(&ctx->lock);
                        ctx->afu_err = irq_info->afu_err;
                        ctx->pending_afu_err = 1;
                        spin_unlock(&ctx->lock);

                        wake_up_all(&ctx->wq);
                }

                cxl_ack_irq(ctx, CXL_PSL_TFC_An_A, 0);
                return IRQ_HANDLED;
        }
        if (dsisr & CXL_PSL_DSISR_An_OC)
                pr_devel("CXL interrupt: OS Context Warning\n");

        WARN(1, "Unhandled CXL PSL IRQ\n");
        return IRQ_HANDLED;
}
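
/*
 * Handler for AFU interrupts: walk the context's IRQ ranges to translate
 * the hardware IRQ back into a per-context AFU IRQ number (range 0 is
 * reserved for the multiplexed PSL interrupt), mark it pending in the
 * context's bitmap and wake up any waiters.
 */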
static irqreturn_t cxl_irq_afu(int irq, void *data)
{
        struct cxl_context *ctx = data;
        irq_hw_number_t hwirq = irqd_to_hwirq(irq_get_irq_data(irq));
        int irq_off, afu_irq = 1;
        __u16 range;
        int r;

        for (r = 1; r < CXL_IRQ_RANGES; r++) {
                irq_off = hwirq - ctx->irqs.offset[r];
                range = ctx->irqs.range[r];
                if (irq_off >= 0 && irq_off < range) {
                        afu_irq += irq_off;
                        break;
                }
                afu_irq += range;
        }
        if (unlikely(r >= CXL_IRQ_RANGES)) {
                WARN(1, "Received AFU IRQ out of range for pe %i (virq %i hwirq %lx)\n",
                     ctx->pe, irq, hwirq);
                return IRQ_HANDLED;
        }

        trace_cxl_afu_irq(ctx, afu_irq, irq, hwirq);
        pr_devel("Received AFU interrupt %i for pe: %i (virq %i hwirq %lx)\n",
                 afu_irq, ctx->pe, irq, hwirq);

        if (unlikely(!ctx->irq_bitmap)) {
                WARN(1, "Received AFU IRQ for context with no IRQ bitmap\n");
                return IRQ_HANDLED;
        }
        spin_lock(&ctx->lock);
        set_bit(afu_irq - 1, ctx->irq_bitmap);
        ctx->pending_irq = true;
        spin_unlock(&ctx->lock);

        wake_up_all(&ctx->wq);

        return IRQ_HANDLED;
}
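
/*
 * Map a hardware IRQ to a Linux virq and install the given handler.
 * Returns the virq on success or 0 on failure.
 */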
unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
                         irq_handler_t handler, void *cookie, const char *name)
{
        unsigned int virq;
        int result;

        /* IRQ Domain? */
        virq = irq_create_mapping(NULL, hwirq);
        if (!virq) {
                dev_warn(&adapter->dev, "cxl_map_irq: irq_create_mapping failed\n");
                return 0;
        }

        cxl_setup_irq(adapter, hwirq, virq);

        pr_devel("hwirq %#lx mapped to virq %u\n", hwirq, virq);

        result = request_irq(virq, handler, 0, name, cookie);
        if (result) {
                dev_warn(&adapter->dev, "cxl_map_irq: request_irq failed: %i\n", result);
                return 0;
        }

        return virq;
}

void cxl_unmap_irq(unsigned int virq, void *cookie)
{
        free_irq(virq, cookie);
        irq_dispose_mapping(virq);
}
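
/*
 * Allocate a single hardware IRQ from the adapter, map it and register the
 * handler in one step, handing both the hwirq and the virq back to the
 * caller. A minimal usage sketch follows; my_handler, my_cookie and the
 * "cxl-example" name are hypothetical, shown only to illustrate how the
 * register and release paths pair up:
 *
 *        irq_hw_number_t hwirq;
 *        unsigned int virq;
 *        int rc;
 *
 *        rc = cxl_register_one_irq(adapter, my_handler, my_cookie,
 *                                  &hwirq, &virq, "cxl-example");
 *        if (rc)
 *                return rc;
 *        ...
 *        cxl_unmap_irq(virq, my_cookie);
 *        cxl_release_one_irq(adapter, hwirq);
 */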
int cxl_register_one_irq(struct cxl *adapter,
                         irq_handler_t handler,
                         void *cookie,
                         irq_hw_number_t *dest_hwirq,
                         unsigned int *dest_virq,
                         const char *name)
{
        int hwirq, virq;

        if ((hwirq = cxl_alloc_one_irq(adapter)) < 0)
                return hwirq;

        if (!(virq = cxl_map_irq(adapter, hwirq, handler, cookie, name)))
                goto err;

        *dest_hwirq = hwirq;
        *dest_virq = virq;

        return 0;

err:
        cxl_release_one_irq(adapter, hwirq);
        return -ENOMEM;
}
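
/* Free the per-IRQ name strings and list entries built by afu_allocate_irqs() */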
void afu_irq_name_free(struct cxl_context *ctx)
{
        struct cxl_irq_name *irq_name, *tmp;

        list_for_each_entry_safe(irq_name, tmp, &ctx->irq_names, list) {
                kfree(irq_name->name);
                list_del(&irq_name->list);
                kfree(irq_name);
        }
}
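
/*
 * Allocate the IRQ ranges for a context and pre-build a descriptive name
 * ("cxl-<afu>-pe<pe>-<n>") for each AFU interrupt; the names are consumed
 * later by afu_register_hwirqs() when it calls request_irq() via
 * cxl_map_irq(). All allocations are unwound on failure.
 */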
int afu_allocate_irqs(struct cxl_context *ctx, u32 count)
{
        int rc, r, i, j = 1;
        struct cxl_irq_name *irq_name;

        /* Initialize the list head to hold irq names */
        INIT_LIST_HEAD(&ctx->irq_names);

        if ((rc = cxl_alloc_irq_ranges(&ctx->irqs, ctx->afu->adapter, count)))
                return rc;

        /* Multiplexed PSL Interrupt */
        ctx->irqs.offset[0] = ctx->afu->psl_hwirq;
        ctx->irqs.range[0] = 1;

        ctx->irq_count = count;
        ctx->irq_bitmap = kcalloc(BITS_TO_LONGS(count),
                                  sizeof(*ctx->irq_bitmap), GFP_KERNEL);
        if (!ctx->irq_bitmap)
                goto out;

        /*
         * Allocate names first. If any fail, bail out before allocating
         * actual hardware IRQs.
         */
        for (r = 1; r < CXL_IRQ_RANGES; r++) {
                for (i = 0; i < ctx->irqs.range[r]; i++) {
                        irq_name = kmalloc(sizeof(struct cxl_irq_name),
                                           GFP_KERNEL);
                        if (!irq_name)
                                goto out;
                        irq_name->name = kasprintf(GFP_KERNEL, "cxl-%s-pe%i-%i",
                                                   dev_name(&ctx->afu->dev),
                                                   ctx->pe, j);
                        if (!irq_name->name) {
                                kfree(irq_name);
                                goto out;
                        }
                        /* Add to tail so the next loop gets the correct order */
                        list_add_tail(&irq_name->list, &ctx->irq_names);
                        j++;
                }
        }
        return 0;

out:
        cxl_release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
        afu_irq_name_free(ctx);
        return -ENOMEM;
}
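
/* Wire up cxl_irq_afu as the handler for every AFU interrupt on the context */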
static void afu_register_hwirqs(struct cxl_context *ctx)
{
        irq_hw_number_t hwirq;
        struct cxl_irq_name *irq_name;
        int r, i;

        /* We've allocated all memory now, so let's do the irq allocations */
        irq_name = list_first_entry(&ctx->irq_names, struct cxl_irq_name, list);
        for (r = 1; r < CXL_IRQ_RANGES; r++) {
                hwirq = ctx->irqs.offset[r];
                for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
                        cxl_map_irq(ctx->afu->adapter, hwirq,
                                    cxl_irq_afu, ctx, irq_name->name);
                        irq_name = list_next_entry(irq_name, list);
                }
        }
}
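
/*
 * Registration is split in two: afu_allocate_irqs() does everything that
 * can fail (ranges, bitmap, names), then afu_register_hwirqs() performs
 * the actual mappings.
 */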
int afu_register_irqs(struct cxl_context *ctx, u32 count)
{
        int rc;

        rc = afu_allocate_irqs(ctx, count);
        if (rc)
                return rc;

        afu_register_hwirqs(ctx);
        return 0;
}
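
/*
 * Undo afu_register_irqs(): unmap every AFU interrupt, free the names and
 * the IRQ ranges, and reset the context's irq_count.
 */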
void afu_release_irqs(struct cxl_context *ctx, void *cookie)
{
        irq_hw_number_t hwirq;
        unsigned int virq;
        int r, i;

        for (r = 1; r < CXL_IRQ_RANGES; r++) {
                hwirq = ctx->irqs.offset[r];
                for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
                        virq = irq_find_mapping(NULL, hwirq);
                        if (virq)
                                cxl_unmap_irq(virq, cookie);
                }
        }

        afu_irq_name_free(ctx);
        cxl_release_irq_ranges(&ctx->irqs, ctx->afu->adapter);

        ctx->irq_count = 0;
}