drivers/misc/sgi-gru/grukdump.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SN Platform GRU Driver
 *
 * Dump GRU State
 *
 * Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <asm/uv/uv_hub.h>

#include <linux/nospec.h>

#include "gru.h"
#include "grutables.h"
#include "gruhandles.h"
#include "grulib.h"

#define CCH_LOCK_ATTEMPTS	10

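/*
 * Copy one GRU handle (GRU_HANDLE_BYTES) to the user buffer at *dp and
 * advance *dp past it.  Returns 0 on success, -1 on a copy fault.
 */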
static int gru_user_copy_handle(void __user **dp, void *s)
{
	if (copy_to_user(*dp, s, GRU_HANDLE_BYTES))
		return -1;
	*dp += GRU_HANDLE_BYTES;
	return 0;
}

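/*
 * Copy the CB, TFH and CBE handles for each allocated CBR of a context
 * to the user buffer, optionally flushing the CBRs from the cache first,
 * followed by the data segment (DSR) contents if requested.
 */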
static int gru_dump_context_data(void *grubase,
			struct gru_context_configuration_handle *cch,
			void __user *ubuf, int ctxnum, int dsrcnt,
			int flush_cbrs)
{
	void *cb, *cbe, *tfh, *gseg;
	int i, scr;

	gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
	cb = gseg + GRU_CB_BASE;
	cbe = grubase + GRU_CBE_BASE;
	tfh = grubase + GRU_TFH_BASE;

	for_each_cbr_in_allocation_map(i, &cch->cbr_allocation_map, scr) {
		if (flush_cbrs)
			gru_flush_cache(cb);
		if (gru_user_copy_handle(&ubuf, cb))
			goto fail;
		if (gru_user_copy_handle(&ubuf, tfh + i * GRU_HANDLE_STRIDE))
			goto fail;
		if (gru_user_copy_handle(&ubuf, cbe + i * GRU_HANDLE_STRIDE))
			goto fail;
		cb += GRU_HANDLE_STRIDE;
	}
	if (dsrcnt)
		memcpy(ubuf, gseg + GRU_DS_BASE, dsrcnt * GRU_HANDLE_STRIDE);
	return 0;

fail:
	return -EFAULT;
}

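/*
 * Copy all GRU_NUM_TFM TLB fault map handles of the chiplet to the user
 * buffer.  Returns the size of the dumped data, -EFBIG if the buffer is
 * too small, or -EFAULT on a copy fault.
 */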
static int gru_dump_tfm(struct gru_state *gru,
		void __user *ubuf, void __user *ubufend)
{
	struct gru_tlb_fault_map *tfm;
	int i;

	if (GRU_NUM_TFM * GRU_CACHE_LINE_BYTES > ubufend - ubuf)
		return -EFBIG;

	for (i = 0; i < GRU_NUM_TFM; i++) {
		tfm = get_tfm(gru->gs_gru_base_vaddr, i);
		if (gru_user_copy_handle(&ubuf, tfm))
			goto fail;
	}
	return GRU_NUM_TFM * GRU_CACHE_LINE_BYTES;

fail:
	return -EFAULT;
}

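/*
 * Copy all GRU_NUM_TGH TLB global handles of the chiplet to the user
 * buffer.  Returns the size of the dumped data, -EFBIG if the buffer is
 * too small, or -EFAULT on a copy fault.
 */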
static int gru_dump_tgh(struct gru_state *gru,
		void __user *ubuf, void __user *ubufend)
{
	struct gru_tlb_global_handle *tgh;
	int i;

	if (GRU_NUM_TGH * GRU_CACHE_LINE_BYTES > ubufend - ubuf)
		return -EFBIG;

	for (i = 0; i < GRU_NUM_TGH; i++) {
		tgh = get_tgh(gru->gs_gru_base_vaddr, i);
		if (gru_user_copy_handle(&ubuf, tgh))
			goto fail;
	}
	return GRU_NUM_TGH * GRU_CACHE_LINE_BYTES;

fail:
	return -EFAULT;
}

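/*
 * Dump one context: a gru_dump_context_header, the CCH, and (if the CCH
 * could be locked or locking was not requested) the context data.
 * Returns the number of bytes the dump occupies in the user buffer, or
 * a negative errno.
 */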
static int gru_dump_context(struct gru_state *gru, int ctxnum,
		void __user *ubuf, void __user *ubufend, char data_opt,
		char lock_cch, char flush_cbrs)
{
	struct gru_dump_context_header hdr;
	struct gru_dump_context_header __user *uhdr = ubuf;
	struct gru_context_configuration_handle *cch, *ubufcch;
	struct gru_thread_state *gts;
	int try, cch_locked, cbrcnt = 0, dsrcnt = 0, bytes = 0, ret = 0;
	void *grubase;

	memset(&hdr, 0, sizeof(hdr));
	grubase = gru->gs_gru_base_vaddr;
	cch = get_cch(grubase, ctxnum);
	for (try = 0; try < CCH_LOCK_ATTEMPTS; try++) {
		cch_locked = trylock_cch_handle(cch);
		if (cch_locked)
			break;
		msleep(1);
	}

	ubuf += sizeof(hdr);
	ubufcch = ubuf;
	if (gru_user_copy_handle(&ubuf, cch)) {
		if (cch_locked)
			unlock_cch_handle(cch);
		return -EFAULT;
	}
	if (cch_locked)
		ubufcch->delresp = 0;
	bytes = sizeof(hdr) + GRU_CACHE_LINE_BYTES;

	if (cch_locked || !lock_cch) {
		gts = gru->gs_gts[ctxnum];
		if (gts && gts->ts_vma) {
			hdr.pid = gts->ts_tgid_owner;
			hdr.vaddr = gts->ts_vma->vm_start;
		}
		if (cch->state != CCHSTATE_INACTIVE) {
			cbrcnt = hweight64(cch->cbr_allocation_map) *
						GRU_CBR_AU_SIZE;
			dsrcnt = data_opt ? hweight32(cch->dsr_allocation_map) *
						GRU_DSR_AU_CL : 0;
		}
		bytes += (3 * cbrcnt + dsrcnt) * GRU_CACHE_LINE_BYTES;
		if (bytes > ubufend - ubuf)
			ret = -EFBIG;
		else
			ret = gru_dump_context_data(grubase, cch, ubuf, ctxnum,
							dsrcnt, flush_cbrs);
	}
	if (cch_locked)
		unlock_cch_handle(cch);
	if (ret)
		return ret;

	hdr.magic = GRU_DUMP_MAGIC;
	hdr.gid = gru->gs_gid;
	hdr.ctxnum = ctxnum;
	hdr.cbrcnt = cbrcnt;
	hdr.dsrcnt = dsrcnt;
	hdr.cch_locked = cch_locked;
	if (copy_to_user(uhdr, &hdr, sizeof(hdr)))
		return -EFAULT;

	return bytes;
}

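/*
 * Handle a userspace request (struct gru_dump_chiplet_state_req) to dump
 * the state of one GRU chiplet: the TFM and TGH handles followed by the
 * requested context(s).  Returns the number of contexts dumped, or a
 * negative errno.
 */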
int gru_dump_chiplet_request(unsigned long arg)
{
	struct gru_state *gru;
	struct gru_dump_chiplet_state_req req;
	void __user *ubuf;
	void __user *ubufend;
	int ctxnum, ret, cnt = 0;

	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;

	/* Currently, only dump by gid is implemented */
	if (req.gid >= gru_max_gids)
		return -EINVAL;
	req.gid = array_index_nospec(req.gid, gru_max_gids);

	gru = GID_TO_GRU(req.gid);
	ubuf = req.buf;
	ubufend = req.buf + req.buflen;

	ret = gru_dump_tfm(gru, ubuf, ubufend);
	if (ret < 0)
		goto fail;
	ubuf += ret;

	ret = gru_dump_tgh(gru, ubuf, ubufend);
	if (ret < 0)
		goto fail;
	ubuf += ret;

	for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) {
		if (req.ctxnum == ctxnum || req.ctxnum < 0) {
			ret = gru_dump_context(gru, ctxnum, ubuf, ubufend,
						req.data_opt, req.lock_cch,
						req.flush_cbrs);
			if (ret < 0)
				goto fail;
			ubuf += ret;
			cnt++;
		}
	}

	if (copy_to_user((void __user *)arg, &req, sizeof(req)))
		return -EFAULT;
	return cnt;

fail:
	return ret;
}