9a0deecc JS |
1 | /* |
2 | * SN Platform GRU Driver | |
3 | * | |
4 | * DRIVER TABLE MANAGER + GRU CONTEXT LOAD/UNLOAD | |
5 | * | |
8820f27a | 6 | * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved. |
9a0deecc | 7 | * |
8820f27a JS |
8 | * This program is free software; you can redistribute it and/or modify |
9 | * it under the terms of the GNU General Public License as published by | |
10 | * the Free Software Foundation; either version 2 of the License, or | |
11 | * (at your option) any later version. | |
12 | * | |
13 | * This program is distributed in the hope that it will be useful, | |
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
16 | * GNU General Public License for more details. | |
17 | * | |
18 | * You should have received a copy of the GNU General Public License | |
19 | * along with this program; if not, write to the Free Software | |
20 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | |
9a0deecc JS |
21 | */ |
22 | ||
23 | #include <linux/kernel.h> | |
24 | #include <linux/slab.h> | |
25 | #include <linux/mm.h> | |
26 | #include <linux/spinlock.h> | |
27 | #include <linux/sched.h> | |
28 | #include <linux/device.h> | |
29 | #include <linux/list.h> | |
7f2251b1 | 30 | #include <linux/err.h> |
9a0deecc JS |
31 | #include <asm/uv/uv_hub.h> |
32 | #include "gru.h" | |
33 | #include "grutables.h" | |
34 | #include "gruhandles.h" | |
35 | ||
9ca8e40c | 36 | unsigned long gru_options __read_mostly; |
9a0deecc JS |
37 | |
38 | static struct device_driver gru_driver = { | |
39 | .name = "gru" | |
40 | }; | |
41 | ||
42 | static struct device gru_device = { | |
bb0dc43e | 43 | .init_name = "", |
9a0deecc JS |
44 | .driver = &gru_driver, |
45 | }; | |
46 | ||
47 | struct device *grudev = &gru_device; | |
48 | ||
49 | /* | |
50 | * Select a gru fault map to be used by the current cpu. Note that | |
51 | * multiple cpus may be using the same map. | |
9a0deecc JS |
52 | * ZZZ should be inline but did not work on emulator |
53 | */ | |
54 | int gru_cpu_fault_map_id(void) | |
55 | { | |
4107e1d3 JS |
56 | int cpu = smp_processor_id(); |
57 | int id, core; | |
58 | ||
59 | core = uv_cpu_core_number(cpu); | |
60 | id = core + UV_MAX_INT_CORES * uv_cpu_socket_number(cpu); | |
61 | return id; | |
9a0deecc JS |
62 | } |
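/*
 * Illustrative example (added commentary, not part of the original driver):
 * if UV_MAX_INT_CORES were 8 -- an assumption made only for this example --
 * then core 3 on socket 2 would select fault map id 3 + 8 * 2 = 19.  Cpus
 * sharing a core therefore share a fault map, which is why the comment above
 * notes that multiple cpus may use the same map.
 */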
63 | ||
64 | /*--------- ASID Management ------------------------------------------- | |
65 | * | |
66 | * Initially, assign asids sequentially from MIN_ASID .. MAX_ASID. | |
67 | * Once MAX is reached, flush the TLB & start over. However, | |
68 | * some asids may still be in use, though only a small percentage of them. |
69 | * Search the active contexts & determine the value of the first |
70 | * asid in use ("x"s below). Set "limit" to this value. | |
71 | * This defines a block of assignable asids. | |
72 | * | |
73 | * When "limit" is reached, search forward from limit+1 and determine the | |
74 | * next block of assignable asids. | |
75 | * | |
76 | * Repeat until MAX_ASID is reached, then start over again. | |
77 | * | |
78 | * Each time MAX_ASID is reached, increment the asid generation. Since | |
79 | * the search for in-use asids only checks contexts with GRUs currently | |
80 | * assigned, asids in some contexts will be missed. Prior to loading | |
81 | * a context, the asid generation of the GTS asid is rechecked. If it | |
82 | * doesn't match the current generation, a new asid will be assigned. | |
83 | * | |
84 | * 0---------------x------------x---------------------x----| | |
85 | * ^-next ^-limit ^-MAX_ASID | |
86 | * | |
87 | * All asid manipulation & context loading/unloading is protected by the | |
88 | * gs_lock. | |
89 | */ | |
90 | ||
91 | /* Hit the asid limit. Start over */ | |
92 | static int gru_wrap_asid(struct gru_state *gru) | |
93 | { | |
43884604 | 94 | gru_dbg(grudev, "gid %d\n", gru->gs_gid); |
9a0deecc JS |
95 | STAT(asid_wrap); |
96 | gru->gs_asid_gen++; | |
9a0deecc JS |
97 | return MIN_ASID; |
98 | } | |
99 | ||
100 | /* Find the next chunk of unused asids */ | |
101 | static int gru_reset_asid_limit(struct gru_state *gru, int asid) | |
102 | { | |
103 | int i, gid, inuse_asid, limit; | |
104 | ||
43884604 | 105 | gru_dbg(grudev, "gid %d, asid 0x%x\n", gru->gs_gid, asid); |
9a0deecc JS |
106 | STAT(asid_next); |
107 | limit = MAX_ASID; | |
108 | if (asid >= limit) | |
109 | asid = gru_wrap_asid(gru); | |
87419412 | 110 | gru_flush_all_tlb(gru); |
9a0deecc JS |
111 | gid = gru->gs_gid; |
112 | again: | |
113 | for (i = 0; i < GRU_NUM_CCH; i++) { | |
836ce679 | 114 | if (!gru->gs_gts[i] || is_kernel_context(gru->gs_gts[i])) |
9a0deecc JS |
115 | continue; |
116 | inuse_asid = gru->gs_gts[i]->ts_gms->ms_asids[gid].mt_asid; | |
43884604 JS |
117 | gru_dbg(grudev, "gid %d, gts %p, gms %p, inuse 0x%x, cxt %d\n", |
118 | gru->gs_gid, gru->gs_gts[i], gru->gs_gts[i]->ts_gms, | |
119 | inuse_asid, i); | |
9a0deecc JS |
120 | if (inuse_asid == asid) { |
121 | asid += ASID_INC; | |
122 | if (asid >= limit) { | |
123 | /* | |
124 | * empty range: reset the range limit and | |
125 | * start over | |
126 | */ | |
127 | limit = MAX_ASID; | |
128 | if (asid >= MAX_ASID) | |
129 | asid = gru_wrap_asid(gru); | |
130 | goto again; | |
131 | } | |
132 | } | |
133 | ||
134 | if ((inuse_asid > asid) && (inuse_asid < limit)) | |
135 | limit = inuse_asid; | |
136 | } | |
137 | gru->gs_asid_limit = limit; | |
138 | gru->gs_asid = asid; | |
43884604 JS |
139 | gru_dbg(grudev, "gid %d, new asid 0x%x, new_limit 0x%x\n", gru->gs_gid, |
140 | asid, limit); | |
9a0deecc JS |
141 | return asid; |
142 | } | |
143 | ||
144 | /* Assign a new ASID to a thread context. */ | |
145 | static int gru_assign_asid(struct gru_state *gru) | |
146 | { | |
147 | int asid; | |
148 | ||
9a0deecc JS |
149 | gru->gs_asid += ASID_INC; |
150 | asid = gru->gs_asid; | |
151 | if (asid >= gru->gs_asid_limit) | |
152 | asid = gru_reset_asid_limit(gru, asid); | |
9a0deecc | 153 | |
43884604 | 154 | gru_dbg(grudev, "gid %d, asid 0x%x\n", gru->gs_gid, asid); |
9a0deecc JS |
155 | return asid; |
156 | } | |
157 | ||
158 | /* | |
159 | * Clear n bits in a word. Return a word indicating the bits that were cleared. | |
160 | * Optionally, build an array of chars that contain the bit numbers allocated. | |
161 | */ | |
162 | static unsigned long reserve_resources(unsigned long *p, int n, int mmax, | |
163 | char *idx) | |
164 | { | |
165 | unsigned long bits = 0; | |
166 | int i; | |
167 | ||
3eac2e95 | 168 | while (n--) { |
9a0deecc JS |
169 | i = find_first_bit(p, mmax); |
170 | if (i == mmax) | |
171 | BUG(); | |
172 | __clear_bit(i, p); | |
173 | __set_bit(i, &bits); | |
174 | if (idx) | |
175 | *idx++ = i; | |
3eac2e95 | 176 | } |
9a0deecc JS |
177 | return bits; |
178 | } | |
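/*
 * Illustrative usage (added commentary, never compiled): reserving two
 * allocation units from a free-map with bits 0, 1 and 3 set clears the two
 * lowest free bits, returns them as a mask and records the bit numbers in
 * the optional idx array.  The wrapper name toy_reserve_example() is
 * hypothetical.
 */
#if 0
static void toy_reserve_example(void)
{
	unsigned long freemap = 0xbUL;	/* units 0, 1 and 3 are free */
	char idx[2];
	unsigned long got;

	got = reserve_resources(&freemap, 2, 64, idx);
	/* now: got == 0x3, freemap == 0x8, idx[0] == 0, idx[1] == 1 */
}
#endif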
179 | ||
9ca8e40c | 180 | unsigned long gru_reserve_cb_resources(struct gru_state *gru, int cbr_au_count, |
9a0deecc JS |
181 | char *cbmap) |
182 | { | |
183 | return reserve_resources(&gru->gs_cbr_map, cbr_au_count, GRU_CBR_AU, | |
184 | cbmap); | |
185 | } | |
186 | ||
9ca8e40c | 187 | unsigned long gru_reserve_ds_resources(struct gru_state *gru, int dsr_au_count, |
9a0deecc JS |
188 | char *dsmap) |
189 | { | |
190 | return reserve_resources(&gru->gs_dsr_map, dsr_au_count, GRU_DSR_AU, | |
191 | dsmap); | |
192 | } | |
193 | ||
194 | static void reserve_gru_resources(struct gru_state *gru, | |
195 | struct gru_thread_state *gts) | |
196 | { | |
197 | gru->gs_active_contexts++; | |
198 | gts->ts_cbr_map = | |
9ca8e40c | 199 | gru_reserve_cb_resources(gru, gts->ts_cbr_au_count, |
9a0deecc JS |
200 | gts->ts_cbr_idx); |
201 | gts->ts_dsr_map = | |
9ca8e40c | 202 | gru_reserve_ds_resources(gru, gts->ts_dsr_au_count, NULL); |
9a0deecc JS |
203 | } |
204 | ||
205 | static void free_gru_resources(struct gru_state *gru, | |
206 | struct gru_thread_state *gts) | |
207 | { | |
208 | gru->gs_active_contexts--; | |
209 | gru->gs_cbr_map |= gts->ts_cbr_map; | |
210 | gru->gs_dsr_map |= gts->ts_dsr_map; | |
211 | } | |
212 | ||
213 | /* | |
214 | * Check if a GRU has sufficient free resources to satisfy an allocation | |
215 | * request. Note: GRU locks may or may not be held when this is called. If | |
216 | * not held, recheck after acquiring the appropriate locks. | |
217 | * | |
218 | * Returns 1 if sufficient resources, 0 if not | |
219 | */ | |
220 | static int check_gru_resources(struct gru_state *gru, int cbr_au_count, | |
221 | int dsr_au_count, int max_active_contexts) | |
222 | { | |
223 | return hweight64(gru->gs_cbr_map) >= cbr_au_count | |
224 | && hweight64(gru->gs_dsr_map) >= dsr_au_count | |
225 | && gru->gs_active_contexts < max_active_contexts; | |
226 | } | |
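/*
 * Illustrative example (added commentary): with gs_cbr_map == 0x5 (two free
 * CBR allocation units) and gs_dsr_map == 0x1 (one free DSR unit), a request
 * for 2 CBR AUs and 1 DSR AU succeeds only while gs_active_contexts is still
 * below max_active_contexts.
 */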
227 | ||
228 | /* | |
229 | * TLB management requires tracking all GRU chiplets that have loaded a GSEG |
230 | * context. | |
231 | */ | |
43884604 JS |
232 | static int gru_load_mm_tracker(struct gru_state *gru, |
233 | struct gru_thread_state *gts) | |
9a0deecc | 234 | { |
43884604 | 235 | struct gru_mm_struct *gms = gts->ts_gms; |
9a0deecc | 236 | struct gru_mm_tracker *asids = &gms->ms_asids[gru->gs_gid]; |
43884604 | 237 | unsigned short ctxbitmap = (1 << gts->ts_ctxnum); |
9a0deecc JS |
238 | int asid; |
239 | ||
240 | spin_lock(&gms->ms_asid_lock); | |
241 | asid = asids->mt_asid; | |
242 | ||
87419412 JS |
243 | spin_lock(&gru->gs_asid_lock); |
244 | if (asid == 0 || (asids->mt_ctxbitmap == 0 && asids->mt_asid_gen != | |
245 | gru->gs_asid_gen)) { | |
9a0deecc JS |
246 | asid = gru_assign_asid(gru); |
247 | asids->mt_asid = asid; | |
248 | asids->mt_asid_gen = gru->gs_asid_gen; | |
249 | STAT(asid_new); | |
250 | } else { | |
251 | STAT(asid_reuse); | |
252 | } | |
87419412 | 253 | spin_unlock(&gru->gs_asid_lock); |
9a0deecc JS |
254 | |
255 | BUG_ON(asids->mt_ctxbitmap & ctxbitmap); | |
256 | asids->mt_ctxbitmap |= ctxbitmap; | |
257 | if (!test_bit(gru->gs_gid, gms->ms_asidmap)) | |
258 | __set_bit(gru->gs_gid, gms->ms_asidmap); | |
259 | spin_unlock(&gms->ms_asid_lock); | |
260 | ||
261 | gru_dbg(grudev, | |
43884604 JS |
262 | "gid %d, gts %p, gms %p, ctxnum %d, asid 0x%x, asidmap 0x%lx\n", |
263 | gru->gs_gid, gts, gms, gts->ts_ctxnum, asid, | |
264 | gms->ms_asidmap[0]); | |
9a0deecc JS |
265 | return asid; |
266 | } | |
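/*
 * Illustrative example (added commentary): loading a GSEG as context 3 on
 * gid 1 sets bit 3 in gms->ms_asids[1].mt_ctxbitmap and bit 1 in
 * gms->ms_asidmap[], so a later TLB flush for this mm knows exactly which
 * chiplets and contexts must be flushed.
 */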
267 | ||
268 | static void gru_unload_mm_tracker(struct gru_state *gru, | |
43884604 | 269 | struct gru_thread_state *gts) |
9a0deecc | 270 | { |
43884604 | 271 | struct gru_mm_struct *gms = gts->ts_gms; |
9a0deecc JS |
272 | struct gru_mm_tracker *asids; |
273 | unsigned short ctxbitmap; | |
274 | ||
275 | asids = &gms->ms_asids[gru->gs_gid]; | |
43884604 | 276 | ctxbitmap = (1 << gts->ts_ctxnum); |
9a0deecc | 277 | spin_lock(&gms->ms_asid_lock); |
87419412 | 278 | spin_lock(&gru->gs_asid_lock); |
9a0deecc JS |
279 | BUG_ON((asids->mt_ctxbitmap & ctxbitmap) != ctxbitmap); |
280 | asids->mt_ctxbitmap ^= ctxbitmap; | |
43884604 JS |
281 | gru_dbg(grudev, "gid %d, gts %p, gms %p, ctxnum 0x%d, asidmap 0x%lx\n", |
282 | gru->gs_gid, gts, gms, gts->ts_ctxnum, gms->ms_asidmap[0]); | |
87419412 | 283 | spin_unlock(&gru->gs_asid_lock); |
9a0deecc JS |
284 | spin_unlock(&gms->ms_asid_lock); |
285 | } | |
286 | ||
287 | /* | |
288 | * Decrement the reference count on a GTS structure. Free the structure | |
289 | * if the reference count goes to zero. | |
290 | */ | |
291 | void gts_drop(struct gru_thread_state *gts) | |
292 | { | |
293 | if (gts && atomic_dec_return(&gts->ts_refcnt) == 0) {
7f2251b1 JS |
294 | if (gts->ts_gms) |
295 | gru_drop_mmu_notifier(gts->ts_gms); | |
9a0deecc JS |
296 | kfree(gts); |
297 | STAT(gts_free); | |
298 | } | |
299 | } | |
300 | ||
301 | /* | |
302 | * Locate the GTS structure for the current thread. | |
303 | */ | |
304 | static struct gru_thread_state *gru_find_current_gts_nolock(struct gru_vma_data | |
305 | *vdata, int tsid) | |
306 | { | |
307 | struct gru_thread_state *gts; | |
308 | ||
309 | list_for_each_entry(gts, &vdata->vd_head, ts_next) | |
310 | if (gts->ts_tsid == tsid) | |
311 | return gts; | |
312 | return NULL; | |
313 | } | |
314 | ||
315 | /* | |
316 | * Allocate a thread state structure. | |
317 | */ | |
364b76df JS |
318 | struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma, |
319 | int cbr_au_count, int dsr_au_count, int options, int tsid) | |
9a0deecc JS |
320 | { |
321 | struct gru_thread_state *gts; | |
7f2251b1 | 322 | struct gru_mm_struct *gms; |
9a0deecc JS |
323 | int bytes; |
324 | ||
364b76df | 325 | bytes = DSR_BYTES(dsr_au_count) + CBR_BYTES(cbr_au_count); |
9a0deecc | 326 | bytes += sizeof(struct gru_thread_state); |
940229b9 | 327 | gts = kmalloc(bytes, GFP_KERNEL); |
9a0deecc | 328 | if (!gts) |
7f2251b1 | 329 | return ERR_PTR(-ENOMEM); |
9a0deecc JS |
330 | |
331 | STAT(gts_alloc); | |
940229b9 | 332 | memset(gts, 0, sizeof(struct gru_thread_state)); /* zero out header */ |
9a0deecc JS |
333 | atomic_set(&gts->ts_refcnt, 1);
334 | mutex_init(&gts->ts_ctxlock); | |
364b76df JS |
335 | gts->ts_cbr_au_count = cbr_au_count; |
336 | gts->ts_dsr_au_count = dsr_au_count; | |
337 | gts->ts_user_options = options; | |
518e5cd4 JS |
338 | gts->ts_user_blade_id = -1; |
339 | gts->ts_user_chiplet_id = -1; | |
9a0deecc | 340 | gts->ts_tsid = tsid; |
9a0deecc | 341 | gts->ts_ctxnum = NULLCTX; |
9a0deecc | 342 | gts->ts_tlb_int_select = -1; |
b1b19fcf | 343 | gts->ts_cch_req_slice = -1; |
7b8274e9 | 344 | gts->ts_sizeavail = GRU_SIZEAVAIL(PAGE_SHIFT); |
364b76df JS |
345 | if (vma) { |
346 | gts->ts_mm = current->mm; | |
347 | gts->ts_vma = vma; | |
7f2251b1 JS |
348 | gms = gru_register_mmu_notifier(); |
349 | if (IS_ERR(gms)) | |
364b76df | 350 | goto err; |
7f2251b1 | 351 | gts->ts_gms = gms; |
364b76df | 352 | } |
9a0deecc | 353 | |
364b76df | 354 | gru_dbg(grudev, "alloc gts %p\n", gts); |
9a0deecc JS |
355 | return gts; |
356 | ||
357 | err: | |
358 | gts_drop(gts); | |
7f2251b1 | 359 | return ERR_CAST(gms); |
9a0deecc JS |
360 | } |
361 | ||
362 | /* | |
363 | * Allocate a vma private data structure. | |
364 | */ | |
365 | struct gru_vma_data *gru_alloc_vma_data(struct vm_area_struct *vma, int tsid) | |
366 | { | |
367 | struct gru_vma_data *vdata = NULL; | |
368 | ||
369 | vdata = kmalloc(sizeof(*vdata), GFP_KERNEL); | |
370 | if (!vdata) | |
371 | return NULL; | |
372 | ||
563447d7 | 373 | STAT(vdata_alloc); |
9a0deecc JS |
374 | INIT_LIST_HEAD(&vdata->vd_head); |
375 | spin_lock_init(&vdata->vd_lock); | |
376 | gru_dbg(grudev, "alloc vdata %p\n", vdata); | |
377 | return vdata; | |
378 | } | |
379 | ||
380 | /* | |
381 | * Find the thread state structure for the current thread. | |
382 | */ | |
383 | struct gru_thread_state *gru_find_thread_state(struct vm_area_struct *vma, | |
384 | int tsid) | |
385 | { | |
386 | struct gru_vma_data *vdata = vma->vm_private_data; | |
387 | struct gru_thread_state *gts; | |
388 | ||
389 | spin_lock(&vdata->vd_lock); | |
390 | gts = gru_find_current_gts_nolock(vdata, tsid); | |
391 | spin_unlock(&vdata->vd_lock); | |
392 | gru_dbg(grudev, "vma %p, gts %p\n", vma, gts); | |
393 | return gts; | |
394 | } | |
395 | ||
396 | /* | |
397 | * Allocate a new thread state for a GSEG. Note that races may allow | |
398 | * another thread to create the gts first. |
399 | */ | |
400 | struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct *vma, | |
401 | int tsid) | |
402 | { | |
403 | struct gru_vma_data *vdata = vma->vm_private_data; | |
404 | struct gru_thread_state *gts, *ngts; | |
405 | ||
364b76df JS |
406 | gts = gru_alloc_gts(vma, vdata->vd_cbr_au_count, vdata->vd_dsr_au_count, |
407 | vdata->vd_user_options, tsid); | |
7f2251b1 JS |
408 | if (IS_ERR(gts)) |
409 | return gts; | |
9a0deecc JS |
410 | |
411 | spin_lock(&vdata->vd_lock); | |
412 | ngts = gru_find_current_gts_nolock(vdata, tsid); | |
413 | if (ngts) { | |
414 | gts_drop(gts); | |
415 | gts = ngts; | |
416 | STAT(gts_double_allocate); | |
417 | } else { | |
418 | list_add(&gts->ts_next, &vdata->vd_head); |
419 | } | |
420 | spin_unlock(&vdata->vd_lock); | |
421 | gru_dbg(grudev, "vma %p, gts %p\n", vma, gts); | |
422 | return gts; | |
423 | } | |
424 | ||
425 | /* | |
426 | * Free the GRU context assigned to the thread state. | |
427 | */ | |
428 | static void gru_free_gru_context(struct gru_thread_state *gts) | |
429 | { | |
430 | struct gru_state *gru; | |
431 | ||
432 | gru = gts->ts_gru; | |
43884604 | 433 | gru_dbg(grudev, "gts %p, gid %d\n", gts, gru->gs_gid); |
9a0deecc JS |
434 | |
435 | spin_lock(&gru->gs_lock); | |
436 | gru->gs_gts[gts->ts_ctxnum] = NULL; | |
437 | free_gru_resources(gru, gts); | |
438 | BUG_ON(test_bit(gts->ts_ctxnum, &gru->gs_context_map) == 0); | |
439 | __clear_bit(gts->ts_ctxnum, &gru->gs_context_map); | |
440 | gts->ts_ctxnum = NULLCTX; | |
441 | gts->ts_gru = NULL; | |
87419412 | 442 | gts->ts_blade = -1; |
9a0deecc JS |
443 | spin_unlock(&gru->gs_lock); |
444 | ||
445 | gts_drop(gts); | |
446 | STAT(free_context); | |
447 | } | |
448 | ||
449 | /* | |
450 | * Prefetching cachelines helps hardware performance. |
9ca8e40c | 451 | * (Strictly a performance enhancement. Not functionally required). |
9a0deecc JS |
452 | */ |
453 | static void prefetch_data(void *p, int num, int stride) | |
454 | { | |
455 | while (num-- > 0) { | |
456 | prefetchw(p); | |
457 | p += stride; | |
458 | } | |
459 | } | |
460 | ||
461 | static inline long gru_copy_handle(void *d, void *s) | |
462 | { | |
463 | memcpy(d, s, GRU_HANDLE_BYTES); | |
464 | return GRU_HANDLE_BYTES; | |
465 | } | |
466 | ||
fe5bb6b0 JS |
467 | static void gru_prefetch_context(void *gseg, void *cb, void *cbe, |
468 | unsigned long cbrmap, unsigned long length) | |
9a0deecc | 469 | { |
9a0deecc JS |
470 | int i, scr; |
471 | ||
9a0deecc JS |
472 | prefetch_data(gseg + GRU_DS_BASE, length / GRU_CACHE_LINE_BYTES, |
473 | GRU_CACHE_LINE_BYTES); | |
474 | ||
9a0deecc JS |
475 | for_each_cbr_in_allocation_map(i, &cbrmap, scr) { |
476 | prefetch_data(cb, 1, GRU_CACHE_LINE_BYTES); | |
477 | prefetch_data(cbe + i * GRU_HANDLE_STRIDE, 1, | |
478 | GRU_CACHE_LINE_BYTES); | |
479 | cb += GRU_HANDLE_STRIDE; | |
480 | } | |
923f7f69 JS |
481 | } |
482 | ||
483 | static void gru_load_context_data(void *save, void *grubase, int ctxnum, | |
940229b9 JS |
484 | unsigned long cbrmap, unsigned long dsrmap, |
485 | int data_valid) | |
923f7f69 JS |
486 | { |
487 | void *gseg, *cb, *cbe; | |
488 | unsigned long length; | |
489 | int i, scr; | |
9a0deecc | 490 | |
923f7f69 | 491 | gseg = grubase + ctxnum * GRU_GSEG_STRIDE; |
9a0deecc | 492 | cb = gseg + GRU_CB_BASE; |
923f7f69 JS |
493 | cbe = grubase + GRU_CBE_BASE; |
494 | length = hweight64(dsrmap) * GRU_DSR_AU_BYTES; | |
495 | gru_prefetch_context(gseg, cb, cbe, cbrmap, length); | |
496 | ||
9a0deecc | 497 | for_each_cbr_in_allocation_map(i, &cbrmap, scr) { |
940229b9 JS |
498 | if (data_valid) { |
499 | save += gru_copy_handle(cb, save); | |
500 | save += gru_copy_handle(cbe + i * GRU_HANDLE_STRIDE, | |
501 | save); | |
502 | } else { | |
503 | memset(cb, 0, GRU_CACHE_LINE_BYTES); | |
504 | memset(cbe + i * GRU_HANDLE_STRIDE, 0, | |
505 | GRU_CACHE_LINE_BYTES); | |
506 | } | |
67bf04a5 JS |
507 | /* Flush CBE to hide race in context restart */ |
508 | mb(); | |
509 | gru_flush_cache(cbe + i * GRU_HANDLE_STRIDE); | |
9a0deecc JS |
510 | cb += GRU_HANDLE_STRIDE; |
511 | } | |
512 | ||
940229b9 JS |
513 | if (data_valid) |
514 | memcpy(gseg + GRU_DS_BASE, save, length); | |
515 | else | |
516 | memset(gseg + GRU_DS_BASE, 0, length); | |
9a0deecc JS |
517 | } |
518 | ||
519 | static void gru_unload_context_data(void *save, void *grubase, int ctxnum, | |
520 | unsigned long cbrmap, unsigned long dsrmap) | |
521 | { | |
522 | void *gseg, *cb, *cbe; | |
523 | unsigned long length; | |
524 | int i, scr; | |
525 | ||
526 | gseg = grubase + ctxnum * GRU_GSEG_STRIDE; | |
9a0deecc JS |
527 | cb = gseg + GRU_CB_BASE; |
528 | cbe = grubase + GRU_CBE_BASE; | |
923f7f69 | 529 | length = hweight64(dsrmap) * GRU_DSR_AU_BYTES; |
67bf04a5 JS |
530 | |
531 | /* CBEs may not be coherent. Flush them from cache */ | |
532 | for_each_cbr_in_allocation_map(i, &cbrmap, scr) | |
533 | gru_flush_cache(cbe + i * GRU_HANDLE_STRIDE); | |
534 | mb(); /* Let the CL flush complete */ | |
535 | ||
923f7f69 JS |
536 | gru_prefetch_context(gseg, cb, cbe, cbrmap, length); |
537 | ||
9a0deecc JS |
538 | for_each_cbr_in_allocation_map(i, &cbrmap, scr) { |
539 | save += gru_copy_handle(save, cb); | |
540 | save += gru_copy_handle(save, cbe + i * GRU_HANDLE_STRIDE); | |
541 | cb += GRU_HANDLE_STRIDE; | |
542 | } | |
9a0deecc JS |
543 | memcpy(save, gseg + GRU_DS_BASE, length); |
544 | } | |
545 | ||
546 | void gru_unload_context(struct gru_thread_state *gts, int savestate) | |
547 | { | |
548 | struct gru_state *gru = gts->ts_gru; | |
549 | struct gru_context_configuration_handle *cch; | |
550 | int ctxnum = gts->ts_ctxnum; | |
551 | ||
836ce679 JS |
552 | if (!is_kernel_context(gts)) |
553 | zap_vma_ptes(gts->ts_vma, UGRUADDR(gts), GRU_GSEG_PAGESIZE); | |
9a0deecc JS |
554 | cch = get_cch(gru->gs_gru_base_vaddr, ctxnum); |
555 | ||
563447d7 JS |
556 | gru_dbg(grudev, "gts %p, cbrmap 0x%lx, dsrmap 0x%lx\n", |
557 | gts, gts->ts_cbr_map, gts->ts_dsr_map); | |
9a0deecc JS |
558 | lock_cch_handle(cch); |
559 | if (cch_interrupt_sync(cch)) | |
560 | BUG(); | |
9a0deecc | 561 | |
836ce679 JS |
562 | if (!is_kernel_context(gts)) |
563 | gru_unload_mm_tracker(gru, gts); | |
940229b9 | 564 | if (savestate) { |
9a0deecc JS |
565 | gru_unload_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr, |
566 | ctxnum, gts->ts_cbr_map, | |
567 | gts->ts_dsr_map); | |
940229b9 JS |
568 | gts->ts_data_valid = 1; |
569 | } | |
9a0deecc JS |
570 | |
571 | if (cch_deallocate(cch)) | |
572 | BUG(); | |
9a0deecc JS |
573 | unlock_cch_handle(cch); |
574 | ||
575 | gru_free_gru_context(gts); | |
9a0deecc JS |
576 | } |
577 | ||
578 | /* | |
579 | * Load a GRU context by copying it from the thread data structure in memory | |
580 | * to the GRU. | |
581 | */ | |
d57c82b1 | 582 | void gru_load_context(struct gru_thread_state *gts) |
9a0deecc JS |
583 | { |
584 | struct gru_state *gru = gts->ts_gru; | |
585 | struct gru_context_configuration_handle *cch; | |
6e910074 | 586 | int i, err, asid, ctxnum = gts->ts_ctxnum; |
9a0deecc | 587 | |
9a0deecc | 588 | cch = get_cch(gru->gs_gru_base_vaddr, ctxnum); |
9a0deecc | 589 | lock_cch_handle(cch); |
9a0deecc JS |
590 | cch->tfm_fault_bit_enable = |
591 | (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL | |
592 | || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR); | |
593 | cch->tlb_int_enable = (gts->ts_user_options == GRU_OPT_MISS_FMM_INTR); | |
594 | if (cch->tlb_int_enable) { | |
595 | gts->ts_tlb_int_select = gru_cpu_fault_map_id(); | |
596 | cch->tlb_int_select = gts->ts_tlb_int_select; | |
597 | } | |
b1b19fcf JS |
598 | if (gts->ts_cch_req_slice >= 0) { |
599 | cch->req_slice_set_enable = 1; | |
600 | cch->req_slice = gts->ts_cch_req_slice; | |
601 | } else { | |
602 | cch->req_slice_set_enable = 0; |
603 | } | |
9a0deecc | 604 | cch->tfm_done_bit_enable = 0; |
6e910074 JS |
605 | cch->dsr_allocation_map = gts->ts_dsr_map; |
606 | cch->cbr_allocation_map = gts->ts_cbr_map; | |
836ce679 JS |
607 | |
608 | if (is_kernel_context(gts)) { | |
609 | cch->unmap_enable = 1; | |
4a7a17c1 JS |
610 | cch->tfm_done_bit_enable = 1; |
611 | cch->cb_int_enable = 1; | |
4107e1d3 | 612 | cch->tlb_int_select = 0; /* For now, ints go to cpu 0 */ |
836ce679 JS |
613 | } else { |
614 | cch->unmap_enable = 0; | |
4a7a17c1 JS |
615 | cch->tfm_done_bit_enable = 0; |
616 | cch->cb_int_enable = 0; | |
836ce679 JS |
617 | asid = gru_load_mm_tracker(gru, gts); |
618 | for (i = 0; i < 8; i++) { | |
619 | cch->asid[i] = asid + i; | |
620 | cch->sizeavail[i] = gts->ts_sizeavail; | |
621 | } | |
6e910074 JS |
622 | } |
623 | ||
624 | err = cch_allocate(cch); | |
9a0deecc JS |
625 | if (err) { |
626 | gru_dbg(grudev, | |
627 | "err %d: cch %p, gts %p, cbr 0x%lx, dsr 0x%lx\n", | |
628 | err, cch, gts, gts->ts_cbr_map, gts->ts_dsr_map); | |
629 | BUG(); | |
630 | } | |
631 | ||
632 | gru_load_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr, ctxnum, | |
940229b9 | 633 | gts->ts_cbr_map, gts->ts_dsr_map, gts->ts_data_valid); |
9a0deecc JS |
634 | |
635 | if (cch_start(cch)) | |
636 | BUG(); | |
637 | unlock_cch_handle(cch); | |
563447d7 JS |
638 | |
639 | gru_dbg(grudev, "gid %d, gts %p, cbrmap 0x%lx, dsrmap 0x%lx, tie %d, tis %d\n", | |
640 | gts->ts_gru->gs_gid, gts, gts->ts_cbr_map, gts->ts_dsr_map, | |
641 | (gts->ts_user_options == GRU_OPT_MISS_FMM_INTR), gts->ts_tlb_int_select); | |
9a0deecc JS |
642 | } |
643 | ||
644 | /* | |
645 | * Update fields in an active CCH: | |
646 | * - retarget interrupts on local blade | |
7b8274e9 | 647 | * - update sizeavail mask |
9a0deecc | 648 | */ |
99f7c229 | 649 | int gru_update_cch(struct gru_thread_state *gts) |
9a0deecc JS |
650 | { |
651 | struct gru_context_configuration_handle *cch; | |
652 | struct gru_state *gru = gts->ts_gru; | |
653 | int i, ctxnum = gts->ts_ctxnum, ret = 0; | |
654 | ||
655 | cch = get_cch(gru->gs_gru_base_vaddr, ctxnum); | |
656 | ||
657 | lock_cch_handle(cch); | |
658 | if (cch->state == CCHSTATE_ACTIVE) { | |
659 | if (gru->gs_gts[gts->ts_ctxnum] != gts) | |
660 | goto exit; | |
661 | if (cch_interrupt(cch)) | |
662 | BUG(); | |
99f7c229 JS |
663 | for (i = 0; i < 8; i++) |
664 | cch->sizeavail[i] = gts->ts_sizeavail; | |
665 | gts->ts_tlb_int_select = gru_cpu_fault_map_id(); | |
666 | cch->tlb_int_select = gru_cpu_fault_map_id(); | |
667 | cch->tfm_fault_bit_enable = | |
668 | (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL | |
669 | || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR); | |
9a0deecc JS |
670 | if (cch_start(cch)) |
671 | BUG(); | |
672 | ret = 1; | |
673 | } | |
674 | exit: | |
675 | unlock_cch_handle(cch); | |
676 | return ret; | |
677 | } | |
678 | ||
679 | /* | |
680 | * Update CCH tlb interrupt select. Required when all of the following are true: |
681 | * - task's GRU context is loaded into a GRU | |
682 | * - task is using interrupt notification for TLB faults | |
683 | * - task has migrated to a different cpu on the same blade where | |
684 | * it was previously running. | |
685 | */ | |
686 | static int gru_retarget_intr(struct gru_thread_state *gts) | |
687 | { | |
688 | if (gts->ts_tlb_int_select < 0 | |
689 | || gts->ts_tlb_int_select == gru_cpu_fault_map_id()) | |
690 | return 0; | |
691 | ||
692 | gru_dbg(grudev, "retarget from %d to %d\n", gts->ts_tlb_int_select, | |
693 | gru_cpu_fault_map_id()); | |
99f7c229 | 694 | return gru_update_cch(gts); |
9a0deecc JS |
695 | } |
696 | ||
55484c45 JS |
697 | /* |
698 | * Unload the gru context if it is not assigned to the correct blade or | |
699 | * chiplet. Misassignment can occur if the process migrates to a different | |
700 | * blade or if the user changes the selected blade/chiplet. | |
701 | * A misplaced context is simply unloaded here; it is reloaded on the next fault. |
702 | */ | |
703 | void gru_check_context_placement(struct gru_thread_state *gts) | |
704 | { | |
705 | struct gru_state *gru; | |
706 | int blade_id, chiplet_id; | |
707 | ||
708 | /* | |
709 | * If the current task is the context owner, verify that the | |
710 | * context is correctly placed. This test is skipped for non-owner | |
711 | * references. Pthread apps use non-owner references to the CBRs. | |
712 | */ | |
713 | gru = gts->ts_gru; | |
714 | if (!gru || gts->ts_tgid_owner != current->tgid) | |
715 | return; | |
716 | ||
717 | blade_id = gts->ts_user_blade_id; | |
718 | if (blade_id < 0) | |
719 | blade_id = uv_numa_blade_id(); | |
720 | ||
721 | chiplet_id = gts->ts_user_chiplet_id; | |
722 | if (gru->gs_blade_id != blade_id || | |
723 | (chiplet_id >= 0 && chiplet_id != gru->gs_chiplet_id)) { | |
724 | STAT(check_context_unload); | |
725 | gru_unload_context(gts, 1); | |
726 | } else if (gru_retarget_intr(gts)) { | |
727 | STAT(check_context_retarget_intr); | |
728 | } | |
729 | } | |
730 | ||
9a0deecc JS |
731 | |
732 | /* | |
733 | * Insufficient GRU resources available on the local blade. Steal a context from | |
734 | * a process. This is a hack until a _real_ resource scheduler is written.... | |
735 | */ | |
736 | #define next_ctxnum(n) ((n) < GRU_NUM_CCH - 2 ? (n) + 1 : 0) | |
737 | #define next_gru(b, g) (((g) < &(b)->bs_grus[GRU_CHIPLETS_PER_BLADE - 1]) ? \ | |
738 | ((g)+1) : &(b)->bs_grus[0]) | |
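/*
 * Illustrative example (added commentary), assuming GRU_NUM_CCH == 16 and
 * two chiplets per blade: from bs_lru_ctxnum == 5 the steal scan resumes at
 * (gru0, 6); from bs_lru_ctxnum == 14, next_ctxnum() wraps to 0 and the scan
 * moves on to context 0 of the other chiplet.
 */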
739 | ||
836ce679 JS |
740 | static int is_gts_stealable(struct gru_thread_state *gts, |
741 | struct gru_blade_state *bs) | |
742 | { | |
743 | if (is_kernel_context(gts)) | |
744 | return down_write_trylock(&bs->bs_kgts_sema); | |
745 | else | |
746 | return mutex_trylock(&gts->ts_ctxlock); |
747 | } | |
748 | ||
749 | static void gts_stolen(struct gru_thread_state *gts, | |
750 | struct gru_blade_state *bs) | |
751 | { | |
752 | if (is_kernel_context(gts)) { | |
753 | up_write(&bs->bs_kgts_sema); | |
754 | STAT(steal_kernel_context); | |
755 | } else { | |
756 | mutex_unlock(&gts->ts_ctxlock); |
757 | STAT(steal_user_context); | |
758 | } | |
759 | } | |
760 | ||
55484c45 | 761 | void gru_steal_context(struct gru_thread_state *gts) |
9a0deecc JS |
762 | { |
763 | struct gru_blade_state *blade; | |
764 | struct gru_state *gru, *gru0; | |
765 | struct gru_thread_state *ngts = NULL; | |
766 | int ctxnum, ctxnum0, flag = 0, cbr, dsr; | |
55484c45 JS |
767 | int blade_id = gts->ts_user_blade_id; |
768 | int chiplet_id = gts->ts_user_chiplet_id; | |
9a0deecc | 769 | |
55484c45 JS |
770 | if (blade_id < 0) |
771 | blade_id = uv_numa_blade_id(); | |
9a0deecc JS |
772 | cbr = gts->ts_cbr_au_count; |
773 | dsr = gts->ts_dsr_au_count; | |
774 | ||
364b76df | 775 | blade = gru_base[blade_id]; |
9a0deecc JS |
776 | spin_lock(&blade->bs_lock); |
777 | ||
778 | ctxnum = next_ctxnum(blade->bs_lru_ctxnum); | |
779 | gru = blade->bs_lru_gru; | |
780 | if (ctxnum == 0) | |
781 | gru = next_gru(blade, gru); | |
55484c45 JS |
782 | blade->bs_lru_gru = gru; |
783 | blade->bs_lru_ctxnum = ctxnum; | |
9a0deecc JS |
784 | ctxnum0 = ctxnum; |
785 | gru0 = gru; | |
786 | while (1) { | |
55484c45 JS |
787 | if (chiplet_id < 0 || chiplet_id == gru->gs_chiplet_id) { |
788 | if (check_gru_resources(gru, cbr, dsr, GRU_NUM_CCH)) | |
9a0deecc | 789 | break; |
55484c45 JS |
790 | spin_lock(&gru->gs_lock); |
791 | for (; ctxnum < GRU_NUM_CCH; ctxnum++) { | |
792 | if (flag && gru == gru0 && ctxnum == ctxnum0) | |
793 | break; | |
794 | ngts = gru->gs_gts[ctxnum]; | |
795 | /* | |
796 | * We are grabbing locks out of order, so trylock is | |
797 | * needed. GTSs are usually not locked, so the odds of | |
798 | * success are high. If trylock fails, try to steal a | |
799 | * different GSEG. | |
800 | */ | |
801 | if (ngts && is_gts_stealable(ngts, blade)) | |
802 | break; | |
803 | ngts = NULL; | |
804 | } | |
805 | spin_unlock(&gru->gs_lock); | |
806 | if (ngts || (flag && gru == gru0 && ctxnum == ctxnum0)) | |
9a0deecc | 807 | break; |
9a0deecc | 808 | } |
55484c45 | 809 | if (flag && gru == gru0) |
9a0deecc | 810 | break; |
55484c45 | 811 | flag = 1; |
9a0deecc JS |
812 | ctxnum = 0; |
813 | gru = next_gru(blade, gru); | |
814 | } | |
9a0deecc | 815 | spin_unlock(&blade->bs_lock); |
9a0deecc JS |
816 | |
817 | if (ngts) { | |
7e796a72 | 818 | gts->ustats.context_stolen++; |
9a0deecc | 819 | ngts->ts_steal_jiffies = jiffies; |
836ce679 JS |
820 | gru_unload_context(ngts, is_kernel_context(ngts) ? 0 : 1); |
821 | gts_stolen(ngts, blade); | |
9a0deecc JS |
822 | } else { |
823 | STAT(steal_context_failed); | |
824 | } | |
825 | gru_dbg(grudev, | |
43884604 | 826 | "stole gid %d, ctxnum %d from gts %p. Need cb %d, ds %d;" |
9a0deecc JS |
827 | " avail cb %ld, ds %ld\n", |
828 | gru->gs_gid, ctxnum, ngts, cbr, dsr, hweight64(gru->gs_cbr_map), | |
829 | hweight64(gru->gs_dsr_map)); | |
830 | } | |
831 | ||
55484c45 JS |
832 | /* |
833 | * Assign a free gru context number. |
834 | */ | |
835 | static int gru_assign_context_number(struct gru_state *gru) | |
836 | { | |
837 | int ctxnum; | |
838 | ||
839 | ctxnum = find_first_zero_bit(&gru->gs_context_map, GRU_NUM_CCH); | |
840 | __set_bit(ctxnum, &gru->gs_context_map); | |
841 | return ctxnum; | |
842 | } | |
843 | ||
9a0deecc JS |
844 | /* |
845 | * Scan the GRUs on the local blade & assign a GRU context. | |
846 | */ | |
55484c45 | 847 | struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts) |
9a0deecc JS |
848 | { |
849 | struct gru_state *gru, *grux; | |
850 | int i, max_active_contexts; | |
55484c45 JS |
851 | int blade_id = gts->ts_user_blade_id; |
852 | int chiplet_id = gts->ts_user_chiplet_id; | |
9a0deecc | 853 | |
55484c45 JS |
854 | if (blade_id < 0) |
855 | blade_id = uv_numa_blade_id(); | |
9a0deecc JS |
856 | again: |
857 | gru = NULL; | |
858 | max_active_contexts = GRU_NUM_CCH; | |
55484c45 JS |
859 | for_each_gru_on_blade(grux, blade_id, i) { |
860 | if (chiplet_id >= 0 && chiplet_id != grux->gs_chiplet_id) | |
861 | continue; | |
9a0deecc JS |
862 | if (check_gru_resources(grux, gts->ts_cbr_au_count, |
863 | gts->ts_dsr_au_count, | |
864 | max_active_contexts)) { | |
865 | gru = grux; | |
866 | max_active_contexts = grux->gs_active_contexts; | |
867 | if (max_active_contexts == 0) | |
868 | break; | |
869 | } | |
870 | } | |
871 | ||
872 | if (gru) { | |
873 | spin_lock(&gru->gs_lock); | |
874 | if (!check_gru_resources(gru, gts->ts_cbr_au_count, | |
875 | gts->ts_dsr_au_count, GRU_NUM_CCH)) { | |
876 | spin_unlock(&gru->gs_lock); | |
877 | goto again; | |
878 | } | |
879 | reserve_gru_resources(gru, gts); | |
880 | gts->ts_gru = gru; | |
87419412 | 881 | gts->ts_blade = gru->gs_blade_id; |
55484c45 | 882 | gts->ts_ctxnum = gru_assign_context_number(gru); |
9a0deecc JS |
883 | atomic_inc(&gts->ts_refcnt); |
884 | gru->gs_gts[gts->ts_ctxnum] = gts; | |
9a0deecc JS |
885 | spin_unlock(&gru->gs_lock); |
886 | ||
887 | STAT(assign_context); | |
888 | gru_dbg(grudev, | |
43884604 | 889 | "gseg %p, gts %p, gid %d, ctx %d, cbr %d, dsr %d\n", |
9a0deecc JS |
890 | gseg_virtual_address(gts->ts_gru, gts->ts_ctxnum), gts, |
891 | gts->ts_gru->gs_gid, gts->ts_ctxnum, | |
892 | gts->ts_cbr_au_count, gts->ts_dsr_au_count); | |
893 | } else { | |
894 | gru_dbg(grudev, "failed to allocate a GTS %s\n", ""); | |
895 | STAT(assign_context_failed); | |
896 | } | |
897 | ||
9a0deecc JS |
898 | return gru; |
899 | } | |
900 | ||
901 | /* | |
902 | * gru_fault |
903 | * | |
904 | * Map the user's GRU segment | |
9ca8e40c JS |
905 | * |
906 | * Note: gru segments are always mmapped on GRU_GSEG_PAGESIZE boundaries. |
9a0deecc JS |
907 | */ |
908 | int gru_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |
909 | { | |
910 | struct gru_thread_state *gts; | |
911 | unsigned long paddr, vaddr; | |
912 | ||
913 | vaddr = (unsigned long)vmf->virtual_address; | |
914 | gru_dbg(grudev, "vma %p, vaddr 0x%lx (0x%lx)\n", | |
915 | vma, vaddr, GSEG_BASE(vaddr)); | |
916 | STAT(nopfn); | |
917 | ||
9ca8e40c | 918 | /* The following check ensures vaddr is a valid address in the VMA */ |
9a0deecc JS |
919 | gts = gru_find_thread_state(vma, TSID(vaddr, vma)); |
920 | if (!gts) | |
921 | return VM_FAULT_SIGBUS; | |
922 | ||
923 | again: | |
9a0deecc | 924 | mutex_lock(&gts->ts_ctxlock);
fe5bb6b0 | 925 | preempt_disable(); |
364b76df | 926 | |
55484c45 | 927 | gru_check_context_placement(gts); |
9a0deecc JS |
928 | |
929 | if (!gts->ts_gru) { | |
836ce679 | 930 | STAT(load_user_context); |
55484c45 | 931 | if (!gru_assign_gru_context(gts)) { |
9a0deecc | 932 | preempt_enable(); |
364b76df JS |
933 | mutex_unlock(&gts->ts_ctxlock); |
934 | set_current_state(TASK_INTERRUPTIBLE); | |
9a0deecc JS |
935 | schedule_timeout(GRU_ASSIGN_DELAY); /* true hack ZZZ */ |
936 | if (gts->ts_steal_jiffies + GRU_STEAL_DELAY < jiffies) | |
55484c45 | 937 | gru_steal_context(gts); |
9a0deecc JS |
938 | goto again; |
939 | } | |
940 | gru_load_context(gts); | |
941 | paddr = gseg_physical_address(gts->ts_gru, gts->ts_ctxnum); | |
942 | remap_pfn_range(vma, vaddr & ~(GRU_GSEG_PAGESIZE - 1), | |
943 | paddr >> PAGE_SHIFT, GRU_GSEG_PAGESIZE, | |
944 | vma->vm_page_prot); | |
945 | } | |
946 | ||
9a0deecc | 947 | preempt_enable(); |
364b76df | 948 | mutex_unlock(&gts->ts_ctxlock); |
9a0deecc JS |
949 | |
950 | return VM_FAULT_NOPAGE; | |
951 | } | |
952 |