/*
 * SN Platform GRU Driver
 *
 * DRIVER TABLE MANAGER + GRU CONTEXT LOAD/UNLOAD
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/list.h>
#include <asm/uv/uv_hub.h>
#include "gru.h"
#include "grutables.h"
#include "gruhandles.h"

unsigned long gru_options __read_mostly;

static struct device_driver gru_driver = {
	.name = "gru"
};

static struct device gru_device = {
	.init_name = "",
	.driver = &gru_driver,
};

struct device *grudev = &gru_device;

/*
 * Select a gru fault map to be used by the current cpu. Note that
 * multiple cpus may be using the same map.
 * ZZZ should "shift" be used?? Depends on HT cpu numbering
 * ZZZ should be inline but did not work on emulator
 */
int gru_cpu_fault_map_id(void)
{
	return uv_blade_processor_id() % GRU_NUM_TFM;
}

/*--------- ASID Management -------------------------------------------
 *
 *  Initially, assign asids sequentially from MIN_ASID .. MAX_ASID.
 *  Once MAX is reached, flush the TLB & start over. However,
 *  some asids may still be in use. There won't be many (percentage wise) still
 *  in use. Search active contexts & determine the value of the first
 *  asid in use ("x"s below). Set "limit" to this value.
 *  This defines a block of assignable asids.
 *
 *  When "limit" is reached, search forward from limit+1 and determine the
 *  next block of assignable asids.
 *
 *  Repeat until MAX_ASID is reached, then start over again.
 *
 *  Each time MAX_ASID is reached, increment the asid generation. Since
 *  the search for in-use asids only checks contexts with GRUs currently
 *  assigned, asids in some contexts will be missed. Prior to loading
 *  a context, the asid generation of the GTS asid is rechecked. If it
 *  doesn't match the current generation, a new asid will be assigned.
 *
 *	0---------------x------------x---------------------x----|
 *	  ^-next	^-limit				    ^-MAX_ASID
 *
 * All asid manipulation & context loading/unloading is protected by the
 * gs_lock.
 */
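
/*
 * Worked example of the scheme above (asid values illustrative only):
 * suppose a wrap occurs while asids 0x20 and 0x38 are still in use by
 * loaded contexts. gru_wrap_asid() flushes the TLB, bumps gs_asid_gen,
 * and restarts at MIN_ASID; the following gru_reset_asid_limit() scan
 * finds 0x20 in use and sets gs_asid_limit to 0x20. Assignments then
 * proceed sequentially until 0x20 is reached, the next scan yields the
 * block (0x20+ASID_INC .. 0x38), and so on up to MAX_ASID.
 */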

/* Hit the asid limit. Start over */
static int gru_wrap_asid(struct gru_state *gru)
{
	gru_dbg(grudev, "gid %d\n", gru->gs_gid);
	STAT(asid_wrap);
	gru->gs_asid_gen++;
	gru_flush_all_tlb(gru);
	return MIN_ASID;
}

/* Find the next chunk of unused asids */
static int gru_reset_asid_limit(struct gru_state *gru, int asid)
{
	int i, gid, inuse_asid, limit;

	gru_dbg(grudev, "gid %d, asid 0x%x\n", gru->gs_gid, asid);
	STAT(asid_next);
	limit = MAX_ASID;
	if (asid >= limit)
		asid = gru_wrap_asid(gru);
	gid = gru->gs_gid;
again:
	for (i = 0; i < GRU_NUM_CCH; i++) {
		if (!gru->gs_gts[i])
			continue;
		inuse_asid = gru->gs_gts[i]->ts_gms->ms_asids[gid].mt_asid;
		gru_dbg(grudev, "gid %d, gts %p, gms %p, inuse 0x%x, cxt %d\n",
			gru->gs_gid, gru->gs_gts[i], gru->gs_gts[i]->ts_gms,
			inuse_asid, i);
		if (inuse_asid == asid) {
			asid += ASID_INC;
			if (asid >= limit) {
				/*
				 * empty range: reset the range limit and
				 * start over
				 */
				limit = MAX_ASID;
				if (asid >= MAX_ASID)
					asid = gru_wrap_asid(gru);
				goto again;
			}
		}

		if ((inuse_asid > asid) && (inuse_asid < limit))
			limit = inuse_asid;
	}
	gru->gs_asid_limit = limit;
	gru->gs_asid = asid;
	gru_dbg(grudev, "gid %d, new asid 0x%x, new_limit 0x%x\n", gru->gs_gid,
		asid, limit);
	return asid;
}

/* Assign a new ASID to a thread context. */
static int gru_assign_asid(struct gru_state *gru)
{
	int asid;

	spin_lock(&gru->gs_asid_lock);
	gru->gs_asid += ASID_INC;
	asid = gru->gs_asid;
	if (asid >= gru->gs_asid_limit)
		asid = gru_reset_asid_limit(gru, asid);
	spin_unlock(&gru->gs_asid_lock);

	gru_dbg(grudev, "gid %d, asid 0x%x\n", gru->gs_gid, asid);
	return asid;
}

/*
 * Clear n bits in a word. Return a word indicating the bits that were cleared.
 * Optionally, build an array of chars that contain the bit numbers allocated.
 */
static unsigned long reserve_resources(unsigned long *p, int n, int mmax,
				       char *idx)
{
	unsigned long bits = 0;
	int i;

	do {
		i = find_first_bit(p, mmax);
		if (i == mmax)
			BUG();
		__clear_bit(i, p);
		__set_bit(i, &bits);
		if (idx)
			*idx++ = i;
	} while (--n);
	return bits;
}
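
/*
 * Example of reserve_resources() (map value illustrative): with
 * *p == 0x2d (bits 0, 2, 3, 5 free) and n == 2, the two lowest set bits
 * are claimed: *p becomes 0x28, the return value is 0x5, and idx[], if
 * supplied, receives {0, 2}. The non-atomic __clear_bit/__set_bit are
 * safe because callers reserve resources while holding gs_lock.
 */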

unsigned long gru_reserve_cb_resources(struct gru_state *gru, int cbr_au_count,
				       char *cbmap)
{
	return reserve_resources(&gru->gs_cbr_map, cbr_au_count, GRU_CBR_AU,
				 cbmap);
}

unsigned long gru_reserve_ds_resources(struct gru_state *gru, int dsr_au_count,
				       char *dsmap)
{
	return reserve_resources(&gru->gs_dsr_map, dsr_au_count, GRU_DSR_AU,
				 dsmap);
}

static void reserve_gru_resources(struct gru_state *gru,
				  struct gru_thread_state *gts)
{
	gru->gs_active_contexts++;
	gts->ts_cbr_map =
	    gru_reserve_cb_resources(gru, gts->ts_cbr_au_count,
				     gts->ts_cbr_idx);
	gts->ts_dsr_map =
	    gru_reserve_ds_resources(gru, gts->ts_dsr_au_count, NULL);
}

static void free_gru_resources(struct gru_state *gru,
			       struct gru_thread_state *gts)
{
	gru->gs_active_contexts--;
	gru->gs_cbr_map |= gts->ts_cbr_map;
	gru->gs_dsr_map |= gts->ts_dsr_map;
}

/*
 * Check if a GRU has sufficient free resources to satisfy an allocation
 * request. Note: GRU locks may or may not be held when this is called. If
 * not held, recheck after acquiring the appropriate locks.
 *
 * Returns 1 if sufficient resources, 0 if not
 */
static int check_gru_resources(struct gru_state *gru, int cbr_au_count,
			       int dsr_au_count, int max_active_contexts)
{
	return hweight64(gru->gs_cbr_map) >= cbr_au_count
		&& hweight64(gru->gs_dsr_map) >= dsr_au_count
		&& gru->gs_active_contexts < max_active_contexts;
}
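
/*
 * A set bit in gs_cbr_map/gs_dsr_map marks a free allocation unit, so
 * hweight64() counts free AUs. For example (map value illustrative),
 * gs_cbr_map == 0x0c offers two free CBR AUs and satisfies any request
 * with cbr_au_count <= 2, subject to the active-context limit.
 */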

/*
 * TLB management requires tracking all GRU chiplets that have loaded a GSEG
 * context.
 */
static int gru_load_mm_tracker(struct gru_state *gru,
			       struct gru_thread_state *gts)
{
	struct gru_mm_struct *gms = gts->ts_gms;
	struct gru_mm_tracker *asids = &gms->ms_asids[gru->gs_gid];
	unsigned short ctxbitmap = (1 << gts->ts_ctxnum);
	int asid;

	spin_lock(&gms->ms_asid_lock);
	asid = asids->mt_asid;

	if (asid == 0 || asids->mt_asid_gen != gru->gs_asid_gen) {
		asid = gru_assign_asid(gru);
		asids->mt_asid = asid;
		asids->mt_asid_gen = gru->gs_asid_gen;
		STAT(asid_new);
	} else {
		STAT(asid_reuse);
	}

	BUG_ON(asids->mt_ctxbitmap & ctxbitmap);
	asids->mt_ctxbitmap |= ctxbitmap;
	if (!test_bit(gru->gs_gid, gms->ms_asidmap))
		__set_bit(gru->gs_gid, gms->ms_asidmap);
	spin_unlock(&gms->ms_asid_lock);

	gru_dbg(grudev,
		"gid %d, gts %p, gms %p, ctxnum %d, asid 0x%x, asidmap 0x%lx\n",
		gru->gs_gid, gts, gms, gts->ts_ctxnum, asid,
		gms->ms_asidmap[0]);
	return asid;
}

static void gru_unload_mm_tracker(struct gru_state *gru,
				  struct gru_thread_state *gts)
{
	struct gru_mm_struct *gms = gts->ts_gms;
	struct gru_mm_tracker *asids;
	unsigned short ctxbitmap;

	asids = &gms->ms_asids[gru->gs_gid];
	ctxbitmap = (1 << gts->ts_ctxnum);
	spin_lock(&gms->ms_asid_lock);
	BUG_ON((asids->mt_ctxbitmap & ctxbitmap) != ctxbitmap);
	asids->mt_ctxbitmap ^= ctxbitmap;
	gru_dbg(grudev, "gid %d, gts %p, gms %p, ctxnum %d, asidmap 0x%lx\n",
		gru->gs_gid, gts, gms, gts->ts_ctxnum, gms->ms_asidmap[0]);
	spin_unlock(&gms->ms_asid_lock);
}

/*
 * Decrement the reference count on a GTS structure. Free the structure
 * if the reference count goes to zero.
 */
void gts_drop(struct gru_thread_state *gts)
{
	if (gts && atomic_dec_return(&gts->ts_refcnt) == 0) {
		gru_drop_mmu_notifier(gts->ts_gms);
		kfree(gts);
		STAT(gts_free);
	}
}

/*
 * Locate the GTS structure for the current thread.
 */
static struct gru_thread_state *gru_find_current_gts_nolock(struct gru_vma_data
						*vdata, int tsid)
{
	struct gru_thread_state *gts;

	list_for_each_entry(gts, &vdata->vd_head, ts_next)
		if (gts->ts_tsid == tsid)
			return gts;
	return NULL;
}

/*
 * Allocate a thread state structure.
 */
static struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
					      struct gru_vma_data *vdata,
					      int tsid)
{
	struct gru_thread_state *gts;
	int bytes;

	bytes = DSR_BYTES(vdata->vd_dsr_au_count) +
				CBR_BYTES(vdata->vd_cbr_au_count);
	bytes += sizeof(struct gru_thread_state);
	gts = kzalloc(bytes, GFP_KERNEL);
	if (!gts)
		return NULL;

	STAT(gts_alloc);
	atomic_set(&gts->ts_refcnt, 1);
	mutex_init(&gts->ts_ctxlock);
	gts->ts_cbr_au_count = vdata->vd_cbr_au_count;
	gts->ts_dsr_au_count = vdata->vd_dsr_au_count;
	gts->ts_user_options = vdata->vd_user_options;
	gts->ts_tsid = tsid;
	gts->ts_ctxnum = NULLCTX;
	gts->ts_mm = current->mm;
	gts->ts_vma = vma;
	gts->ts_tlb_int_select = -1;
	gts->ts_gms = gru_register_mmu_notifier();
	if (!gts->ts_gms)
		goto err;

	gru_dbg(grudev, "alloc vdata %p, new gts %p\n", vdata, gts);
	return gts;

err:
	gts_drop(gts);
	return NULL;
}

/*
 * Allocate a vma private data structure.
 */
struct gru_vma_data *gru_alloc_vma_data(struct vm_area_struct *vma, int tsid)
{
	struct gru_vma_data *vdata = NULL;

	vdata = kmalloc(sizeof(*vdata), GFP_KERNEL);
	if (!vdata)
		return NULL;

	INIT_LIST_HEAD(&vdata->vd_head);
	spin_lock_init(&vdata->vd_lock);
	gru_dbg(grudev, "alloc vdata %p\n", vdata);
	return vdata;
}

/*
 * Find the thread state structure for the current thread.
 */
struct gru_thread_state *gru_find_thread_state(struct vm_area_struct *vma,
					int tsid)
{
	struct gru_vma_data *vdata = vma->vm_private_data;
	struct gru_thread_state *gts;

	spin_lock(&vdata->vd_lock);
	gts = gru_find_current_gts_nolock(vdata, tsid);
	spin_unlock(&vdata->vd_lock);
	gru_dbg(grudev, "vma %p, gts %p\n", vma, gts);
	return gts;
}

/*
 * Allocate a new thread state for a GSEG. Note that a concurrent thread
 * may race to create a gts for the same tsid; the loser's allocation is
 * dropped.
 */
struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct *vma,
					int tsid)
{
	struct gru_vma_data *vdata = vma->vm_private_data;
	struct gru_thread_state *gts, *ngts;

	gts = gru_alloc_gts(vma, vdata, tsid);
	if (!gts)
		return NULL;

	spin_lock(&vdata->vd_lock);
	ngts = gru_find_current_gts_nolock(vdata, tsid);
	if (ngts) {
		gts_drop(gts);
		gts = ngts;
		STAT(gts_double_allocate);
	} else {
		list_add(&gts->ts_next, &vdata->vd_head);
	}
	spin_unlock(&vdata->vd_lock);
	gru_dbg(grudev, "vma %p, gts %p\n", vma, gts);
	return gts;
}
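
/*
 * Note on the pattern above: the gts is allocated optimistically outside
 * vd_lock (the allocation path may sleep), then the list is rechecked
 * under vd_lock; the loser of a double allocation simply releases its
 * copy via gts_drop().
 */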

/*
 * Free the GRU context assigned to the thread state.
 */
static void gru_free_gru_context(struct gru_thread_state *gts)
{
	struct gru_state *gru;

	gru = gts->ts_gru;
	gru_dbg(grudev, "gts %p, gid %d\n", gts, gru->gs_gid);

	spin_lock(&gru->gs_lock);
	gru->gs_gts[gts->ts_ctxnum] = NULL;
	free_gru_resources(gru, gts);
	BUG_ON(test_bit(gts->ts_ctxnum, &gru->gs_context_map) == 0);
	__clear_bit(gts->ts_ctxnum, &gru->gs_context_map);
	gts->ts_ctxnum = NULLCTX;
	gts->ts_gru = NULL;
	spin_unlock(&gru->gs_lock);

	gts_drop(gts);
	STAT(free_context);
}

/*
 * Prefetching cachelines helps hardware performance.
 * (Strictly a performance enhancement. Not functionally required.)
 */
static void prefetch_data(void *p, int num, int stride)
{
	while (num-- > 0) {
		prefetchw(p);
		p += stride;
	}
}

static inline long gru_copy_handle(void *d, void *s)
{
	memcpy(d, s, GRU_HANDLE_BYTES);
	return GRU_HANDLE_BYTES;
}

static void gru_prefetch_context(void *gseg, void *cb, void *cbe,
				unsigned long cbrmap, unsigned long length)
{
	int i, scr;

	prefetch_data(gseg + GRU_DS_BASE, length / GRU_CACHE_LINE_BYTES,
		      GRU_CACHE_LINE_BYTES);

	for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
		prefetch_data(cb, 1, GRU_CACHE_LINE_BYTES);
		prefetch_data(cbe + i * GRU_HANDLE_STRIDE, 1,
			      GRU_CACHE_LINE_BYTES);
		cb += GRU_HANDLE_STRIDE;
	}
}

static void gru_load_context_data(void *save, void *grubase, int ctxnum,
				  unsigned long cbrmap, unsigned long dsrmap)
{
	void *gseg, *cb, *cbe;
	unsigned long length;
	int i, scr;

	gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
	cb = gseg + GRU_CB_BASE;
	cbe = grubase + GRU_CBE_BASE;
	length = hweight64(dsrmap) * GRU_DSR_AU_BYTES;
	gru_prefetch_context(gseg, cb, cbe, cbrmap, length);

	for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
		save += gru_copy_handle(cb, save);
		save += gru_copy_handle(cbe + i * GRU_HANDLE_STRIDE, save);
		cb += GRU_HANDLE_STRIDE;
	}

	memcpy(gseg + GRU_DS_BASE, save, length);
}

static void gru_unload_context_data(void *save, void *grubase, int ctxnum,
				    unsigned long cbrmap, unsigned long dsrmap)
{
	void *gseg, *cb, *cbe;
	unsigned long length;
	int i, scr;

	gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
	cb = gseg + GRU_CB_BASE;
	cbe = grubase + GRU_CBE_BASE;
	length = hweight64(dsrmap) * GRU_DSR_AU_BYTES;
	gru_prefetch_context(gseg, cb, cbe, cbrmap, length);

	for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
		save += gru_copy_handle(save, cb);
		save += gru_copy_handle(save, cbe + i * GRU_HANDLE_STRIDE);
		cb += GRU_HANDLE_STRIDE;
	}
	memcpy(save, gseg + GRU_DS_BASE, length);
}

void gru_unload_context(struct gru_thread_state *gts, int savestate)
{
	struct gru_state *gru = gts->ts_gru;
	struct gru_context_configuration_handle *cch;
	int ctxnum = gts->ts_ctxnum;

	zap_vma_ptes(gts->ts_vma, UGRUADDR(gts), GRU_GSEG_PAGESIZE);
	cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);

	gru_dbg(grudev, "gts %p\n", gts);
	lock_cch_handle(cch);
	if (cch_interrupt_sync(cch))
		BUG();

	gru_unload_mm_tracker(gru, gts);
	if (savestate)
		gru_unload_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr,
					ctxnum, gts->ts_cbr_map,
					gts->ts_dsr_map);

	if (cch_deallocate(cch))
		BUG();
	gts->ts_force_unload = 0;	/* ts_force_unload locked by CCH lock */
	unlock_cch_handle(cch);

	gru_free_gru_context(gts);
	STAT(unload_context);
}

/*
 * Load a GRU context by copying it from the thread data structure in memory
 * to the GRU.
 */
static void gru_load_context(struct gru_thread_state *gts)
{
	struct gru_state *gru = gts->ts_gru;
	struct gru_context_configuration_handle *cch;
	int err, asid, ctxnum = gts->ts_ctxnum;

	gru_dbg(grudev, "gts %p\n", gts);
	cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);

	lock_cch_handle(cch);
	asid = gru_load_mm_tracker(gru, gts);
	cch->tfm_fault_bit_enable =
	    (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
	     || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
	cch->tlb_int_enable = (gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
	if (cch->tlb_int_enable) {
		gts->ts_tlb_int_select = gru_cpu_fault_map_id();
		cch->tlb_int_select = gts->ts_tlb_int_select;
	}
	cch->tfm_done_bit_enable = 0;
	err = cch_allocate(cch, asid, gts->ts_cbr_map, gts->ts_dsr_map);
	if (err) {
		gru_dbg(grudev,
			"err %d: cch %p, gts %p, cbr 0x%lx, dsr 0x%lx\n",
			err, cch, gts, gts->ts_cbr_map, gts->ts_dsr_map);
		BUG();
	}

	gru_load_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr, ctxnum,
			      gts->ts_cbr_map, gts->ts_dsr_map);

	if (cch_start(cch))
		BUG();
	unlock_cch_handle(cch);

	STAT(load_context);
}

/*
 * Update fields in an active CCH:
 * 	- retarget interrupts on local blade
 * 	- force a delayed context unload by clearing the CCH asids. This
 * 	  forces TLB misses for new GRU instructions. The context is unloaded
 * 	  when the next TLB miss occurs.
 */
static int gru_update_cch(struct gru_thread_state *gts, int int_select)
{
	struct gru_context_configuration_handle *cch;
	struct gru_state *gru = gts->ts_gru;
	int i, ctxnum = gts->ts_ctxnum, ret = 0;

	cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);

	lock_cch_handle(cch);
	if (cch->state == CCHSTATE_ACTIVE) {
		if (gru->gs_gts[gts->ts_ctxnum] != gts)
			goto exit;
		if (cch_interrupt(cch))
			BUG();
		if (int_select >= 0) {
			gts->ts_tlb_int_select = int_select;
			cch->tlb_int_select = int_select;
		} else {
			for (i = 0; i < 8; i++)
				cch->asid[i] = 0;
			cch->tfm_fault_bit_enable = 0;
			cch->tlb_int_enable = 0;
			gts->ts_force_unload = 1;
		}
		if (cch_start(cch))
			BUG();
		ret = 1;
	}
exit:
	unlock_cch_handle(cch);
	return ret;
}
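
/*
 * The int_select argument selects between the two modes listed above:
 * a value >= 0 retargets the TLB interrupt to that fault map id (see
 * gru_retarget_intr() below), while a negative value clears the CCH
 * asids to trigger the delayed unload path.
 */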

/*
 * Update CCH tlb interrupt select. Required when all of the following are
 * true:
 * 	- task's GRU context is loaded into a GRU
 * 	- task is using interrupt notification for TLB faults
 * 	- task has migrated to a different cpu on the same blade where
 * 	  it was previously running.
 */
static int gru_retarget_intr(struct gru_thread_state *gts)
{
	if (gts->ts_tlb_int_select < 0
	    || gts->ts_tlb_int_select == gru_cpu_fault_map_id())
		return 0;

	gru_dbg(grudev, "retarget from %d to %d\n", gts->ts_tlb_int_select,
		gru_cpu_fault_map_id());
	return gru_update_cch(gts, gru_cpu_fault_map_id());
}

/*
 * Insufficient GRU resources available on the local blade. Steal a context
 * from a process. This is a hack until a _real_ resource scheduler is
 * written....
 */
#define next_ctxnum(n)	((n) < GRU_NUM_CCH - 2 ? (n) + 1 : 0)
#define next_gru(b, g)	(((g) < &(b)->bs_grus[GRU_CHIPLETS_PER_BLADE - 1]) ? \
				 ((g)+1) : &(b)->bs_grus[0])
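
/*
 * The rotors above walk the blade circularly. For example, assuming
 * GRU_NUM_CCH == 16, next_ctxnum(3) == 4 and next_ctxnum(14) == 0;
 * next_gru() steps to the next chiplet and wraps from the last chiplet
 * back to bs_grus[0].
 */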

static void gru_steal_context(struct gru_thread_state *gts)
{
	struct gru_blade_state *blade;
	struct gru_state *gru, *gru0;
	struct gru_thread_state *ngts = NULL;
	int ctxnum, ctxnum0, flag = 0, cbr, dsr;

	cbr = gts->ts_cbr_au_count;
	dsr = gts->ts_dsr_au_count;

	preempt_disable();
	blade = gru_base[uv_numa_blade_id()];
	spin_lock(&blade->bs_lock);

	ctxnum = next_ctxnum(blade->bs_lru_ctxnum);
	gru = blade->bs_lru_gru;
	if (ctxnum == 0)
		gru = next_gru(blade, gru);
	ctxnum0 = ctxnum;
	gru0 = gru;
	while (1) {
		if (check_gru_resources(gru, cbr, dsr, GRU_NUM_CCH))
			break;
		spin_lock(&gru->gs_lock);
		for (; ctxnum < GRU_NUM_CCH; ctxnum++) {
			if (flag && gru == gru0 && ctxnum == ctxnum0)
				break;
			ngts = gru->gs_gts[ctxnum];
			/*
			 * We are grabbing locks out of order, so trylock is
			 * needed. GTSs are usually not locked, so the odds of
			 * success are high. If trylock fails, try to steal a
			 * different GSEG.
			 */
			if (ngts && mutex_trylock(&ngts->ts_ctxlock))
				break;
			ngts = NULL;
			flag = 1;
		}
		spin_unlock(&gru->gs_lock);
		if (ngts || (flag && gru == gru0 && ctxnum == ctxnum0))
			break;
		ctxnum = 0;
		gru = next_gru(blade, gru);
	}
	blade->bs_lru_gru = gru;
	blade->bs_lru_ctxnum = ctxnum;
	spin_unlock(&blade->bs_lock);
	preempt_enable();

	if (ngts) {
		STAT(steal_context);
		ngts->ts_steal_jiffies = jiffies;
		gru_unload_context(ngts, 1);
		mutex_unlock(&ngts->ts_ctxlock);
	} else {
		STAT(steal_context_failed);
	}
	gru_dbg(grudev,
		"stole gid %d, ctxnum %d from gts %p. Need cb %d, ds %d;"
		" avail cb %ld, ds %ld\n",
		gru->gs_gid, ctxnum, ngts, cbr, dsr, hweight64(gru->gs_cbr_map),
		hweight64(gru->gs_dsr_map));
}

/*
 * Scan the GRUs on the local blade & assign a GRU context.
 */
static struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts)
{
	struct gru_state *gru, *grux;
	int i, max_active_contexts;

	preempt_disable();

again:
	gru = NULL;
	max_active_contexts = GRU_NUM_CCH;
	for_each_gru_on_blade(grux, uv_numa_blade_id(), i) {
		if (check_gru_resources(grux, gts->ts_cbr_au_count,
					gts->ts_dsr_au_count,
					max_active_contexts)) {
			gru = grux;
			max_active_contexts = grux->gs_active_contexts;
			if (max_active_contexts == 0)
				break;
		}
	}

	if (gru) {
		spin_lock(&gru->gs_lock);
		if (!check_gru_resources(gru, gts->ts_cbr_au_count,
					 gts->ts_dsr_au_count, GRU_NUM_CCH)) {
			spin_unlock(&gru->gs_lock);
			goto again;
		}
		reserve_gru_resources(gru, gts);
		gts->ts_gru = gru;
		gts->ts_ctxnum =
		    find_first_zero_bit(&gru->gs_context_map, GRU_NUM_CCH);
		BUG_ON(gts->ts_ctxnum == GRU_NUM_CCH);
		atomic_inc(&gts->ts_refcnt);
		gru->gs_gts[gts->ts_ctxnum] = gts;
		__set_bit(gts->ts_ctxnum, &gru->gs_context_map);
		spin_unlock(&gru->gs_lock);

		STAT(assign_context);
		gru_dbg(grudev,
			"gseg %p, gts %p, gid %d, ctx %d, cbr %d, dsr %d\n",
			gseg_virtual_address(gts->ts_gru, gts->ts_ctxnum), gts,
			gts->ts_gru->gs_gid, gts->ts_ctxnum,
			gts->ts_cbr_au_count, gts->ts_dsr_au_count);
	} else {
		gru_dbg(grudev, "failed to allocate a GTS %s\n", "");
		STAT(assign_context_failed);
	}

	preempt_enable();
	return gru;
}

/*
 * gru_fault
 *
 * Map the user's GRU segment
 *
 * 	Note: gru segments are always mmaped on GRU_GSEG_PAGESIZE boundaries.
 */
int gru_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct gru_thread_state *gts;
	unsigned long paddr, vaddr;

	vaddr = (unsigned long)vmf->virtual_address;
	gru_dbg(grudev, "vma %p, vaddr 0x%lx (0x%lx)\n",
		vma, vaddr, GSEG_BASE(vaddr));
	STAT(nopfn);

	/* The following check ensures vaddr is a valid address in the VMA */
	gts = gru_find_thread_state(vma, TSID(vaddr, vma));
	if (!gts)
		return VM_FAULT_SIGBUS;

again:
	mutex_lock(&gts->ts_ctxlock);
	preempt_disable();
	if (gts->ts_gru) {
		if (gts->ts_gru->gs_blade_id != uv_numa_blade_id()) {
			STAT(migrated_nopfn_unload);
			gru_unload_context(gts, 1);
		} else {
			if (gru_retarget_intr(gts))
				STAT(migrated_nopfn_retarget);
		}
	}

	if (!gts->ts_gru) {
		if (!gru_assign_gru_context(gts)) {
			mutex_unlock(&gts->ts_ctxlock);
			preempt_enable();
			schedule_timeout(GRU_ASSIGN_DELAY);	/* true hack ZZZ */
			if (gts->ts_steal_jiffies + GRU_STEAL_DELAY < jiffies)
				gru_steal_context(gts);
			goto again;
		}
		gru_load_context(gts);
		paddr = gseg_physical_address(gts->ts_gru, gts->ts_ctxnum);
		remap_pfn_range(vma, vaddr & ~(GRU_GSEG_PAGESIZE - 1),
				paddr >> PAGE_SHIFT, GRU_GSEG_PAGESIZE,
				vma->vm_page_prot);
	}

	mutex_unlock(&gts->ts_ctxlock);
	preempt_enable();

	return VM_FAULT_NOPAGE;
}
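
/*
 * Fault-path summary: a context loaded on a remote blade (after task
 * migration) is unloaded first; otherwise the TLB interrupt is
 * retargeted if needed. A context without a GRU is then assigned one on
 * the local blade (stealing a context after GRU_STEAL_DELAY if none is
 * free), loaded, and its GSEG remapped into the faulting vma.
 */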