sgi-gru: fix bugs related to module unload of the GRU driver
[linux-2.6-block.git] drivers/misc/sgi-gru/grumain.c
/*
 * SN Platform GRU Driver
 *
 * DRIVER TABLE MANAGER + GRU CONTEXT LOAD/UNLOAD
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/list.h>
#include <asm/uv/uv_hub.h>
#include "gru.h"
#include "grutables.h"
#include "gruhandles.h"

unsigned long gru_options __read_mostly;

static struct device_driver gru_driver = {
	.name = "gru"
};

static struct device gru_device = {
	.init_name = "",
	.driver = &gru_driver,
};

struct device *grudev = &gru_device;

/*
 * Select a gru fault map to be used by the current cpu. Note that
 * multiple cpus may be using the same map.
 * ZZZ should "shift" be used?? Depends on HT cpu numbering
 * ZZZ should be inline but did not work on emulator
 */
int gru_cpu_fault_map_id(void)
{
	return uv_blade_processor_id() % GRU_NUM_TFM;
}

/*--------- ASID Management -------------------------------------------
 *
 * Initially, assign asids sequentially from MIN_ASID .. MAX_ASID.
 * Once MAX is reached, flush the TLB & start over. However,
 * some asids may still be in use. There won't be many (percentage wise) still
 * in use. Search active contexts & determine the value of the first
 * asid in use ("x"s below). Set "limit" to this value.
 * This defines a block of assignable asids.
 *
 * When "limit" is reached, search forward from limit+1 and determine the
 * next block of assignable asids.
 *
 * Repeat until MAX_ASID is reached, then start over again.
 *
 * Each time MAX_ASID is reached, increment the asid generation. Since
 * the search for in-use asids only checks contexts with GRUs currently
 * assigned, asids in some contexts will be missed. Prior to loading
 * a context, the asid generation of the GTS asid is rechecked. If it
 * doesn't match the current generation, a new asid will be assigned.
 *
 *	0---------------x------------x---------------------x----|
 *	  ^-next	^-limit				    ^-MAX_ASID
 *
 * All asid manipulation & context loading/unloading is protected by the
 * gs_lock.
 */
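
/*
 * Worked example of the scheme above (asid values are illustrative,
 * not the real MIN_ASID/MAX_ASID constants): suppose a wrap occurs
 * while asids 0x10 and 0x30 are still in use by loaded contexts.
 * The scan sets "next" = MIN_ASID and "limit" = 0x10, so asids below
 * 0x10 are handed out sequentially. When 0x10 is reached, the scan
 * repeats starting just past 0x10 and finds the next limit, 0x30.
 * Once MAX_ASID is hit, gs_asid_gen is bumped so stale GTS asids are
 * detected and reassigned at the next context load.
 */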

/* Hit the asid limit. Start over */
static int gru_wrap_asid(struct gru_state *gru)
{
	gru_dbg(grudev, "gid %d\n", gru->gs_gid);
	STAT(asid_wrap);
	gru->gs_asid_gen++;
	return MIN_ASID;
}

/* Find the next chunk of unused asids */
static int gru_reset_asid_limit(struct gru_state *gru, int asid)
{
	int i, gid, inuse_asid, limit;

	gru_dbg(grudev, "gid %d, asid 0x%x\n", gru->gs_gid, asid);
	STAT(asid_next);
	limit = MAX_ASID;
	if (asid >= limit)
		asid = gru_wrap_asid(gru);
	gru_flush_all_tlb(gru);
	gid = gru->gs_gid;
again:
	for (i = 0; i < GRU_NUM_CCH; i++) {
		if (!gru->gs_gts[i])
			continue;
		inuse_asid = gru->gs_gts[i]->ts_gms->ms_asids[gid].mt_asid;
		gru_dbg(grudev, "gid %d, gts %p, gms %p, inuse 0x%x, cxt %d\n",
			gru->gs_gid, gru->gs_gts[i], gru->gs_gts[i]->ts_gms,
			inuse_asid, i);
		if (inuse_asid == asid) {
			asid += ASID_INC;
			if (asid >= limit) {
				/*
				 * empty range: reset the range limit and
				 * start over
				 */
				limit = MAX_ASID;
				if (asid >= MAX_ASID)
					asid = gru_wrap_asid(gru);
				goto again;
			}
		}

		if ((inuse_asid > asid) && (inuse_asid < limit))
			limit = inuse_asid;
	}
	gru->gs_asid_limit = limit;
	gru->gs_asid = asid;
	gru_dbg(grudev, "gid %d, new asid 0x%x, new_limit 0x%x\n", gru->gs_gid,
		asid, limit);
	return asid;
}

/* Assign a new ASID to a thread context. */
static int gru_assign_asid(struct gru_state *gru)
{
	int asid;

	gru->gs_asid += ASID_INC;
	asid = gru->gs_asid;
	if (asid >= gru->gs_asid_limit)
		asid = gru_reset_asid_limit(gru, asid);

	gru_dbg(grudev, "gid %d, asid 0x%x\n", gru->gs_gid, asid);
	return asid;
}

/*
 * Clear n bits in a word. Return a word indicating the bits that were cleared.
 * Optionally, build an array of chars that contain the bit numbers allocated.
 */
static unsigned long reserve_resources(unsigned long *p, int n, int mmax,
				       char *idx)
{
	unsigned long bits = 0;
	int i;

	do {
		i = find_first_bit(p, mmax);
		if (i == mmax)
			BUG();
		__clear_bit(i, p);
		__set_bit(i, &bits);
		if (idx)
			*idx++ = i;
	} while (--n);
	return bits;
}

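/*
 * Example (illustrative values): with *p == 0xf0 and n == 2, the two
 * lowest set bits (4 and 5) move from the free map into the returned
 * allocation mask: *p ends up 0xc0, the return value is 0x30, and
 * idx[], if supplied, receives {4, 5}.
 */
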
unsigned long gru_reserve_cb_resources(struct gru_state *gru, int cbr_au_count,
				       char *cbmap)
{
	return reserve_resources(&gru->gs_cbr_map, cbr_au_count, GRU_CBR_AU,
				 cbmap);
}

unsigned long gru_reserve_ds_resources(struct gru_state *gru, int dsr_au_count,
				       char *dsmap)
{
	return reserve_resources(&gru->gs_dsr_map, dsr_au_count, GRU_DSR_AU,
				 dsmap);
}

static void reserve_gru_resources(struct gru_state *gru,
				  struct gru_thread_state *gts)
{
	gru->gs_active_contexts++;
	gts->ts_cbr_map =
		gru_reserve_cb_resources(gru, gts->ts_cbr_au_count,
					 gts->ts_cbr_idx);
	gts->ts_dsr_map =
		gru_reserve_ds_resources(gru, gts->ts_dsr_au_count, NULL);
}

static void free_gru_resources(struct gru_state *gru,
			       struct gru_thread_state *gts)
{
	gru->gs_active_contexts--;
	gru->gs_cbr_map |= gts->ts_cbr_map;
	gru->gs_dsr_map |= gts->ts_dsr_map;
}

/*
 * Check if a GRU has sufficient free resources to satisfy an allocation
 * request. Note: GRU locks may or may not be held when this is called. If
 * not held, recheck after acquiring the appropriate locks.
 *
 * Returns 1 if sufficient resources, 0 if not
 */
static int check_gru_resources(struct gru_state *gru, int cbr_au_count,
			       int dsr_au_count, int max_active_contexts)
{
	return hweight64(gru->gs_cbr_map) >= cbr_au_count
		&& hweight64(gru->gs_dsr_map) >= dsr_au_count
		&& gru->gs_active_contexts < max_active_contexts;
}

/*
 * TLB management requires tracking all GRU chiplets that have loaded a GSEG
 * context.
 */
static int gru_load_mm_tracker(struct gru_state *gru,
			       struct gru_thread_state *gts)
{
	struct gru_mm_struct *gms = gts->ts_gms;
	struct gru_mm_tracker *asids = &gms->ms_asids[gru->gs_gid];
	unsigned short ctxbitmap = (1 << gts->ts_ctxnum);
	int asid;

	spin_lock(&gms->ms_asid_lock);
	asid = asids->mt_asid;

	spin_lock(&gru->gs_asid_lock);
	if (asid == 0 || (asids->mt_ctxbitmap == 0 && asids->mt_asid_gen !=
			  gru->gs_asid_gen)) {
		asid = gru_assign_asid(gru);
		asids->mt_asid = asid;
		asids->mt_asid_gen = gru->gs_asid_gen;
		STAT(asid_new);
	} else {
		STAT(asid_reuse);
	}
	spin_unlock(&gru->gs_asid_lock);

	BUG_ON(asids->mt_ctxbitmap & ctxbitmap);
	asids->mt_ctxbitmap |= ctxbitmap;
	if (!test_bit(gru->gs_gid, gms->ms_asidmap))
		__set_bit(gru->gs_gid, gms->ms_asidmap);
	spin_unlock(&gms->ms_asid_lock);

	gru_dbg(grudev,
		"gid %d, gts %p, gms %p, ctxnum %d, asid 0x%x, asidmap 0x%lx\n",
		gru->gs_gid, gts, gms, gts->ts_ctxnum, asid,
		gms->ms_asidmap[0]);
	return asid;
}

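/*
 * Illustrative tracker state (hypothetical values): if contexts 2 and
 * 5 of one gru have loaded GSEGs for the same mm, the (gms, gid)
 * tracker holds mt_ctxbitmap == 0x24 and a single shared asid, and
 * the gid's bit is set in gms->ms_asidmap so TLB flushes know which
 * chiplets must be targeted.
 */
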
static void gru_unload_mm_tracker(struct gru_state *gru,
				  struct gru_thread_state *gts)
{
	struct gru_mm_struct *gms = gts->ts_gms;
	struct gru_mm_tracker *asids;
	unsigned short ctxbitmap;

	asids = &gms->ms_asids[gru->gs_gid];
	ctxbitmap = (1 << gts->ts_ctxnum);
	spin_lock(&gms->ms_asid_lock);
	spin_lock(&gru->gs_asid_lock);
	BUG_ON((asids->mt_ctxbitmap & ctxbitmap) != ctxbitmap);
	asids->mt_ctxbitmap ^= ctxbitmap;
	gru_dbg(grudev, "gid %d, gts %p, gms %p, ctxnum %d, asidmap 0x%lx\n",
		gru->gs_gid, gts, gms, gts->ts_ctxnum, gms->ms_asidmap[0]);
	spin_unlock(&gru->gs_asid_lock);
	spin_unlock(&gms->ms_asid_lock);
}

/*
 * Decrement the reference count on a GTS structure. Free the structure
 * if the reference count goes to zero.
 */
void gts_drop(struct gru_thread_state *gts)
{
	if (gts && atomic_dec_return(&gts->ts_refcnt) == 0) {
		gru_drop_mmu_notifier(gts->ts_gms);
		kfree(gts);
		STAT(gts_free);
	}
}

/*
 * Locate the GTS structure for the current thread.
 */
static struct gru_thread_state *gru_find_current_gts_nolock(struct gru_vma_data
							    *vdata, int tsid)
{
	struct gru_thread_state *gts;

	list_for_each_entry(gts, &vdata->vd_head, ts_next)
		if (gts->ts_tsid == tsid)
			return gts;
	return NULL;
}

/*
 * Allocate a thread state structure.
 */
static struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
					      struct gru_vma_data *vdata,
					      int tsid)
{
	struct gru_thread_state *gts;
	int bytes;

	bytes = DSR_BYTES(vdata->vd_dsr_au_count) +
		CBR_BYTES(vdata->vd_cbr_au_count);
	bytes += sizeof(struct gru_thread_state);
	gts = kzalloc(bytes, GFP_KERNEL);
	if (!gts)
		return NULL;

	STAT(gts_alloc);
	atomic_set(&gts->ts_refcnt, 1);
	mutex_init(&gts->ts_ctxlock);
	gts->ts_cbr_au_count = vdata->vd_cbr_au_count;
	gts->ts_dsr_au_count = vdata->vd_dsr_au_count;
	gts->ts_user_options = vdata->vd_user_options;
	gts->ts_tsid = tsid;
	gts->ts_ctxnum = NULLCTX;
	gts->ts_mm = current->mm;
	gts->ts_vma = vma;
	gts->ts_tlb_int_select = -1;
	gts->ts_gms = gru_register_mmu_notifier();
	if (!gts->ts_gms)
		goto err;

	gru_dbg(grudev, "alloc vdata %p, new gts %p\n", vdata, gts);
	return gts;

err:
	gts_drop(gts);
	return NULL;
}

/*
 * Allocate a vma private data structure.
 */
struct gru_vma_data *gru_alloc_vma_data(struct vm_area_struct *vma, int tsid)
{
	struct gru_vma_data *vdata = NULL;

	vdata = kmalloc(sizeof(*vdata), GFP_KERNEL);
	if (!vdata)
		return NULL;

	INIT_LIST_HEAD(&vdata->vd_head);
	spin_lock_init(&vdata->vd_lock);
	gru_dbg(grudev, "alloc vdata %p\n", vdata);
	return vdata;
}

/*
 * Find the thread state structure for the current thread.
 */
struct gru_thread_state *gru_find_thread_state(struct vm_area_struct *vma,
					       int tsid)
{
	struct gru_vma_data *vdata = vma->vm_private_data;
	struct gru_thread_state *gts;

	spin_lock(&vdata->vd_lock);
	gts = gru_find_current_gts_nolock(vdata, tsid);
	spin_unlock(&vdata->vd_lock);
	gru_dbg(grudev, "vma %p, gts %p\n", vma, gts);
	return gts;
}

/*
 * Allocate a new thread state for a GSEG. Note that a race may allow
 * another thread to create a gts first; if so, the existing gts is used.
 */
struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct *vma,
						int tsid)
{
	struct gru_vma_data *vdata = vma->vm_private_data;
	struct gru_thread_state *gts, *ngts;

	gts = gru_alloc_gts(vma, vdata, tsid);
	if (!gts)
		return NULL;

	spin_lock(&vdata->vd_lock);
	ngts = gru_find_current_gts_nolock(vdata, tsid);
	if (ngts) {
		gts_drop(gts);
		gts = ngts;
		STAT(gts_double_allocate);
	} else {
		list_add(&gts->ts_next, &vdata->vd_head);
	}
	spin_unlock(&vdata->vd_lock);
	gru_dbg(grudev, "vma %p, gts %p\n", vma, gts);
	return gts;
}

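/*
 * The allocate-then-recheck pattern above resolves the race: both
 * threads allocate a gts, the loser finds the winner's gts on the
 * vdata list under vd_lock, drops its own copy via gts_drop() and
 * shares the winner's. STAT(gts_double_allocate) counts these events.
 */
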
/*
 * Free the GRU context assigned to the thread state.
 */
static void gru_free_gru_context(struct gru_thread_state *gts)
{
	struct gru_state *gru;

	gru = gts->ts_gru;
	gru_dbg(grudev, "gts %p, gid %d\n", gts, gru->gs_gid);

	spin_lock(&gru->gs_lock);
	gru->gs_gts[gts->ts_ctxnum] = NULL;
	free_gru_resources(gru, gts);
	BUG_ON(test_bit(gts->ts_ctxnum, &gru->gs_context_map) == 0);
	__clear_bit(gts->ts_ctxnum, &gru->gs_context_map);
	gts->ts_ctxnum = NULLCTX;
	gts->ts_gru = NULL;
	gts->ts_blade = -1;
	spin_unlock(&gru->gs_lock);

	gts_drop(gts);
	STAT(free_context);
}

/*
 * Prefetching cachelines helps hardware performance.
 * (Strictly a performance enhancement. Not functionally required.)
 */
static void prefetch_data(void *p, int num, int stride)
{
	while (num-- > 0) {
		prefetchw(p);
		p += stride;
	}
}

static inline long gru_copy_handle(void *d, void *s)
{
	memcpy(d, s, GRU_HANDLE_BYTES);
	return GRU_HANDLE_BYTES;
}

static void gru_prefetch_context(void *gseg, void *cb, void *cbe,
				 unsigned long cbrmap, unsigned long length)
{
	int i, scr;

	prefetch_data(gseg + GRU_DS_BASE, length / GRU_CACHE_LINE_BYTES,
		      GRU_CACHE_LINE_BYTES);

	for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
		prefetch_data(cb, 1, GRU_CACHE_LINE_BYTES);
		prefetch_data(cbe + i * GRU_HANDLE_STRIDE, 1,
			      GRU_CACHE_LINE_BYTES);
		cb += GRU_HANDLE_STRIDE;
	}
}

static void gru_load_context_data(void *save, void *grubase, int ctxnum,
				  unsigned long cbrmap, unsigned long dsrmap)
{
	void *gseg, *cb, *cbe;
	unsigned long length;
	int i, scr;

	gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
	cb = gseg + GRU_CB_BASE;
	cbe = grubase + GRU_CBE_BASE;
	length = hweight64(dsrmap) * GRU_DSR_AU_BYTES;
	gru_prefetch_context(gseg, cb, cbe, cbrmap, length);

	for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
		save += gru_copy_handle(cb, save);
		save += gru_copy_handle(cbe + i * GRU_HANDLE_STRIDE, save);
		cb += GRU_HANDLE_STRIDE;
	}

	memcpy(gseg + GRU_DS_BASE, save, length);
}

static void gru_unload_context_data(void *save, void *grubase, int ctxnum,
				    unsigned long cbrmap, unsigned long dsrmap)
{
	void *gseg, *cb, *cbe;
	unsigned long length;
	int i, scr;

	gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
	cb = gseg + GRU_CB_BASE;
	cbe = grubase + GRU_CBE_BASE;
	length = hweight64(dsrmap) * GRU_DSR_AU_BYTES;
	gru_prefetch_context(gseg, cb, cbe, cbrmap, length);

	for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
		save += gru_copy_handle(save, cb);
		save += gru_copy_handle(save, cbe + i * GRU_HANDLE_STRIDE);
		cb += GRU_HANDLE_STRIDE;
	}
	memcpy(save, gseg + GRU_DS_BASE, length);
}

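/*
 * Layout of the context save area implied by the copy loops above:
 * for each allocated CBR, the CB handle is immediately followed by
 * its CBE handle, and the block of data segment (DSR) lines comes
 * last:
 *
 *   [cb0][cbe0][cb1][cbe1]...[dsr data: hweight64(dsrmap) * GRU_DSR_AU_BYTES]
 */
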
void gru_unload_context(struct gru_thread_state *gts, int savestate)
{
	struct gru_state *gru = gts->ts_gru;
	struct gru_context_configuration_handle *cch;
	int ctxnum = gts->ts_ctxnum;

	zap_vma_ptes(gts->ts_vma, UGRUADDR(gts), GRU_GSEG_PAGESIZE);
	cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);

	gru_dbg(grudev, "gts %p\n", gts);
	lock_cch_handle(cch);
	if (cch_interrupt_sync(cch))
		BUG();

	gru_unload_mm_tracker(gru, gts);
	if (savestate)
		gru_unload_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr,
					ctxnum, gts->ts_cbr_map,
					gts->ts_dsr_map);

	if (cch_deallocate(cch))
		BUG();
	gts->ts_force_unload = 0;	/* ts_force_unload locked by CCH lock */
	unlock_cch_handle(cch);

	gru_free_gru_context(gts);
	STAT(unload_context);
}

/*
 * Load a GRU context by copying it from the thread data structure in memory
 * to the GRU.
 */
static void gru_load_context(struct gru_thread_state *gts)
{
	struct gru_state *gru = gts->ts_gru;
	struct gru_context_configuration_handle *cch;
	int err, asid, ctxnum = gts->ts_ctxnum;

	gru_dbg(grudev, "gts %p\n", gts);
	cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);

	lock_cch_handle(cch);
	asid = gru_load_mm_tracker(gru, gts);
	cch->tfm_fault_bit_enable =
		(gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
		 || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
	cch->tlb_int_enable = (gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
	if (cch->tlb_int_enable) {
		gts->ts_tlb_int_select = gru_cpu_fault_map_id();
		cch->tlb_int_select = gts->ts_tlb_int_select;
	}
	cch->tfm_done_bit_enable = 0;
	err = cch_allocate(cch, asid, gts->ts_cbr_map, gts->ts_dsr_map);
	if (err) {
		gru_dbg(grudev,
			"err %d: cch %p, gts %p, cbr 0x%lx, dsr 0x%lx\n",
			err, cch, gts, gts->ts_cbr_map, gts->ts_dsr_map);
		BUG();
	}

	gru_load_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr, ctxnum,
			      gts->ts_cbr_map, gts->ts_dsr_map);

	if (cch_start(cch))
		BUG();
	unlock_cch_handle(cch);

	STAT(load_context);
}

/*
 * Update fields in an active CCH:
 *	- retarget interrupts on local blade
 *	- force a delayed context unload by clearing the CCH asids. This
 *	  forces TLB misses for new GRU instructions. The context is unloaded
 *	  when the next TLB miss occurs.
 */
static int gru_update_cch(struct gru_thread_state *gts, int int_select)
{
	struct gru_context_configuration_handle *cch;
	struct gru_state *gru = gts->ts_gru;
	int i, ctxnum = gts->ts_ctxnum, ret = 0;

	cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);

	lock_cch_handle(cch);
	if (cch->state == CCHSTATE_ACTIVE) {
		if (gru->gs_gts[gts->ts_ctxnum] != gts)
			goto exit;
		if (cch_interrupt(cch))
			BUG();
		if (int_select >= 0) {
			gts->ts_tlb_int_select = int_select;
			cch->tlb_int_select = int_select;
		} else {
			for (i = 0; i < 8; i++)
				cch->asid[i] = 0;
			cch->tfm_fault_bit_enable = 0;
			cch->tlb_int_enable = 0;
			gts->ts_force_unload = 1;
		}
		if (cch_start(cch))
			BUG();
		ret = 1;
	}
exit:
	unlock_cch_handle(cch);
	return ret;
}

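/*
 * Sketch of the delayed-unload path described above (step by step):
 *   1. gru_update_cch(gts, -1) zeroes cch->asid[], disables the fault
 *      and interrupt bits, sets gts->ts_force_unload, and restarts
 *      the context.
 *   2. The next GRU instruction in the GSEG takes a TLB miss because
 *      no asid is loaded.
 *   3. The driver's TLB fault path then sees ts_force_unload and
 *      unloads the context instead of servicing the miss.
 */
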
/*
 * Update CCH tlb interrupt select. Required when all the following is true:
 *	- task's GRU context is loaded into a GRU
 *	- task is using interrupt notification for TLB faults
 *	- task has migrated to a different cpu on the same blade where
 *	  it was previously running.
 */
static int gru_retarget_intr(struct gru_thread_state *gts)
{
	if (gts->ts_tlb_int_select < 0
	    || gts->ts_tlb_int_select == gru_cpu_fault_map_id())
		return 0;

	gru_dbg(grudev, "retarget from %d to %d\n", gts->ts_tlb_int_select,
		gru_cpu_fault_map_id());
	return gru_update_cch(gts, gru_cpu_fault_map_id());
}


/*
 * Insufficient GRU resources available on the local blade. Steal a context
 * from a process. This is a hack until a _real_ resource scheduler is
 * written....
 */
#define next_ctxnum(n)	((n) < GRU_NUM_CCH - 2 ? (n) + 1 : 0)
#define next_gru(b, g)	(((g) < &(b)->bs_grus[GRU_CHIPLETS_PER_BLADE - 1]) ? \
				 ((g)+1) : &(b)->bs_grus[0])

static void gru_steal_context(struct gru_thread_state *gts)
{
	struct gru_blade_state *blade;
	struct gru_state *gru, *gru0;
	struct gru_thread_state *ngts = NULL;
	int ctxnum, ctxnum0, flag = 0, cbr, dsr;

	cbr = gts->ts_cbr_au_count;
	dsr = gts->ts_dsr_au_count;

	preempt_disable();
	blade = gru_base[uv_numa_blade_id()];
	spin_lock(&blade->bs_lock);

	ctxnum = next_ctxnum(blade->bs_lru_ctxnum);
	gru = blade->bs_lru_gru;
	if (ctxnum == 0)
		gru = next_gru(blade, gru);
	ctxnum0 = ctxnum;
	gru0 = gru;
	while (1) {
		if (check_gru_resources(gru, cbr, dsr, GRU_NUM_CCH))
			break;
		spin_lock(&gru->gs_lock);
		for (; ctxnum < GRU_NUM_CCH; ctxnum++) {
			if (flag && gru == gru0 && ctxnum == ctxnum0)
				break;
			ngts = gru->gs_gts[ctxnum];
			/*
			 * We are grabbing locks out of order, so trylock is
			 * needed. GTSs are usually not locked, so the odds of
			 * success are high. If trylock fails, try to steal a
			 * different GSEG.
			 */
			if (ngts && mutex_trylock(&ngts->ts_ctxlock))
				break;
			ngts = NULL;
			flag = 1;
		}
		spin_unlock(&gru->gs_lock);
		if (ngts || (flag && gru == gru0 && ctxnum == ctxnum0))
			break;
		ctxnum = 0;
		gru = next_gru(blade, gru);
	}
	blade->bs_lru_gru = gru;
	blade->bs_lru_ctxnum = ctxnum;
	spin_unlock(&blade->bs_lock);
	preempt_enable();

	if (ngts) {
		STAT(steal_context);
		ngts->ts_steal_jiffies = jiffies;
		gru_unload_context(ngts, 1);
		mutex_unlock(&ngts->ts_ctxlock);
	} else {
		STAT(steal_context_failed);
	}
	gru_dbg(grudev,
		"stole gid %d, ctxnum %d from gts %p. Need cb %d, ds %d;"
		" avail cb %ld, ds %ld\n",
		gru->gs_gid, ctxnum, ngts, cbr, dsr, hweight64(gru->gs_cbr_map),
		hweight64(gru->gs_dsr_map));
}

/*
 * Scan the GRUs on the local blade & assign a GRU context.
 */
static struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts)
{
	struct gru_state *gru, *grux;
	int i, max_active_contexts;

	preempt_disable();

again:
	gru = NULL;
	max_active_contexts = GRU_NUM_CCH;
	for_each_gru_on_blade(grux, uv_numa_blade_id(), i) {
		if (check_gru_resources(grux, gts->ts_cbr_au_count,
					gts->ts_dsr_au_count,
					max_active_contexts)) {
			gru = grux;
			max_active_contexts = grux->gs_active_contexts;
			if (max_active_contexts == 0)
				break;
		}
	}

	if (gru) {
		spin_lock(&gru->gs_lock);
		if (!check_gru_resources(gru, gts->ts_cbr_au_count,
					 gts->ts_dsr_au_count, GRU_NUM_CCH)) {
			spin_unlock(&gru->gs_lock);
			goto again;
		}
		reserve_gru_resources(gru, gts);
		gts->ts_gru = gru;
		gts->ts_blade = gru->gs_blade_id;
		gts->ts_ctxnum =
			find_first_zero_bit(&gru->gs_context_map, GRU_NUM_CCH);
		BUG_ON(gts->ts_ctxnum == GRU_NUM_CCH);
		atomic_inc(&gts->ts_refcnt);
		gru->gs_gts[gts->ts_ctxnum] = gts;
		__set_bit(gts->ts_ctxnum, &gru->gs_context_map);
		spin_unlock(&gru->gs_lock);

		STAT(assign_context);
		gru_dbg(grudev,
			"gseg %p, gts %p, gid %d, ctx %d, cbr %d, dsr %d\n",
			gseg_virtual_address(gts->ts_gru, gts->ts_ctxnum), gts,
			gts->ts_gru->gs_gid, gts->ts_ctxnum,
			gts->ts_cbr_au_count, gts->ts_dsr_au_count);
	} else {
		gru_dbg(grudev, "failed to allocate a GTS %s\n", "");
		STAT(assign_context_failed);
	}

	preempt_enable();
	return gru;
}

/*
 * gru_fault
 *
 * Map the user's GRU segment
 *
 * Note: GRU segments are always mmaped on GRU_GSEG_PAGESIZE boundaries.
 */
int gru_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct gru_thread_state *gts;
	unsigned long paddr, vaddr;

	vaddr = (unsigned long)vmf->virtual_address;
	gru_dbg(grudev, "vma %p, vaddr 0x%lx (0x%lx)\n",
		vma, vaddr, GSEG_BASE(vaddr));
	STAT(nopfn);

	/* The following check ensures vaddr is a valid address in the VMA */
	gts = gru_find_thread_state(vma, TSID(vaddr, vma));
	if (!gts)
		return VM_FAULT_SIGBUS;

again:
	mutex_lock(&gts->ts_ctxlock);
	preempt_disable();
	if (gts->ts_gru) {
		if (gts->ts_gru->gs_blade_id != uv_numa_blade_id()) {
			STAT(migrated_nopfn_unload);
			gru_unload_context(gts, 1);
		} else {
			if (gru_retarget_intr(gts))
				STAT(migrated_nopfn_retarget);
		}
	}

	if (!gts->ts_gru) {
		if (!gru_assign_gru_context(gts)) {
			mutex_unlock(&gts->ts_ctxlock);
			preempt_enable();
			schedule_timeout(GRU_ASSIGN_DELAY);  /* true hack ZZZ */
			if (gts->ts_steal_jiffies + GRU_STEAL_DELAY < jiffies)
				gru_steal_context(gts);
			goto again;
		}
		gru_load_context(gts);
		paddr = gseg_physical_address(gts->ts_gru, gts->ts_ctxnum);
		remap_pfn_range(vma, vaddr & ~(GRU_GSEG_PAGESIZE - 1),
				paddr >> PAGE_SHIFT, GRU_GSEG_PAGESIZE,
				vma->vm_page_prot);
	}

	mutex_unlock(&gts->ts_ctxlock);
	preempt_enable();

	return VM_FAULT_NOPAGE;
}
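
/*
 * Example of the mapping math in gru_fault() above (values are
 * illustrative): a fault anywhere inside a GSEG causes the whole
 * GRU_GSEG_PAGESIZE region to be remapped at once:
 *
 *   user base = vaddr & ~(GRU_GSEG_PAGESIZE - 1)
 *   first pfn = gseg_physical_address(gru, ctxnum) >> PAGE_SHIFT
 *
 * so later references within the same GSEG hit the established
 * mapping and do not fault again until the context is unloaded and
 * its ptes are zapped.
 */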