gru: support 64-bit GRU addresses
drivers/misc/sgi-gru/grumain.c
/*
 * SN Platform GRU Driver
 *
 * DRIVER TABLE MANAGER + GRU CONTEXT LOAD/UNLOAD
 *
 * Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/list.h>
#include <asm/uv/uv_hub.h>
#include "gru.h"
#include "grutables.h"
#include "gruhandles.h"

unsigned long gru_options __read_mostly;

static struct device_driver gru_driver = {
	.name = "gru"
};

static struct device gru_device = {
	.init_name = "",
	.driver = &gru_driver,
};

struct device *grudev = &gru_device;

/*
 * Select a gru fault map to be used by the current cpu. Note that
 * multiple cpus may be using the same map.
 * ZZZ should "shift" be used?? Depends on HT cpu numbering
 * ZZZ should be inline but did not work on emulator
 */
int gru_cpu_fault_map_id(void)
{
	return uv_blade_processor_id() % GRU_NUM_TFM;
}
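
/*
 * Illustrative note (the GRU_NUM_TFM value is assumed here, not taken from
 * this file): if GRU_NUM_TFM were 64, blade-local cpus 0 and 64 would both
 * select fault map 0; in general a TFM is shared by all cpus whose
 * blade-local ids are congruent mod GRU_NUM_TFM.
 */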

/*--------- ASID Management -------------------------------------------
 *
 * Initially, assign asids sequentially from MIN_ASID .. MAX_ASID.
 * Once MAX is reached, flush the TLB & start over. However,
 * some asids may still be in use. There won't be many (percentage wise) still
 * in use. Search active contexts & determine the value of the first
 * asid in use ("x"s below). Set "limit" to this value.
 * This defines a block of assignable asids.
 *
 * When "limit" is reached, search forward from limit+1 and determine the
 * next block of assignable asids.
 *
 * Repeat until MAX_ASID is reached, then start over again.
 *
 * Each time MAX_ASID is reached, increment the asid generation. Since
 * the search for in-use asids only checks contexts with GRUs currently
 * assigned, asids in some contexts will be missed. Prior to loading
 * a context, the asid generation of the GTS asid is rechecked. If it
 * doesn't match the current generation, a new asid will be assigned.
 *
 *	0---------------x------------x---------------------x----|
 *	  ^-next	^-limit	   ^-MAX_ASID
 *
 * All asid manipulation & context loading/unloading is protected by the
 * gs_lock.
 */
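
/*
 * Worked example (asid values invented for illustration): suppose
 * MIN_ASID = 1, MAX_ASID = 10, and active contexts still hold asids 3
 * and 7. Assignment starts at "next" = 1; asids 1 and 2 are handed out
 * until "limit" (3, the first in-use asid) is reached. The reset scan
 * then finds the next free block 4..6, and later 8..10. Passing
 * MAX_ASID wraps back to MIN_ASID and increments gs_asid_gen so that
 * stale asids in unloaded contexts can be detected and reassigned.
 */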

/* Hit the asid limit. Start over */
static int gru_wrap_asid(struct gru_state *gru)
{
	gru_dbg(grudev, "gid %d\n", gru->gs_gid);
	STAT(asid_wrap);
	gru->gs_asid_gen++;
	return MIN_ASID;
}

/* Find the next chunk of unused asids */
static int gru_reset_asid_limit(struct gru_state *gru, int asid)
{
	int i, gid, inuse_asid, limit;

	gru_dbg(grudev, "gid %d, asid 0x%x\n", gru->gs_gid, asid);
	STAT(asid_next);
	limit = MAX_ASID;
	if (asid >= limit)
		asid = gru_wrap_asid(gru);
	gru_flush_all_tlb(gru);
	gid = gru->gs_gid;
again:
	for (i = 0; i < GRU_NUM_CCH; i++) {
		if (!gru->gs_gts[i] || is_kernel_context(gru->gs_gts[i]))
			continue;
		inuse_asid = gru->gs_gts[i]->ts_gms->ms_asids[gid].mt_asid;
		gru_dbg(grudev, "gid %d, gts %p, gms %p, inuse 0x%x, cxt %d\n",
			gru->gs_gid, gru->gs_gts[i], gru->gs_gts[i]->ts_gms,
			inuse_asid, i);
		if (inuse_asid == asid) {
			asid += ASID_INC;
			if (asid >= limit) {
				/*
				 * empty range: reset the range limit and
				 * start over
				 */
				limit = MAX_ASID;
				if (asid >= MAX_ASID)
					asid = gru_wrap_asid(gru);
				goto again;
			}
		}

		if ((inuse_asid > asid) && (inuse_asid < limit))
			limit = inuse_asid;
	}
	gru->gs_asid_limit = limit;
	gru->gs_asid = asid;
	gru_dbg(grudev, "gid %d, new asid 0x%x, new_limit 0x%x\n", gru->gs_gid,
		asid, limit);
	return asid;
}

/* Assign a new ASID to a thread context. */
static int gru_assign_asid(struct gru_state *gru)
{
	int asid;

	gru->gs_asid += ASID_INC;
	asid = gru->gs_asid;
	if (asid >= gru->gs_asid_limit)
		asid = gru_reset_asid_limit(gru, asid);

	gru_dbg(grudev, "gid %d, asid 0x%x\n", gru->gs_gid, asid);
	return asid;
}

/*
 * Clear n bits in a word. Return a word indicating the bits that were cleared.
 * Optionally, build an array of chars that contain the bit numbers allocated.
 */
static unsigned long reserve_resources(unsigned long *p, int n, int mmax,
				       char *idx)
{
	unsigned long bits = 0;
	int i;

	while (n--) {
		i = find_first_bit(p, mmax);
		if (i == mmax)
			BUG();
		__clear_bit(i, p);
		__set_bit(i, &bits);
		if (idx)
			*idx++ = i;
	}
	return bits;
}
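
/*
 * Worked example (map value invented for illustration): with *p = 0x2d
 * (bits 0, 2, 3 and 5 available) and n = 2, the loop claims bits 0 and 2,
 * leaving *p = 0x28 and returning bits = 0x5; if idx is non-NULL it is
 * filled with { 0, 2 }.
 */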

unsigned long gru_reserve_cb_resources(struct gru_state *gru, int cbr_au_count,
				       char *cbmap)
{
	return reserve_resources(&gru->gs_cbr_map, cbr_au_count, GRU_CBR_AU,
				 cbmap);
}

unsigned long gru_reserve_ds_resources(struct gru_state *gru, int dsr_au_count,
				       char *dsmap)
{
	return reserve_resources(&gru->gs_dsr_map, dsr_au_count, GRU_DSR_AU,
				 dsmap);
}

static void reserve_gru_resources(struct gru_state *gru,
				  struct gru_thread_state *gts)
{
	gru->gs_active_contexts++;
	gts->ts_cbr_map =
	    gru_reserve_cb_resources(gru, gts->ts_cbr_au_count,
				     gts->ts_cbr_idx);
	gts->ts_dsr_map =
	    gru_reserve_ds_resources(gru, gts->ts_dsr_au_count, NULL);
}

static void free_gru_resources(struct gru_state *gru,
			       struct gru_thread_state *gts)
{
	gru->gs_active_contexts--;
	gru->gs_cbr_map |= gts->ts_cbr_map;
	gru->gs_dsr_map |= gts->ts_dsr_map;
}

/*
 * Check if a GRU has sufficient free resources to satisfy an allocation
 * request. Note: GRU locks may or may not be held when this is called. If
 * not held, recheck after acquiring the appropriate locks.
 *
 * Returns 1 if sufficient resources, 0 if not
 */
static int check_gru_resources(struct gru_state *gru, int cbr_au_count,
			       int dsr_au_count, int max_active_contexts)
{
	return hweight64(gru->gs_cbr_map) >= cbr_au_count
		&& hweight64(gru->gs_dsr_map) >= dsr_au_count
		&& gru->gs_active_contexts < max_active_contexts;
}
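
/*
 * Note on the bitmap convention (inferred from the code above): a set bit
 * in gs_cbr_map/gs_dsr_map means the allocation unit is free.
 * reserve_resources() clears bits to allocate and free_gru_resources()
 * ORs them back in, so hweight64() counts the free allocation units.
 */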

/*
 * TLB management requires tracking all GRU chiplets that have loaded a GSEG
 * context.
 */
static int gru_load_mm_tracker(struct gru_state *gru,
			       struct gru_thread_state *gts)
{
	struct gru_mm_struct *gms = gts->ts_gms;
	struct gru_mm_tracker *asids = &gms->ms_asids[gru->gs_gid];
	unsigned short ctxbitmap = (1 << gts->ts_ctxnum);
	int asid;

	spin_lock(&gms->ms_asid_lock);
	asid = asids->mt_asid;

	spin_lock(&gru->gs_asid_lock);
	if (asid == 0 || (asids->mt_ctxbitmap == 0 && asids->mt_asid_gen !=
			  gru->gs_asid_gen)) {
		asid = gru_assign_asid(gru);
		asids->mt_asid = asid;
		asids->mt_asid_gen = gru->gs_asid_gen;
		STAT(asid_new);
	} else {
		STAT(asid_reuse);
	}
	spin_unlock(&gru->gs_asid_lock);

	BUG_ON(asids->mt_ctxbitmap & ctxbitmap);
	asids->mt_ctxbitmap |= ctxbitmap;
	if (!test_bit(gru->gs_gid, gms->ms_asidmap))
		__set_bit(gru->gs_gid, gms->ms_asidmap);
	spin_unlock(&gms->ms_asid_lock);

	gru_dbg(grudev,
		"gid %d, gts %p, gms %p, ctxnum %d, asid 0x%x, asidmap 0x%lx\n",
		gru->gs_gid, gts, gms, gts->ts_ctxnum, asid,
		gms->ms_asidmap[0]);
	return asid;
}

static void gru_unload_mm_tracker(struct gru_state *gru,
				  struct gru_thread_state *gts)
{
	struct gru_mm_struct *gms = gts->ts_gms;
	struct gru_mm_tracker *asids;
	unsigned short ctxbitmap;

	asids = &gms->ms_asids[gru->gs_gid];
	ctxbitmap = (1 << gts->ts_ctxnum);
	spin_lock(&gms->ms_asid_lock);
	spin_lock(&gru->gs_asid_lock);
	BUG_ON((asids->mt_ctxbitmap & ctxbitmap) != ctxbitmap);
	asids->mt_ctxbitmap ^= ctxbitmap;
	gru_dbg(grudev, "gid %d, gts %p, gms %p, ctxnum %d, asidmap 0x%lx\n",
		gru->gs_gid, gts, gms, gts->ts_ctxnum, gms->ms_asidmap[0]);
	spin_unlock(&gru->gs_asid_lock);
	spin_unlock(&gms->ms_asid_lock);
}
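
/*
 * Lock-ordering note (as used by the two tracker routines above):
 * gms->ms_asid_lock is always taken before gru->gs_asid_lock, so any
 * other path that needs both must nest them the same way to avoid
 * deadlock.
 */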

/*
 * Decrement the reference count on a GTS structure. Free the structure
 * if the reference count goes to zero.
 */
void gts_drop(struct gru_thread_state *gts)
{
	if (gts && atomic_dec_return(&gts->ts_refcnt) == 0) {
		gru_drop_mmu_notifier(gts->ts_gms);
		kfree(gts);
		STAT(gts_free);
	}
}

/*
 * Locate the GTS structure for the current thread.
 */
static struct gru_thread_state *gru_find_current_gts_nolock(struct gru_vma_data
						*vdata, int tsid)
{
	struct gru_thread_state *gts;

	list_for_each_entry(gts, &vdata->vd_head, ts_next)
		if (gts->ts_tsid == tsid)
			return gts;
	return NULL;
}

/*
 * Allocate a thread state structure.
 */
struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
		int cbr_au_count, int dsr_au_count, int options, int tsid)
{
	struct gru_thread_state *gts;
	int bytes;

	bytes = DSR_BYTES(dsr_au_count) + CBR_BYTES(cbr_au_count);
	bytes += sizeof(struct gru_thread_state);
	gts = kmalloc(bytes, GFP_KERNEL);
	if (!gts)
		return NULL;

	STAT(gts_alloc);
	memset(gts, 0, sizeof(struct gru_thread_state)); /* zero out header */
	atomic_set(&gts->ts_refcnt, 1);
	mutex_init(&gts->ts_ctxlock);
	gts->ts_cbr_au_count = cbr_au_count;
	gts->ts_dsr_au_count = dsr_au_count;
	gts->ts_user_options = options;
	gts->ts_user_blade_id = -1;
	gts->ts_user_chiplet_id = -1;
	gts->ts_tsid = tsid;
	gts->ts_ctxnum = NULLCTX;
	gts->ts_tlb_int_select = -1;
	gts->ts_cch_req_slice = -1;
	gts->ts_sizeavail = GRU_SIZEAVAIL(PAGE_SHIFT);
	if (vma) {
		gts->ts_mm = current->mm;
		gts->ts_vma = vma;
		gts->ts_gms = gru_register_mmu_notifier();
		if (!gts->ts_gms)
			goto err;
	}

	gru_dbg(grudev, "alloc gts %p\n", gts);
	return gts;

err:
	gts_drop(gts);
	return NULL;
}

/*
 * Allocate a vma private data structure.
 */
struct gru_vma_data *gru_alloc_vma_data(struct vm_area_struct *vma, int tsid)
{
	struct gru_vma_data *vdata = NULL;

	vdata = kmalloc(sizeof(*vdata), GFP_KERNEL);
	if (!vdata)
		return NULL;

	INIT_LIST_HEAD(&vdata->vd_head);
	spin_lock_init(&vdata->vd_lock);
	gru_dbg(grudev, "alloc vdata %p\n", vdata);
	return vdata;
}

/*
 * Find the thread state structure for the current thread.
 */
struct gru_thread_state *gru_find_thread_state(struct vm_area_struct *vma,
					int tsid)
{
	struct gru_vma_data *vdata = vma->vm_private_data;
	struct gru_thread_state *gts;

	spin_lock(&vdata->vd_lock);
	gts = gru_find_current_gts_nolock(vdata, tsid);
	spin_unlock(&vdata->vd_lock);
	gru_dbg(grudev, "vma %p, gts %p\n", vma, gts);
	return gts;
}

/*
 * Allocate a new thread state for a GSEG. Note that races may allow
 * another thread to create a gts first; if so, the duplicate is dropped.
 */
struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct *vma,
					int tsid)
{
	struct gru_vma_data *vdata = vma->vm_private_data;
	struct gru_thread_state *gts, *ngts;

	gts = gru_alloc_gts(vma, vdata->vd_cbr_au_count, vdata->vd_dsr_au_count,
			    vdata->vd_user_options, tsid);
	if (!gts)
		return NULL;

	spin_lock(&vdata->vd_lock);
	ngts = gru_find_current_gts_nolock(vdata, tsid);
	if (ngts) {
		gts_drop(gts);
		gts = ngts;
		STAT(gts_double_allocate);
	} else {
		list_add(&gts->ts_next, &vdata->vd_head);
	}
	spin_unlock(&vdata->vd_lock);
	gru_dbg(grudev, "vma %p, gts %p\n", vma, gts);
	return gts;
}

/*
 * Free the GRU context assigned to the thread state.
 */
static void gru_free_gru_context(struct gru_thread_state *gts)
{
	struct gru_state *gru;

	gru = gts->ts_gru;
	gru_dbg(grudev, "gts %p, gid %d\n", gts, gru->gs_gid);

	spin_lock(&gru->gs_lock);
	gru->gs_gts[gts->ts_ctxnum] = NULL;
	free_gru_resources(gru, gts);
	BUG_ON(test_bit(gts->ts_ctxnum, &gru->gs_context_map) == 0);
	__clear_bit(gts->ts_ctxnum, &gru->gs_context_map);
	gts->ts_ctxnum = NULLCTX;
	gts->ts_gru = NULL;
	gts->ts_blade = -1;
	spin_unlock(&gru->gs_lock);

	gts_drop(gts);
	STAT(free_context);
}

/*
 * Prefetching cachelines helps hardware performance.
 * (Strictly a performance enhancement. Not functionally required.)
 */
static void prefetch_data(void *p, int num, int stride)
{
	while (num-- > 0) {
		prefetchw(p);
		p += stride;
	}
}

static inline long gru_copy_handle(void *d, void *s)
{
	memcpy(d, s, GRU_HANDLE_BYTES);
	return GRU_HANDLE_BYTES;
}

static void gru_prefetch_context(void *gseg, void *cb, void *cbe,
				 unsigned long cbrmap, unsigned long length)
{
	int i, scr;

	prefetch_data(gseg + GRU_DS_BASE, length / GRU_CACHE_LINE_BYTES,
		      GRU_CACHE_LINE_BYTES);

	for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
		prefetch_data(cb, 1, GRU_CACHE_LINE_BYTES);
		prefetch_data(cbe + i * GRU_HANDLE_STRIDE, 1,
			      GRU_CACHE_LINE_BYTES);
		cb += GRU_HANDLE_STRIDE;
	}
}

static void gru_load_context_data(void *save, void *grubase, int ctxnum,
				  unsigned long cbrmap, unsigned long dsrmap,
				  int data_valid)
{
	void *gseg, *cb, *cbe;
	unsigned long length;
	int i, scr;

	gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
	cb = gseg + GRU_CB_BASE;
	cbe = grubase + GRU_CBE_BASE;
	length = hweight64(dsrmap) * GRU_DSR_AU_BYTES;
	gru_prefetch_context(gseg, cb, cbe, cbrmap, length);

	for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
		if (data_valid) {
			save += gru_copy_handle(cb, save);
			save += gru_copy_handle(cbe + i * GRU_HANDLE_STRIDE,
						save);
		} else {
			memset(cb, 0, GRU_CACHE_LINE_BYTES);
			memset(cbe + i * GRU_HANDLE_STRIDE, 0,
			       GRU_CACHE_LINE_BYTES);
		}
		cb += GRU_HANDLE_STRIDE;
	}

	if (data_valid)
		memcpy(gseg + GRU_DS_BASE, save, length);
	else
		memset(gseg + GRU_DS_BASE, 0, length);
}

static void gru_unload_context_data(void *save, void *grubase, int ctxnum,
				    unsigned long cbrmap, unsigned long dsrmap)
{
	void *gseg, *cb, *cbe;
	unsigned long length;
	int i, scr;

	gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
	cb = gseg + GRU_CB_BASE;
	cbe = grubase + GRU_CBE_BASE;
	length = hweight64(dsrmap) * GRU_DSR_AU_BYTES;
	gru_prefetch_context(gseg, cb, cbe, cbrmap, length);

	for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
		save += gru_copy_handle(save, cb);
		save += gru_copy_handle(save, cbe + i * GRU_HANDLE_STRIDE);
		cb += GRU_HANDLE_STRIDE;
	}
	memcpy(save, gseg + GRU_DS_BASE, length);
}
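
/*
 * Save-area layout note (follows from the two routines above): ts_gdata
 * holds, for each allocated CBR in cbrmap order, the CB handle image
 * followed by its CBE image (GRU_HANDLE_BYTES each), and then the DSR
 * data, hweight64(dsrmap) * GRU_DSR_AU_BYTES long.
 */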

void gru_unload_context(struct gru_thread_state *gts, int savestate)
{
	struct gru_state *gru = gts->ts_gru;
	struct gru_context_configuration_handle *cch;
	int ctxnum = gts->ts_ctxnum;

	if (!is_kernel_context(gts))
		zap_vma_ptes(gts->ts_vma, UGRUADDR(gts), GRU_GSEG_PAGESIZE);
	cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);

	gru_dbg(grudev, "gts %p\n", gts);
	lock_cch_handle(cch);
	if (cch_interrupt_sync(cch))
		BUG();

	if (!is_kernel_context(gts))
		gru_unload_mm_tracker(gru, gts);
	if (savestate) {
		gru_unload_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr,
					ctxnum, gts->ts_cbr_map,
					gts->ts_dsr_map);
		gts->ts_data_valid = 1;
	}

	if (cch_deallocate(cch))
		BUG();
	unlock_cch_handle(cch);

	gru_free_gru_context(gts);
}

/*
 * Load a GRU context by copying it from the thread data structure in memory
 * to the GRU.
 */
void gru_load_context(struct gru_thread_state *gts)
{
	struct gru_state *gru = gts->ts_gru;
	struct gru_context_configuration_handle *cch;
	int i, err, asid, ctxnum = gts->ts_ctxnum;

	gru_dbg(grudev, "gts %p\n", gts);
	cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);

	lock_cch_handle(cch);
	cch->tfm_fault_bit_enable =
	    (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
	     || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
	cch->tlb_int_enable = (gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
	if (cch->tlb_int_enable) {
		gts->ts_tlb_int_select = gru_cpu_fault_map_id();
		cch->tlb_int_select = gts->ts_tlb_int_select;
	}
	if (gts->ts_cch_req_slice >= 0) {
		cch->req_slice_set_enable = 1;
		cch->req_slice = gts->ts_cch_req_slice;
	} else {
		cch->req_slice_set_enable = 0;
	}
	cch->tfm_done_bit_enable = 0;
	cch->dsr_allocation_map = gts->ts_dsr_map;
	cch->cbr_allocation_map = gts->ts_cbr_map;

	if (is_kernel_context(gts)) {
		cch->unmap_enable = 1;
		cch->tfm_done_bit_enable = 1;
		cch->cb_int_enable = 1;
	} else {
		cch->unmap_enable = 0;
		cch->tfm_done_bit_enable = 0;
		cch->cb_int_enable = 0;
		asid = gru_load_mm_tracker(gru, gts);
		for (i = 0; i < 8; i++) {
			cch->asid[i] = asid + i;
			cch->sizeavail[i] = gts->ts_sizeavail;
		}
	}

	err = cch_allocate(cch);
	if (err) {
		gru_dbg(grudev,
			"err %d: cch %p, gts %p, cbr 0x%lx, dsr 0x%lx\n",
			err, cch, gts, gts->ts_cbr_map, gts->ts_dsr_map);
		BUG();
	}

	gru_load_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr, ctxnum,
			gts->ts_cbr_map, gts->ts_dsr_map, gts->ts_data_valid);

	if (cch_start(cch))
		BUG();
	unlock_cch_handle(cch);
}

/*
 * Update fields in an active CCH:
 *	- retarget interrupts on local blade
 *	- update sizeavail mask
 */
int gru_update_cch(struct gru_thread_state *gts)
{
	struct gru_context_configuration_handle *cch;
	struct gru_state *gru = gts->ts_gru;
	int i, ctxnum = gts->ts_ctxnum, ret = 0;

	cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);

	lock_cch_handle(cch);
	if (cch->state == CCHSTATE_ACTIVE) {
		if (gru->gs_gts[gts->ts_ctxnum] != gts)
			goto exit;
		if (cch_interrupt(cch))
			BUG();
		for (i = 0; i < 8; i++)
			cch->sizeavail[i] = gts->ts_sizeavail;
		gts->ts_tlb_int_select = gru_cpu_fault_map_id();
		cch->tlb_int_select = gru_cpu_fault_map_id();
		cch->tfm_fault_bit_enable =
		    (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
		     || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
		if (cch_start(cch))
			BUG();
		ret = 1;
	}
exit:
	unlock_cch_handle(cch);
	return ret;
}

/*
 * Update CCH tlb interrupt select. Required when all the following is true:
 *	- task's GRU context is loaded into a GRU
 *	- task is using interrupt notification for TLB faults
 *	- task has migrated to a different cpu on the same blade where
 *	  it was previously running.
 */
static int gru_retarget_intr(struct gru_thread_state *gts)
{
	if (gts->ts_tlb_int_select < 0
	    || gts->ts_tlb_int_select == gru_cpu_fault_map_id())
		return 0;

	gru_dbg(grudev, "retarget from %d to %d\n", gts->ts_tlb_int_select,
		gru_cpu_fault_map_id());
	return gru_update_cch(gts);
}

/*
 * Unload the gru context if it is not assigned to the correct blade or
 * chiplet. Misassignment can occur if the process migrates to a different
 * blade or if the user changes the selected blade/chiplet.
 */
void gru_check_context_placement(struct gru_thread_state *gts)
{
	struct gru_state *gru;
	int blade_id, chiplet_id;

	/*
	 * If the current task is the context owner, verify that the
	 * context is correctly placed. This test is skipped for non-owner
	 * references. Pthread apps use non-owner references to the CBRs.
	 */
	gru = gts->ts_gru;
	if (!gru || gts->ts_tgid_owner != current->tgid)
		return;

	blade_id = gts->ts_user_blade_id;
	if (blade_id < 0)
		blade_id = uv_numa_blade_id();

	chiplet_id = gts->ts_user_chiplet_id;
	if (gru->gs_blade_id != blade_id ||
	    (chiplet_id >= 0 && chiplet_id != gru->gs_chiplet_id)) {
		STAT(check_context_unload);
		gru_unload_context(gts, 1);
	} else if (gru_retarget_intr(gts)) {
		STAT(check_context_retarget_intr);
	}
}


/*
 * Insufficient GRU resources available on the local blade. Steal a context
 * from a process. This is a hack until a _real_ resource scheduler is
 * written....
 */
#define next_ctxnum(n)	((n) < GRU_NUM_CCH - 2 ? (n) + 1 : 0)
#define next_gru(b, g)	(((g) < &(b)->bs_grus[GRU_CHIPLETS_PER_BLADE - 1]) ? \
				 ((g)+1) : &(b)->bs_grus[0])
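
/*
 * Round-robin note (GRU_NUM_CCH and GRU_CHIPLETS_PER_BLADE are defined
 * elsewhere; 16 and 2 are assumed here purely for illustration):
 * next_ctxnum() cycles 0, 1, ..., GRU_NUM_CCH - 2 and then wraps to 0,
 * so the last CCH is never chosen as a steal victim, while next_gru()
 * advances to the blade's other chiplet each time the ctxnum wraps.
 */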

static int is_gts_stealable(struct gru_thread_state *gts,
		struct gru_blade_state *bs)
{
	if (is_kernel_context(gts))
		return down_write_trylock(&bs->bs_kgts_sema);
	else
		return mutex_trylock(&gts->ts_ctxlock);
}

static void gts_stolen(struct gru_thread_state *gts,
		struct gru_blade_state *bs)
{
	if (is_kernel_context(gts)) {
		up_write(&bs->bs_kgts_sema);
		STAT(steal_kernel_context);
	} else {
		mutex_unlock(&gts->ts_ctxlock);
		STAT(steal_user_context);
	}
}

void gru_steal_context(struct gru_thread_state *gts)
{
	struct gru_blade_state *blade;
	struct gru_state *gru, *gru0;
	struct gru_thread_state *ngts = NULL;
	int ctxnum, ctxnum0, flag = 0, cbr, dsr;
	int blade_id = gts->ts_user_blade_id;
	int chiplet_id = gts->ts_user_chiplet_id;

	if (blade_id < 0)
		blade_id = uv_numa_blade_id();
	cbr = gts->ts_cbr_au_count;
	dsr = gts->ts_dsr_au_count;

	blade = gru_base[blade_id];
	spin_lock(&blade->bs_lock);

	ctxnum = next_ctxnum(blade->bs_lru_ctxnum);
	gru = blade->bs_lru_gru;
	if (ctxnum == 0)
		gru = next_gru(blade, gru);
	blade->bs_lru_gru = gru;
	blade->bs_lru_ctxnum = ctxnum;
	ctxnum0 = ctxnum;
	gru0 = gru;
	while (1) {
		if (chiplet_id < 0 || chiplet_id == gru->gs_chiplet_id) {
			if (check_gru_resources(gru, cbr, dsr, GRU_NUM_CCH))
				break;
			spin_lock(&gru->gs_lock);
			for (; ctxnum < GRU_NUM_CCH; ctxnum++) {
				if (flag && gru == gru0 && ctxnum == ctxnum0)
					break;
				ngts = gru->gs_gts[ctxnum];
				/*
				 * We are grabbing locks out of order, so trylock is
				 * needed. GTSs are usually not locked, so the odds of
				 * success are high. If trylock fails, try to steal a
				 * different GSEG.
				 */
				if (ngts && is_gts_stealable(ngts, blade))
					break;
				ngts = NULL;
			}
			spin_unlock(&gru->gs_lock);
			if (ngts || (flag && gru == gru0 && ctxnum == ctxnum0))
				break;
		}
		if (flag && gru == gru0)
			break;
		flag = 1;
		ctxnum = 0;
		gru = next_gru(blade, gru);
	}
	spin_unlock(&blade->bs_lock);

	if (ngts) {
		gts->ustats.context_stolen++;
		ngts->ts_steal_jiffies = jiffies;
		gru_unload_context(ngts, is_kernel_context(ngts) ? 0 : 1);
		gts_stolen(ngts, blade);
	} else {
		STAT(steal_context_failed);
	}
	gru_dbg(grudev,
		"stole gid %d, ctxnum %d from gts %p. Need cb %d, ds %d;"
		" avail cb %ld, ds %ld\n",
		gru->gs_gid, ctxnum, ngts, cbr, dsr, hweight64(gru->gs_cbr_map),
		hweight64(gru->gs_dsr_map));
}

/*
 * Assign a gru context.
 */
static int gru_assign_context_number(struct gru_state *gru)
{
	int ctxnum;

	ctxnum = find_first_zero_bit(&gru->gs_context_map, GRU_NUM_CCH);
	__set_bit(ctxnum, &gru->gs_context_map);
	return ctxnum;
}

/*
 * Scan the GRUs on the local blade & assign a GRU context.
 */
struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts)
{
	struct gru_state *gru, *grux;
	int i, max_active_contexts;
	int blade_id = gts->ts_user_blade_id;
	int chiplet_id = gts->ts_user_chiplet_id;

	if (blade_id < 0)
		blade_id = uv_numa_blade_id();
again:
	gru = NULL;
	max_active_contexts = GRU_NUM_CCH;
	for_each_gru_on_blade(grux, blade_id, i) {
		if (chiplet_id >= 0 && chiplet_id != grux->gs_chiplet_id)
			continue;
		if (check_gru_resources(grux, gts->ts_cbr_au_count,
					gts->ts_dsr_au_count,
					max_active_contexts)) {
			gru = grux;
			max_active_contexts = grux->gs_active_contexts;
			if (max_active_contexts == 0)
				break;
		}
	}

	if (gru) {
		spin_lock(&gru->gs_lock);
		if (!check_gru_resources(gru, gts->ts_cbr_au_count,
					 gts->ts_dsr_au_count, GRU_NUM_CCH)) {
			spin_unlock(&gru->gs_lock);
			goto again;
		}
		reserve_gru_resources(gru, gts);
		gts->ts_gru = gru;
		gts->ts_blade = gru->gs_blade_id;
		gts->ts_ctxnum = gru_assign_context_number(gru);
		atomic_inc(&gts->ts_refcnt);
		gru->gs_gts[gts->ts_ctxnum] = gts;
		spin_unlock(&gru->gs_lock);

		STAT(assign_context);
		gru_dbg(grudev,
			"gseg %p, gts %p, gid %d, ctx %d, cbr %d, dsr %d\n",
			gseg_virtual_address(gts->ts_gru, gts->ts_ctxnum), gts,
			gts->ts_gru->gs_gid, gts->ts_ctxnum,
			gts->ts_cbr_au_count, gts->ts_dsr_au_count);
	} else {
		gru_dbg(grudev, "failed to allocate a GTS\n");
		STAT(assign_context_failed);
	}

	return gru;
}

/*
 * gru_fault
 *
 * Map the user's GRU segment
 *
 * Note: GRU segments are always mmapped on GRU_GSEG_PAGESIZE boundaries.
 */
int gru_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct gru_thread_state *gts;
	unsigned long paddr, vaddr;

	vaddr = (unsigned long)vmf->virtual_address;
	gru_dbg(grudev, "vma %p, vaddr 0x%lx (0x%lx)\n",
		vma, vaddr, GSEG_BASE(vaddr));
	STAT(nopfn);

	/* The following check ensures vaddr is a valid address in the VMA */
	gts = gru_find_thread_state(vma, TSID(vaddr, vma));
	if (!gts)
		return VM_FAULT_SIGBUS;

again:
	mutex_lock(&gts->ts_ctxlock);
	preempt_disable();

	gru_check_context_placement(gts);

	if (!gts->ts_gru) {
		STAT(load_user_context);
		if (!gru_assign_gru_context(gts)) {
			preempt_enable();
			mutex_unlock(&gts->ts_ctxlock);
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(GRU_ASSIGN_DELAY);  /* true hack ZZZ */
			if (gts->ts_steal_jiffies + GRU_STEAL_DELAY < jiffies)
				gru_steal_context(gts);
			goto again;
		}
		gru_load_context(gts);
		paddr = gseg_physical_address(gts->ts_gru, gts->ts_ctxnum);
		remap_pfn_range(vma, vaddr & ~(GRU_GSEG_PAGESIZE - 1),
				paddr >> PAGE_SHIFT, GRU_GSEG_PAGESIZE,
				vma->vm_page_prot);
	}

	preempt_enable();
	mutex_unlock(&gts->ts_ctxlock);

	return VM_FAULT_NOPAGE;
}