drivers/misc/sgi-gru/grumain.c
/*
 * SN Platform GRU Driver
 *
 * DRIVER TABLE MANAGER + GRU CONTEXT LOAD/UNLOAD
 *
 * Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/err.h>
#include <asm/uv/uv_hub.h>
#include "gru.h"
#include "grutables.h"
#include "gruhandles.h"

unsigned long gru_options __read_mostly;

static struct device_driver gru_driver = {
	.name = "gru"
};

static struct device gru_device = {
	.init_name = "",
	.driver = &gru_driver,
};

struct device *grudev = &gru_device;

/*
 * Select a gru fault map to be used by the current cpu. Note that
 * multiple cpus may be using the same map.
 * ZZZ should be inline but did not work on emulator
 */
int gru_cpu_fault_map_id(void)
{
#ifdef CONFIG_IA64
	return uv_blade_processor_id() % GRU_NUM_TFM;
#else
	int cpu = smp_processor_id();
	int id, core;

	core = uv_cpu_core_number(cpu);
	id = core + UV_MAX_INT_CORES * uv_cpu_socket_number(cpu);
	return id;
#endif
}

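/*
 * Worked example (hypothetical values, for illustration only): if
 * UV_MAX_INT_CORES were 8, a cpu on socket 1, core 2 would map to
 * id = 2 + 8 * 1 = 10. Each socket thus owns a bank of
 * UV_MAX_INT_CORES fault-map slots, and cpus sharing a core share
 * the same fault map.
 */
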
/*--------- ASID Management -------------------------------------------
 *
 *  Initially, assign asids sequentially from MIN_ASID .. MAX_ASID.
 *  Once MAX is reached, flush the TLB & start over. However,
 *  some asids may still be in use. There won't be many (percentage-wise)
 *  still in use. Search active contexts & determine the value of the first
 *  asid in use ("x"s below). Set "limit" to this value.
 *  This defines a block of assignable asids.
 *
 *  When "limit" is reached, search forward from limit+1 and determine the
 *  next block of assignable asids.
 *
 *  Repeat until MAX_ASID is reached, then start over again.
 *
 *  Each time MAX_ASID is reached, increment the asid generation. Since
 *  the search for in-use asids only checks contexts with GRUs currently
 *  assigned, asids in some contexts will be missed. Prior to loading
 *  a context, the asid generation of the GTS asid is rechecked. If it
 *  doesn't match the current generation, a new asid will be assigned.
 *
 *	0---------------x------------x---------------------x----|
 *	  ^-next        ^-limit                             ^-MAX_ASID
 *
 * All asid manipulation & context loading/unloading is protected by the
 * gs_lock.
 */
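
/*
 * Worked example (numbers are made up for illustration; the real
 * MIN_ASID/MAX_ASID/ASID_INC values come from grutables.h): asids are
 * handed out MIN_ASID, MIN_ASID + ASID_INC, ... When an allocation
 * would collide with an asid still in use, the scan skips past it and
 * the smallest in-use asid above the current position becomes "limit".
 * Hitting "limit" triggers gru_reset_asid_limit() to find the next
 * free block; hitting MAX_ASID triggers gru_wrap_asid(), which bumps
 * gs_asid_gen and restarts at MIN_ASID.
 */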

/* Hit the asid limit. Start over */
static int gru_wrap_asid(struct gru_state *gru)
{
	gru_dbg(grudev, "gid %d\n", gru->gs_gid);
	STAT(asid_wrap);
	gru->gs_asid_gen++;
	return MIN_ASID;
}

/* Find the next chunk of unused asids */
static int gru_reset_asid_limit(struct gru_state *gru, int asid)
{
	int i, gid, inuse_asid, limit;

	gru_dbg(grudev, "gid %d, asid 0x%x\n", gru->gs_gid, asid);
	STAT(asid_next);
	limit = MAX_ASID;
	if (asid >= limit)
		asid = gru_wrap_asid(gru);
	gru_flush_all_tlb(gru);
	gid = gru->gs_gid;
again:
	for (i = 0; i < GRU_NUM_CCH; i++) {
		if (!gru->gs_gts[i] || is_kernel_context(gru->gs_gts[i]))
			continue;
		inuse_asid = gru->gs_gts[i]->ts_gms->ms_asids[gid].mt_asid;
		gru_dbg(grudev, "gid %d, gts %p, gms %p, inuse 0x%x, cxt %d\n",
			gru->gs_gid, gru->gs_gts[i], gru->gs_gts[i]->ts_gms,
			inuse_asid, i);
		if (inuse_asid == asid) {
			asid += ASID_INC;
			if (asid >= limit) {
				/*
				 * empty range: reset the range limit and
				 * start over
				 */
				limit = MAX_ASID;
				if (asid >= MAX_ASID)
					asid = gru_wrap_asid(gru);
				goto again;
			}
		}

		if ((inuse_asid > asid) && (inuse_asid < limit))
			limit = inuse_asid;
	}
	gru->gs_asid_limit = limit;
	gru->gs_asid = asid;
	gru_dbg(grudev, "gid %d, new asid 0x%x, new_limit 0x%x\n", gru->gs_gid,
		asid, limit);
	return asid;
}

/* Assign a new ASID to a thread context. */
static int gru_assign_asid(struct gru_state *gru)
{
	int asid;

	gru->gs_asid += ASID_INC;
	asid = gru->gs_asid;
	if (asid >= gru->gs_asid_limit)
		asid = gru_reset_asid_limit(gru, asid);

	gru_dbg(grudev, "gid %d, asid 0x%x\n", gru->gs_gid, asid);
	return asid;
}

/*
 * Clear n bits in a word. Return a word indicating the bits that were cleared.
 * Optionally, build an array of chars that contain the bit numbers allocated.
 */
static unsigned long reserve_resources(unsigned long *p, int n, int mmax,
				       char *idx)
{
	unsigned long bits = 0;
	int i;

	while (n--) {
		i = find_first_bit(p, mmax);
		if (i == mmax)
			BUG();
		__clear_bit(i, p);
		__set_bit(i, &bits);
		if (idx)
			*idx++ = i;
	}
	return bits;
}
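
/*
 * Sketch of the semantics (illustrative, not part of the driver): with
 * *p == 0b1101 and n == 2, the loop clears bits 0 and 2 in *p, returns
 * 0b0101, and stores {0, 2} in idx[] when idx is non-NULL. Callers must
 * hold the lock protecting the map; find_first_bit() + __clear_bit()
 * is not an atomic sequence.
 */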

unsigned long gru_reserve_cb_resources(struct gru_state *gru, int cbr_au_count,
				       char *cbmap)
{
	return reserve_resources(&gru->gs_cbr_map, cbr_au_count, GRU_CBR_AU,
				 cbmap);
}

unsigned long gru_reserve_ds_resources(struct gru_state *gru, int dsr_au_count,
				       char *dsmap)
{
	return reserve_resources(&gru->gs_dsr_map, dsr_au_count, GRU_DSR_AU,
				 dsmap);
}

static void reserve_gru_resources(struct gru_state *gru,
				  struct gru_thread_state *gts)
{
	gru->gs_active_contexts++;
	gts->ts_cbr_map =
	    gru_reserve_cb_resources(gru, gts->ts_cbr_au_count,
				     gts->ts_cbr_idx);
	gts->ts_dsr_map =
	    gru_reserve_ds_resources(gru, gts->ts_dsr_au_count, NULL);
}

static void free_gru_resources(struct gru_state *gru,
			       struct gru_thread_state *gts)
{
	gru->gs_active_contexts--;
	gru->gs_cbr_map |= gts->ts_cbr_map;
	gru->gs_dsr_map |= gts->ts_dsr_map;
}

/*
 * Check if a GRU has sufficient free resources to satisfy an allocation
 * request. Note: GRU locks may or may not be held when this is called. If
 * not held, recheck after acquiring the appropriate locks.
 *
 * Returns 1 if sufficient resources, 0 if not
 */
static int check_gru_resources(struct gru_state *gru, int cbr_au_count,
			       int dsr_au_count, int max_active_contexts)
{
	return hweight64(gru->gs_cbr_map) >= cbr_au_count
		&& hweight64(gru->gs_dsr_map) >= dsr_au_count
		&& gru->gs_active_contexts < max_active_contexts;
}
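
/*
 * Example (made-up values): gs_cbr_map == 0xf0 has hweight64() == 4,
 * so a request for 3 CBR AUs passes and one for 5 fails. Since this
 * may run unlocked, a passing result is only a hint; it is re-checked
 * under gs_lock before resources are reserved (see
 * gru_assign_gru_context() below).
 */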

/*
 * TLB management requires tracking all GRU chiplets that have loaded a GSEG
 * context.
 */
static int gru_load_mm_tracker(struct gru_state *gru,
			       struct gru_thread_state *gts)
{
	struct gru_mm_struct *gms = gts->ts_gms;
	struct gru_mm_tracker *asids = &gms->ms_asids[gru->gs_gid];
	unsigned short ctxbitmap = (1 << gts->ts_ctxnum);
	int asid;

	spin_lock(&gms->ms_asid_lock);
	asid = asids->mt_asid;

	spin_lock(&gru->gs_asid_lock);
	if (asid == 0 || (asids->mt_ctxbitmap == 0 && asids->mt_asid_gen !=
			  gru->gs_asid_gen)) {
		asid = gru_assign_asid(gru);
		asids->mt_asid = asid;
		asids->mt_asid_gen = gru->gs_asid_gen;
		STAT(asid_new);
	} else {
		STAT(asid_reuse);
	}
	spin_unlock(&gru->gs_asid_lock);

	BUG_ON(asids->mt_ctxbitmap & ctxbitmap);
	asids->mt_ctxbitmap |= ctxbitmap;
	if (!test_bit(gru->gs_gid, gms->ms_asidmap))
		__set_bit(gru->gs_gid, gms->ms_asidmap);
	spin_unlock(&gms->ms_asid_lock);

	gru_dbg(grudev,
		"gid %d, gts %p, gms %p, ctxnum %d, asid 0x%x, asidmap 0x%lx\n",
		gru->gs_gid, gts, gms, gts->ts_ctxnum, asid,
		gms->ms_asidmap[0]);
	return asid;
}

static void gru_unload_mm_tracker(struct gru_state *gru,
				  struct gru_thread_state *gts)
{
	struct gru_mm_struct *gms = gts->ts_gms;
	struct gru_mm_tracker *asids;
	unsigned short ctxbitmap;

	asids = &gms->ms_asids[gru->gs_gid];
	ctxbitmap = (1 << gts->ts_ctxnum);
	spin_lock(&gms->ms_asid_lock);
	spin_lock(&gru->gs_asid_lock);
	BUG_ON((asids->mt_ctxbitmap & ctxbitmap) != ctxbitmap);
	asids->mt_ctxbitmap ^= ctxbitmap;
	gru_dbg(grudev, "gid %d, gts %p, gms %p, ctxnum %d, asidmap 0x%lx\n",
		gru->gs_gid, gts, gms, gts->ts_ctxnum, gms->ms_asidmap[0]);
	spin_unlock(&gru->gs_asid_lock);
	spin_unlock(&gms->ms_asid_lock);
}

/*
 * Decrement the reference count on a GTS structure. Free the structure
 * if the reference count goes to zero.
 */
void gts_drop(struct gru_thread_state *gts)
{
	if (gts && atomic_dec_return(&gts->ts_refcnt) == 0) {
		if (gts->ts_gms)
			gru_drop_mmu_notifier(gts->ts_gms);
		kfree(gts);
		STAT(gts_free);
	}
}

/*
 * Locate the GTS structure for the current thread.
 */
static struct gru_thread_state *gru_find_current_gts_nolock(struct gru_vma_data
						*vdata, int tsid)
{
	struct gru_thread_state *gts;

	list_for_each_entry(gts, &vdata->vd_head, ts_next)
		if (gts->ts_tsid == tsid)
			return gts;
	return NULL;
}

/*
 * Allocate a thread state structure.
 */
struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
		int cbr_au_count, int dsr_au_count,
		unsigned char tlb_preload_count, int options, int tsid)
{
	struct gru_thread_state *gts;
	struct gru_mm_struct *gms;
	int bytes;

	bytes = DSR_BYTES(dsr_au_count) + CBR_BYTES(cbr_au_count);
	bytes += sizeof(struct gru_thread_state);
	gts = kmalloc(bytes, GFP_KERNEL);
	if (!gts)
		return ERR_PTR(-ENOMEM);

	STAT(gts_alloc);
	memset(gts, 0, sizeof(struct gru_thread_state)); /* zero out header */
	atomic_set(&gts->ts_refcnt, 1);
	mutex_init(&gts->ts_ctxlock);
	gts->ts_cbr_au_count = cbr_au_count;
	gts->ts_dsr_au_count = dsr_au_count;
	gts->ts_tlb_preload_count = tlb_preload_count;
	gts->ts_user_options = options;
	gts->ts_user_blade_id = -1;
	gts->ts_user_chiplet_id = -1;
	gts->ts_tsid = tsid;
	gts->ts_ctxnum = NULLCTX;
	gts->ts_tlb_int_select = -1;
	gts->ts_cch_req_slice = -1;
	gts->ts_sizeavail = GRU_SIZEAVAIL(PAGE_SHIFT);
	if (vma) {
		gts->ts_mm = current->mm;
		gts->ts_vma = vma;
		gms = gru_register_mmu_notifier();
		if (IS_ERR(gms))
			goto err;
		gts->ts_gms = gms;
	}

	gru_dbg(grudev, "alloc gts %p\n", gts);
	return gts;

err:
	gts_drop(gts);
	return ERR_CAST(gms);
}

/*
 * Allocate a vma private data structure.
 */
struct gru_vma_data *gru_alloc_vma_data(struct vm_area_struct *vma, int tsid)
{
	struct gru_vma_data *vdata = NULL;

	vdata = kmalloc(sizeof(*vdata), GFP_KERNEL);
	if (!vdata)
		return NULL;

	STAT(vdata_alloc);
	INIT_LIST_HEAD(&vdata->vd_head);
	spin_lock_init(&vdata->vd_lock);
	gru_dbg(grudev, "alloc vdata %p\n", vdata);
	return vdata;
}

/*
 * Find the thread state structure for the current thread.
 */
struct gru_thread_state *gru_find_thread_state(struct vm_area_struct *vma,
					int tsid)
{
	struct gru_vma_data *vdata = vma->vm_private_data;
	struct gru_thread_state *gts;

	spin_lock(&vdata->vd_lock);
	gts = gru_find_current_gts_nolock(vdata, tsid);
	spin_unlock(&vdata->vd_lock);
	gru_dbg(grudev, "vma %p, gts %p\n", vma, gts);
	return gts;
}

/*
 * Allocate a new thread state for a GSEG. Note that racing threads may
 * each attempt to create the gts; the loser drops its copy and uses the
 * one already on the list.
 */
struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct *vma,
					int tsid)
{
	struct gru_vma_data *vdata = vma->vm_private_data;
	struct gru_thread_state *gts, *ngts;

	gts = gru_alloc_gts(vma, vdata->vd_cbr_au_count,
			    vdata->vd_dsr_au_count,
			    vdata->vd_tlb_preload_count,
			    vdata->vd_user_options, tsid);
	if (IS_ERR(gts))
		return gts;

	spin_lock(&vdata->vd_lock);
	ngts = gru_find_current_gts_nolock(vdata, tsid);
	if (ngts) {
		gts_drop(gts);
		gts = ngts;
		STAT(gts_double_allocate);
	} else {
		list_add(&gts->ts_next, &vdata->vd_head);
	}
	spin_unlock(&vdata->vd_lock);
	gru_dbg(grudev, "vma %p, gts %p\n", vma, gts);
	return gts;
}

/*
 * Free the GRU context assigned to the thread state.
 */
static void gru_free_gru_context(struct gru_thread_state *gts)
{
	struct gru_state *gru;

	gru = gts->ts_gru;
	gru_dbg(grudev, "gts %p, gid %d\n", gts, gru->gs_gid);

	spin_lock(&gru->gs_lock);
	gru->gs_gts[gts->ts_ctxnum] = NULL;
	free_gru_resources(gru, gts);
	BUG_ON(test_bit(gts->ts_ctxnum, &gru->gs_context_map) == 0);
	__clear_bit(gts->ts_ctxnum, &gru->gs_context_map);
	gts->ts_ctxnum = NULLCTX;
	gts->ts_gru = NULL;
	gts->ts_blade = -1;
	spin_unlock(&gru->gs_lock);

	gts_drop(gts);
	STAT(free_context);
}

/*
 * Prefetching cachelines helps hardware performance.
 * (Strictly a performance enhancement. Not functionally required.)
 */
static void prefetch_data(void *p, int num, int stride)
{
	while (num-- > 0) {
		prefetchw(p);
		p += stride;
	}
}

static inline long gru_copy_handle(void *d, void *s)
{
	memcpy(d, s, GRU_HANDLE_BYTES);
	return GRU_HANDLE_BYTES;
}

static void gru_prefetch_context(void *gseg, void *cb, void *cbe,
				 unsigned long cbrmap, unsigned long length)
{
	int i, scr;

	prefetch_data(gseg + GRU_DS_BASE, length / GRU_CACHE_LINE_BYTES,
		      GRU_CACHE_LINE_BYTES);

	for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
		prefetch_data(cb, 1, GRU_CACHE_LINE_BYTES);
		prefetch_data(cbe + i * GRU_HANDLE_STRIDE, 1,
			      GRU_CACHE_LINE_BYTES);
		cb += GRU_HANDLE_STRIDE;
	}
}

static void gru_load_context_data(void *save, void *grubase, int ctxnum,
				  unsigned long cbrmap, unsigned long dsrmap,
				  int data_valid)
{
	void *gseg, *cb, *cbe;
	unsigned long length;
	int i, scr;

	gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
	cb = gseg + GRU_CB_BASE;
	cbe = grubase + GRU_CBE_BASE;
	length = hweight64(dsrmap) * GRU_DSR_AU_BYTES;
	gru_prefetch_context(gseg, cb, cbe, cbrmap, length);

	for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
		if (data_valid) {
			save += gru_copy_handle(cb, save);
			save += gru_copy_handle(cbe + i * GRU_HANDLE_STRIDE,
						save);
		} else {
			memset(cb, 0, GRU_CACHE_LINE_BYTES);
			memset(cbe + i * GRU_HANDLE_STRIDE, 0,
			       GRU_CACHE_LINE_BYTES);
		}
		/* Flush CBE to hide race in context restart */
		mb();
		gru_flush_cache(cbe + i * GRU_HANDLE_STRIDE);
		cb += GRU_HANDLE_STRIDE;
	}

	if (data_valid)
		memcpy(gseg + GRU_DS_BASE, save, length);
	else
		memset(gseg + GRU_DS_BASE, 0, length);
}

static void gru_unload_context_data(void *save, void *grubase, int ctxnum,
				    unsigned long cbrmap, unsigned long dsrmap)
{
	void *gseg, *cb, *cbe;
	unsigned long length;
	int i, scr;

	gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
	cb = gseg + GRU_CB_BASE;
	cbe = grubase + GRU_CBE_BASE;
	length = hweight64(dsrmap) * GRU_DSR_AU_BYTES;

	/* CBEs may not be coherent. Flush them from cache */
	for_each_cbr_in_allocation_map(i, &cbrmap, scr)
		gru_flush_cache(cbe + i * GRU_HANDLE_STRIDE);
	mb();		/* Let the CL flush complete */

	gru_prefetch_context(gseg, cb, cbe, cbrmap, length);

	for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
		save += gru_copy_handle(save, cb);
		save += gru_copy_handle(save, cbe + i * GRU_HANDLE_STRIDE);
		cb += GRU_HANDLE_STRIDE;
	}
	memcpy(save, gseg + GRU_DS_BASE, length);
}

void gru_unload_context(struct gru_thread_state *gts, int savestate)
{
	struct gru_state *gru = gts->ts_gru;
	struct gru_context_configuration_handle *cch;
	int ctxnum = gts->ts_ctxnum;

	if (!is_kernel_context(gts))
		zap_vma_ptes(gts->ts_vma, UGRUADDR(gts), GRU_GSEG_PAGESIZE);
	cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);

	gru_dbg(grudev, "gts %p, cbrmap 0x%lx, dsrmap 0x%lx\n",
		gts, gts->ts_cbr_map, gts->ts_dsr_map);
	lock_cch_handle(cch);
	if (cch_interrupt_sync(cch))
		BUG();

	if (!is_kernel_context(gts))
		gru_unload_mm_tracker(gru, gts);
	if (savestate) {
		gru_unload_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr,
					ctxnum, gts->ts_cbr_map,
					gts->ts_dsr_map);
		gts->ts_data_valid = 1;
	}

	if (cch_deallocate(cch))
		BUG();
	unlock_cch_handle(cch);

	gru_free_gru_context(gts);
}

/*
 * Load a GRU context by copying it from the thread data structure in memory
 * to the GRU.
 */
void gru_load_context(struct gru_thread_state *gts)
{
	struct gru_state *gru = gts->ts_gru;
	struct gru_context_configuration_handle *cch;
	int i, err, asid, ctxnum = gts->ts_ctxnum;

	cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);
	lock_cch_handle(cch);
	cch->tfm_fault_bit_enable =
	    (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
	     || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
	cch->tlb_int_enable = (gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
	if (cch->tlb_int_enable) {
		gts->ts_tlb_int_select = gru_cpu_fault_map_id();
		cch->tlb_int_select = gts->ts_tlb_int_select;
	}
	if (gts->ts_cch_req_slice >= 0) {
		cch->req_slice_set_enable = 1;
		cch->req_slice = gts->ts_cch_req_slice;
	} else {
		cch->req_slice_set_enable = 0;
	}
	cch->tfm_done_bit_enable = 0;
	cch->dsr_allocation_map = gts->ts_dsr_map;
	cch->cbr_allocation_map = gts->ts_cbr_map;

	if (is_kernel_context(gts)) {
		cch->unmap_enable = 1;
		cch->tfm_done_bit_enable = 1;
		cch->cb_int_enable = 1;
		cch->tlb_int_select = 0;	/* For now, ints go to cpu 0 */
	} else {
		cch->unmap_enable = 0;
		cch->tfm_done_bit_enable = 0;
		cch->cb_int_enable = 0;
		asid = gru_load_mm_tracker(gru, gts);
		for (i = 0; i < 8; i++) {
			cch->asid[i] = asid + i;
			cch->sizeavail[i] = gts->ts_sizeavail;
		}
	}

	err = cch_allocate(cch);
	if (err) {
		gru_dbg(grudev,
			"err %d: cch %p, gts %p, cbr 0x%lx, dsr 0x%lx\n",
			err, cch, gts, gts->ts_cbr_map, gts->ts_dsr_map);
		BUG();
	}

	gru_load_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr, ctxnum,
			      gts->ts_cbr_map, gts->ts_dsr_map,
			      gts->ts_data_valid);

	if (cch_start(cch))
		BUG();
	unlock_cch_handle(cch);

	gru_dbg(grudev, "gid %d, gts %p, cbrmap 0x%lx, dsrmap 0x%lx, tie %d, tis %d\n",
		gts->ts_gru->gs_gid, gts, gts->ts_cbr_map, gts->ts_dsr_map,
		(gts->ts_user_options == GRU_OPT_MISS_FMM_INTR),
		gts->ts_tlb_int_select);
}

/*
 * Update fields in an active CCH:
 *	- retarget interrupts on local blade
 *	- update sizeavail mask
 */
int gru_update_cch(struct gru_thread_state *gts)
{
	struct gru_context_configuration_handle *cch;
	struct gru_state *gru = gts->ts_gru;
	int i, ctxnum = gts->ts_ctxnum, ret = 0;

	cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);

	lock_cch_handle(cch);
	if (cch->state == CCHSTATE_ACTIVE) {
		if (gru->gs_gts[gts->ts_ctxnum] != gts)
			goto exit;
		if (cch_interrupt(cch))
			BUG();
		for (i = 0; i < 8; i++)
			cch->sizeavail[i] = gts->ts_sizeavail;
		gts->ts_tlb_int_select = gru_cpu_fault_map_id();
		cch->tlb_int_select = gru_cpu_fault_map_id();
		cch->tfm_fault_bit_enable =
		    (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
		     || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
		if (cch_start(cch))
			BUG();
		ret = 1;
	}
exit:
	unlock_cch_handle(cch);
	return ret;
}

/*
 * Update CCH tlb interrupt select. Required when all of the following is true:
 *	- task's GRU context is loaded into a GRU
 *	- task is using interrupt notification for TLB faults
 *	- task has migrated to a different cpu on the same blade where
 *	  it was previously running.
 */
static int gru_retarget_intr(struct gru_thread_state *gts)
{
	if (gts->ts_tlb_int_select < 0
	    || gts->ts_tlb_int_select == gru_cpu_fault_map_id())
		return 0;

	gru_dbg(grudev, "retarget from %d to %d\n", gts->ts_tlb_int_select,
		gru_cpu_fault_map_id());
	return gru_update_cch(gts);
}

/*
 * Check if a GRU context is allowed to use a specific chiplet. By default
 * a context is assigned to any blade-local chiplet. However, users can
 * override this.
 * Returns 1 if assignment allowed, 0 otherwise
 */
static int gru_check_chiplet_assignment(struct gru_state *gru,
					struct gru_thread_state *gts)
{
	int blade_id;
	int chiplet_id;

	blade_id = gts->ts_user_blade_id;
	if (blade_id < 0)
		blade_id = uv_numa_blade_id();

	chiplet_id = gts->ts_user_chiplet_id;
	return gru->gs_blade_id == blade_id &&
		(chiplet_id < 0 || chiplet_id == gru->gs_chiplet_id);
}

/*
 * Unload the gru context if it is not assigned to the correct blade or
 * chiplet. Misassignment can occur if the process migrates to a different
 * blade or if the user changes the selected blade/chiplet.
 */
void gru_check_context_placement(struct gru_thread_state *gts)
{
	struct gru_state *gru;

	/*
	 * If the current task is the context owner, verify that the
	 * context is correctly placed. This test is skipped for non-owner
	 * references. Pthread apps use non-owner references to the CBRs.
	 */
	gru = gts->ts_gru;
	if (!gru || gts->ts_tgid_owner != current->tgid)
		return;

	if (!gru_check_chiplet_assignment(gru, gts)) {
		STAT(check_context_unload);
		gru_unload_context(gts, 1);
	} else if (gru_retarget_intr(gts)) {
		STAT(check_context_retarget_intr);
	}
}


/*
 * Insufficient GRU resources available on the local blade. Steal a context
 * from a process. This is a hack until a _real_ resource scheduler is
 * written....
 */
#define next_ctxnum(n)	((n) < GRU_NUM_CCH - 2 ? (n) + 1 : 0)
#define next_gru(b, g)	(((g) < &(b)->bs_grus[GRU_CHIPLETS_PER_BLADE - 1]) ?  \
				 ((g)+1) : &(b)->bs_grus[0])
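
/*
 * Illustrative scan order (assuming two chiplets per blade, as
 * GRU_CHIPLETS_PER_BLADE suggests): next_ctxnum() advances through
 * contexts 0 .. GRU_NUM_CCH-2 and then wraps to 0; a wrap to ctxnum 0
 * also advances to the blade's other chiplet via next_gru().
 * gru_steal_context() below resumes from the last victim
 * (bs_lru_gru/bs_lru_ctxnum), so successive steals rotate through all
 * contexts on the blade instead of repeatedly evicting the same one.
 */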

static int is_gts_stealable(struct gru_thread_state *gts,
			    struct gru_blade_state *bs)
{
	if (is_kernel_context(gts))
		return down_write_trylock(&bs->bs_kgts_sema);
	else
		return mutex_trylock(&gts->ts_ctxlock);
}

static void gts_stolen(struct gru_thread_state *gts,
		       struct gru_blade_state *bs)
{
	if (is_kernel_context(gts)) {
		up_write(&bs->bs_kgts_sema);
		STAT(steal_kernel_context);
	} else {
		mutex_unlock(&gts->ts_ctxlock);
		STAT(steal_user_context);
	}
}

void gru_steal_context(struct gru_thread_state *gts)
{
	struct gru_blade_state *blade;
	struct gru_state *gru, *gru0;
	struct gru_thread_state *ngts = NULL;
	int ctxnum, ctxnum0, flag = 0, cbr, dsr;
	int blade_id;

	blade_id = gts->ts_user_blade_id;
	if (blade_id < 0)
		blade_id = uv_numa_blade_id();
	cbr = gts->ts_cbr_au_count;
	dsr = gts->ts_dsr_au_count;

	blade = gru_base[blade_id];
	spin_lock(&blade->bs_lock);

	ctxnum = next_ctxnum(blade->bs_lru_ctxnum);
	gru = blade->bs_lru_gru;
	if (ctxnum == 0)
		gru = next_gru(blade, gru);
	blade->bs_lru_gru = gru;
	blade->bs_lru_ctxnum = ctxnum;
	ctxnum0 = ctxnum;
	gru0 = gru;
	while (1) {
		if (gru_check_chiplet_assignment(gru, gts)) {
			if (check_gru_resources(gru, cbr, dsr, GRU_NUM_CCH))
				break;
			spin_lock(&gru->gs_lock);
			for (; ctxnum < GRU_NUM_CCH; ctxnum++) {
				if (flag && gru == gru0 && ctxnum == ctxnum0)
					break;
				ngts = gru->gs_gts[ctxnum];
				/*
				 * We are grabbing locks out of order, so
				 * trylock is needed. GTSs are usually not
				 * locked, so the odds of success are high.
				 * If trylock fails, try to steal a
				 * different GSEG.
				 */
				if (ngts && is_gts_stealable(ngts, blade))
					break;
				ngts = NULL;
			}
			spin_unlock(&gru->gs_lock);
			if (ngts || (flag && gru == gru0 && ctxnum == ctxnum0))
				break;
		}
		if (flag && gru == gru0)
			break;
		flag = 1;
		ctxnum = 0;
		gru = next_gru(blade, gru);
	}
	spin_unlock(&blade->bs_lock);

	if (ngts) {
		gts->ustats.context_stolen++;
		ngts->ts_steal_jiffies = jiffies;
		gru_unload_context(ngts, is_kernel_context(ngts) ? 0 : 1);
		gts_stolen(ngts, blade);
	} else {
		STAT(steal_context_failed);
	}
	gru_dbg(grudev,
		"stole gid %d, ctxnum %d from gts %p. Need cb %d, ds %d;"
		" avail cb %ld, ds %ld\n",
		gru->gs_gid, ctxnum, ngts, cbr, dsr, hweight64(gru->gs_cbr_map),
		hweight64(gru->gs_dsr_map));
}

/*
 * Assign a gru context.
 */
static int gru_assign_context_number(struct gru_state *gru)
{
	int ctxnum;

	ctxnum = find_first_zero_bit(&gru->gs_context_map, GRU_NUM_CCH);
	__set_bit(ctxnum, &gru->gs_context_map);
	return ctxnum;
}

/*
 * Scan the GRUs on the local blade & assign a GRU context.
 */
struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts)
{
	struct gru_state *gru, *grux;
	int i, max_active_contexts;
	int blade_id = gts->ts_user_blade_id;

	if (blade_id < 0)
		blade_id = uv_numa_blade_id();
again:
	gru = NULL;
	max_active_contexts = GRU_NUM_CCH;
	for_each_gru_on_blade(grux, blade_id, i) {
		if (!gru_check_chiplet_assignment(grux, gts))
			continue;
		if (check_gru_resources(grux, gts->ts_cbr_au_count,
					gts->ts_dsr_au_count,
					max_active_contexts)) {
			gru = grux;
			max_active_contexts = grux->gs_active_contexts;
			if (max_active_contexts == 0)
				break;
		}
	}

	if (gru) {
		spin_lock(&gru->gs_lock);
		if (!check_gru_resources(gru, gts->ts_cbr_au_count,
					 gts->ts_dsr_au_count, GRU_NUM_CCH)) {
			spin_unlock(&gru->gs_lock);
			goto again;
		}
		reserve_gru_resources(gru, gts);
		gts->ts_gru = gru;
		gts->ts_blade = gru->gs_blade_id;
		gts->ts_ctxnum = gru_assign_context_number(gru);
		atomic_inc(&gts->ts_refcnt);
		gru->gs_gts[gts->ts_ctxnum] = gts;
		spin_unlock(&gru->gs_lock);

		STAT(assign_context);
		gru_dbg(grudev,
			"gseg %p, gts %p, gid %d, ctx %d, cbr %d, dsr %d\n",
			gseg_virtual_address(gts->ts_gru, gts->ts_ctxnum), gts,
			gts->ts_gru->gs_gid, gts->ts_ctxnum,
			gts->ts_cbr_au_count, gts->ts_dsr_au_count);
	} else {
		gru_dbg(grudev, "failed to allocate a GTS\n");
		STAT(assign_context_failed);
	}

	return gru;
}

/*
 * gru_fault
 *
 * Map the user's GRU segment
 *
 * Note: gru segments are always mmapped on GRU_GSEG_PAGESIZE boundaries.
 */
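/*
 * For example (assuming, per the macros in grutables.h, that TSID()
 * is the index of the faulting GRU_GSEG_PAGESIZE slot within the vma
 * and GSEG_BASE() rounds down to a GRU_GSEG_PAGESIZE boundary): a
 * fault anywhere in the third gseg of a vma resolves to tsid 2, and
 * the remap_pfn_range() call below maps the entire segment containing
 * the fault, not just a single page.
 */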
int gru_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct gru_thread_state *gts;
	unsigned long paddr, vaddr;

	vaddr = (unsigned long)vmf->virtual_address;
	gru_dbg(grudev, "vma %p, vaddr 0x%lx (0x%lx)\n",
		vma, vaddr, GSEG_BASE(vaddr));
	STAT(nopfn);

	/* The following check ensures vaddr is a valid address in the VMA */
	gts = gru_find_thread_state(vma, TSID(vaddr, vma));
	if (!gts)
		return VM_FAULT_SIGBUS;

again:
	mutex_lock(&gts->ts_ctxlock);
	preempt_disable();

	gru_check_context_placement(gts);

	if (!gts->ts_gru) {
		STAT(load_user_context);
		if (!gru_assign_gru_context(gts)) {
			preempt_enable();
			mutex_unlock(&gts->ts_ctxlock);
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(GRU_ASSIGN_DELAY);  /* true hack ZZZ */
			if (gts->ts_steal_jiffies + GRU_STEAL_DELAY < jiffies)
				gru_steal_context(gts);
			goto again;
		}
		gru_load_context(gts);
		paddr = gseg_physical_address(gts->ts_gru, gts->ts_ctxnum);
		remap_pfn_range(vma, vaddr & ~(GRU_GSEG_PAGESIZE - 1),
				paddr >> PAGE_SHIFT, GRU_GSEG_PAGESIZE,
				vma->vm_page_prot);
	}

	preempt_enable();
	mutex_unlock(&gts->ts_ctxlock);

	return VM_FAULT_NOPAGE;
}