/*
 * SN Platform GRU Driver
 *
 *              KERNEL SERVICES THAT USE THE GRU
 *
 *  Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
#include <linux/proc_fs.h>
#include <linux/interrupt.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include "gru.h"
#include "grulib.h"
#include "grutables.h"
#include "grukservices.h"
#include "gru_instructions.h"
#include <asm/uv/uv_hub.h>

/*
 * Kernel GRU Usage
 *
 * The following is an interim algorithm for management of kernel GRU
 * resources. This will likely be replaced when we better understand the
 * kernel/user requirements.
 *
 * Blade percpu resources reserved for kernel use. These resources are
 * reserved whenever the kernel context for the blade is loaded. Note
 * that the kernel context is not guaranteed to be always available. It is
 * loaded on demand & can be stolen by a user if the user demand exceeds the
 * kernel demand. The kernel can always reload the kernel context but
 * a SLEEP may be required!!!
 */
#define GRU_NUM_KERNEL_CBR	1
#define GRU_NUM_KERNEL_DSR_BYTES 256
#define GRU_NUM_KERNEL_DSR_CL	(GRU_NUM_KERNEL_DSR_BYTES /		\
					GRU_CACHE_LINE_BYTES)

/* GRU instruction attributes for all instructions */
#define IMA			IMA_CB_DELAY

/* GRU cacheline size is always 64 bytes - even on arches with 128 byte lines */
#define __gru_cacheline_aligned__					\
	__attribute__((__aligned__(GRU_CACHE_LINE_BYTES)))

#define MAGIC	0x1234567887654321UL

/* Default retry count for GRU errors on kernel instructions */
#define EXCEPTION_RETRY_LIMIT	3

/* Status of message queue sections */
#define MQS_EMPTY		0
#define MQS_FULL		1
#define MQS_NOOP		2

/*----------------- RESOURCE MANAGEMENT -------------------------------------*/
/* optimized for x86_64 */
struct message_queue {
	union gru_mesqhead	head __gru_cacheline_aligned__;	/* CL 0 */
	int			qlines;				/* DW 1 */
	int			hstatus[2];
	void			*next __gru_cacheline_aligned__;/* CL 1 */
	void			*limit;
	void			*start;
	void			*start2;
	char			data ____cacheline_aligned;	/* CL 2 */
};

/* First word in every message - used by mesq interface */
struct message_header {
	char	present;
	char	present2;
	char	lines;
	char	fill;
};

#define HSTATUS(mq, h)	((mq) + offsetof(struct message_queue, hstatus[h]))
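
/*
 * Illustrative sketch (not compiled, not part of the driver): how the
 * layout constants relate for a hypothetical 1KB (16 cache line) queue.
 * The first 2 lines hold the header above, leaving qlines = 14 message
 * lines split into two halves. HSTATUS() yields the global physical
 * address of the lock word guarding each half; "mq" is hypothetical.
 */
#if 0
	unsigned int bytes = 16 * GRU_CACHE_LINE_BYTES;		/* 1KB */
	unsigned int qlines = bytes / GRU_CACHE_LINE_BYTES - 2;	/* 14 */
	unsigned long mq_gpa = uv_gpa(mq);

	unsigned long hs0 = HSTATUS(mq_gpa, 0);	/* lock word, first half */
	unsigned long hs1 = HSTATUS(mq_gpa, 1);	/* lock word, second half */
#endif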

/*
 * Allocate a kernel context (GTS) for the specified blade.
 *	- protected by writelock on bs_kgts_sema.
 */
static void gru_alloc_kernel_context(struct gru_blade_state *bs, int blade_id)
{
	int cbr_au_count, dsr_au_count, ncpus;

	ncpus = uv_blade_nr_possible_cpus(blade_id);
	cbr_au_count = GRU_CB_COUNT_TO_AU(GRU_NUM_KERNEL_CBR * ncpus);
	dsr_au_count = GRU_DS_BYTES_TO_AU(GRU_NUM_KERNEL_DSR_BYTES * ncpus);
	bs->bs_kgts = gru_alloc_gts(NULL, cbr_au_count, dsr_au_count, 0, 0);
}

/*
 * Reload the blade's kernel context into a GRU chiplet. Called holding
 * the bs_kgts_sema for READ. Will steal user contexts if necessary.
 */
static void gru_load_kernel_context(struct gru_blade_state *bs, int blade_id)
{
	struct gru_state *gru;
	struct gru_thread_state *kgts;
	void *vaddr;
	int ctxnum;

	up_read(&bs->bs_kgts_sema);
	down_write(&bs->bs_kgts_sema);

	if (!bs->bs_kgts)
		gru_alloc_kernel_context(bs, blade_id);
	kgts = bs->bs_kgts;

	if (!kgts->ts_gru) {
		STAT(load_kernel_context);
		while (!gru_assign_gru_context(kgts, blade_id)) {
			msleep(1);
			gru_steal_context(kgts, blade_id);
		}
		gru_load_context(kgts);
		gru = bs->bs_kgts->ts_gru;
		vaddr = gru->gs_gru_base_vaddr;
		ctxnum = kgts->ts_ctxnum;
		bs->kernel_cb = get_gseg_base_address_cb(vaddr, ctxnum, 0);
		bs->kernel_dsr = get_gseg_base_address_ds(vaddr, ctxnum, 0);
	}
	downgrade_write(&bs->bs_kgts_sema);
}

/*
 * Lock & load the kernel context for the specified blade.
 */
static struct gru_blade_state *gru_lock_kernel_context(int blade_id)
{
	struct gru_blade_state *bs;

	STAT(lock_kernel_context);
	bs = gru_base[blade_id];

	down_read(&bs->bs_kgts_sema);
	if (!bs->bs_kgts || !bs->bs_kgts->ts_gru)
		gru_load_kernel_context(bs, blade_id);
	return bs;
}

/*
 * Unlock the kernel context for the specified blade. Context is not
 * unloaded but may be stolen before next use.
 */
static void gru_unlock_kernel_context(int blade_id)
{
	struct gru_blade_state *bs;

	bs = gru_base[blade_id];
	up_read(&bs->bs_kgts_sema);
	STAT(unlock_kernel_context);
}

/*
 * Reserve & get pointers to the DSR/CBRs reserved for the current cpu.
 *	- returns with preemption disabled
 */
static int gru_get_cpu_resources(int dsr_bytes, void **cb, void **dsr)
{
	struct gru_blade_state *bs;
	int lcpu;

	BUG_ON(dsr_bytes > GRU_NUM_KERNEL_DSR_BYTES);
	preempt_disable();
	bs = gru_lock_kernel_context(uv_numa_blade_id());
	lcpu = uv_blade_processor_id();
	*cb = bs->kernel_cb + lcpu * GRU_HANDLE_STRIDE;
	*dsr = bs->kernel_dsr + lcpu * GRU_NUM_KERNEL_DSR_BYTES;
	return 0;
}

/*
 * Free the current cpus reserved DSR/CBR resources.
 */
static void gru_free_cpu_resources(void *cb, void *dsr)
{
	gru_unlock_kernel_context(uv_numa_blade_id());
	preempt_enable();
}
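
/*
 * Illustrative sketch (not compiled, not part of the driver): the calling
 * pattern used throughout this file. gru_get_cpu_resources() returns with
 * preemption disabled so the cpu keeps its reserved CB/DSR;
 * gru_free_cpu_resources() re-enables preemption, after which the
 * pointers must not be used.
 */
#if 0
	void *cb, *dsr;

	if (gru_get_cpu_resources(GRU_NUM_KERNEL_DSR_BYTES, &cb, &dsr))
		return MQE_BUG_NO_RESOURCES;
	/* ... issue GRU instructions on cb/dsr, wait for completion ... */
	gru_free_cpu_resources(cb, dsr);
#endif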

/*----------------------------------------------------------------------*/
int gru_get_cb_exception_detail(void *cb,
		struct control_block_extended_exc_detail *excdet)
{
	struct gru_control_block_extended *cbe;

	cbe = get_cbe(GRUBASE(cb), get_cb_number(cb));
	prefetchw(cbe);		/* Harmless on hardware, required for emulator */
	excdet->opc = cbe->opccpy;
	excdet->exopc = cbe->exopccpy;
	excdet->ecause = cbe->ecause;
	excdet->exceptdet0 = cbe->idef1upd;
	excdet->exceptdet1 = cbe->idef3upd;
	return 0;
}

char *gru_get_cb_exception_detail_str(int ret, void *cb,
				      char *buf, int size)
{
	struct gru_control_block_status *gen = (void *)cb;
	struct control_block_extended_exc_detail excdet;

	if (ret > 0 && gen->istatus == CBS_EXCEPTION) {
		gru_get_cb_exception_detail(cb, &excdet);
		snprintf(buf, size,
			"GRU exception: cb %p, opc %d, exopc %d, ecause 0x%x, "
			"excdet0 0x%lx, excdet1 0x%x",
			gen, excdet.opc, excdet.exopc, excdet.ecause,
			excdet.exceptdet0, excdet.exceptdet1);
	} else {
		snprintf(buf, size, "No exception");
	}
	return buf;
}

static int gru_wait_idle_or_exception(struct gru_control_block_status *gen)
{
	while (gen->istatus >= CBS_ACTIVE) {
		cpu_relax();
		barrier();
	}

	return gen->istatus;
}

static int gru_retry_exception(void *cb)
{
	struct gru_control_block_status *gen = (void *)cb;
	struct control_block_extended_exc_detail excdet;
	int retry = EXCEPTION_RETRY_LIMIT;

	while (1) {
		if (gru_get_cb_message_queue_substatus(cb))
			break;
		if (gru_wait_idle_or_exception(gen) == CBS_IDLE)
			return CBS_IDLE;

		gru_get_cb_exception_detail(cb, &excdet);
		if (excdet.ecause & ~EXCEPTION_RETRY_BITS)
			break;
		if (retry-- == 0)
			break;
		gen->icmd = 1;
		gru_flush_cache(gen);
	}
	return CBS_EXCEPTION;
}

int gru_check_status_proc(void *cb)
{
	struct gru_control_block_status *gen = (void *)cb;
	int ret;

	ret = gen->istatus;
	if (ret != CBS_EXCEPTION)
		return ret;
	return gru_retry_exception(cb);
}

int gru_wait_proc(void *cb)
{
	struct gru_control_block_status *gen = (void *)cb;
	int ret;

	ret = gru_wait_idle_or_exception(gen);
	if (ret == CBS_EXCEPTION)
		ret = gru_retry_exception(cb);

	return ret;
}

void gru_abort(int ret, void *cb, char *str)
{
	char buf[GRU_EXC_STR_SIZE];

	panic("GRU FATAL ERROR: %s - %s\n", str,
	      gru_get_cb_exception_detail_str(ret, cb, buf, sizeof(buf)));
}

void gru_wait_abort_proc(void *cb)
{
	int ret;

	ret = gru_wait_proc(cb);
	if (ret)
		gru_abort(ret, cb, "gru_wait_abort");
}
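
/*
 * Illustrative sketch (not compiled, not part of the driver): choosing a
 * wait primitive. gru_wait_proc() returns the final CB status after
 * retrying transient exceptions; gru_wait_abort_proc() panics on any
 * failure and is for instructions that must not fail.
 */
#if 0
	if (gru_wait_proc(cb) != CBS_IDLE)
		;	/* failed even after EXCEPTION_RETRY_LIMIT retries */

	gru_wait_abort_proc(cb);	/* alternative: panic on failure */
#endif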

/*------------------------------ MESSAGE QUEUES -----------------------------*/

/* Internal status. These are NOT returned to the user. */
#define MQIE_AGAIN		-1	/* try again */

/*
 * Save/restore the "present" flag that is in the second line of 2-line
 * messages
 */
static inline int get_present2(void *p)
{
	struct message_header *mhdr = p + GRU_CACHE_LINE_BYTES;
	return mhdr->present;
}

static inline void restore_present2(void *p, int val)
{
	struct message_header *mhdr = p + GRU_CACHE_LINE_BYTES;
	mhdr->present = val;
}
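
/*
 * Illustrative sketch (not compiled, not part of the driver): sender-side
 * use of the helpers above for a 2-line message, as done in
 * gru_send_message_gpa(). The second line's "present" flag is parked in
 * the header so the receiver sees the message only when both cache lines
 * have arrived.
 */
#if 0
	mhdr->present2 = get_present2(mhdr);	/* save line 2's flag */
	restore_present2(mhdr, MQS_FULL);	/* mark line 2 full */
#endif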

/*
 * Create a message queue.
 *	qlines - message queue size in cache lines. Includes 2-line header.
 */
int gru_create_message_queue(struct gru_message_queue_desc *mqd,
		void *p, unsigned int bytes, int nasid, int vector, int apicid)
{
	struct message_queue *mq = p;
	unsigned int qlines;

	qlines = bytes / GRU_CACHE_LINE_BYTES - 2;
	memset(mq, 0, bytes);
	mq->start = &mq->data;
	mq->start2 = &mq->data + (qlines / 2 - 1) * GRU_CACHE_LINE_BYTES;
	mq->next = &mq->data;
	mq->limit = &mq->data + (qlines - 2) * GRU_CACHE_LINE_BYTES;
	mq->qlines = qlines;
	mq->hstatus[0] = 0;
	mq->hstatus[1] = 1;
	mq->head = gru_mesq_head(2, qlines / 2 + 1);
	mqd->mq = mq;
	mqd->mq_gpa = uv_gpa(mq);
	mqd->qlines = qlines;
	mqd->interrupt_pnode = UV_NASID_TO_PNODE(nasid);
	mqd->interrupt_vector = vector;
	mqd->interrupt_apicid = apicid;
	return 0;
}
EXPORT_SYMBOL_GPL(gru_create_message_queue);
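
/*
 * Illustrative sketch (not compiled, not part of the driver): creating a
 * queue on a kmalloc'ed buffer. The 8-cacheline size and the zero
 * nasid/vector/apicid (no cross-partition interrupt) are made-up example
 * values; this assumes the allocation is cacheline aligned, as the queue
 * header requires.
 */
#if 0
	struct gru_message_queue_desc mqd;
	unsigned int bytes = 8 * GRU_CACHE_LINE_BYTES;
	void *p = kmalloc(bytes, GFP_KERNEL);

	if (!p)
		return -ENOMEM;
	gru_create_message_queue(&mqd, p, bytes, 0, 0, 0);
#endif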

/*
 * Send a NOOP message to a message queue
 * Returns:
 *	 0 - if queue is full after the send. This is the normal case
 *	     but various races can change this.
 *	-1 - if mesq sent successfully but queue not full
 *	>0 - unexpected error. MQE_xxx returned
 */
static int send_noop_message(void *cb, struct gru_message_queue_desc *mqd,
				void *mesg)
{
	const struct message_header noop_header = {
					.present = MQS_NOOP, .lines = 1};
	unsigned long m;
	int substatus, ret;
	struct message_header save_mhdr, *mhdr = mesg;

	STAT(mesq_noop);
	save_mhdr = *mhdr;
	*mhdr = noop_header;
	gru_mesq(cb, mqd->mq_gpa, gru_get_tri(mhdr), 1, IMA);
	ret = gru_wait(cb);

	if (ret) {
		substatus = gru_get_cb_message_queue_substatus(cb);
		switch (substatus) {
		case CBSS_NO_ERROR:
			STAT(mesq_noop_unexpected_error);
			ret = MQE_UNEXPECTED_CB_ERR;
			break;
		case CBSS_LB_OVERFLOWED:
			STAT(mesq_noop_lb_overflow);
			ret = MQE_CONGESTION;
			break;
		case CBSS_QLIMIT_REACHED:
			STAT(mesq_noop_qlimit_reached);
			ret = 0;
			break;
		case CBSS_AMO_NACKED:
			STAT(mesq_noop_amo_nacked);
			ret = MQE_CONGESTION;
			break;
		case CBSS_PUT_NACKED:
			STAT(mesq_noop_put_nacked);
			m = mqd->mq_gpa + (gru_get_amo_value_head(cb) << 6);
			gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, 1, 1,
						IMA);
			if (gru_wait(cb) == CBS_IDLE)
				ret = MQIE_AGAIN;
			else
				ret = MQE_UNEXPECTED_CB_ERR;
			break;
		case CBSS_PAGE_OVERFLOW:
		default:
			BUG();
		}
	}
	*mhdr = save_mhdr;
	return ret;
}

/*
 * Handle a gru_mesq full.
 */
static int send_message_queue_full(void *cb, struct gru_message_queue_desc *mqd,
				void *mesg, int lines)
{
	union gru_mesqhead mqh;
	unsigned int limit, head;
	unsigned long avalue;
	int half, qlines;

	/* Determine if switching to first/second half of q */
	avalue = gru_get_amo_value(cb);
	head = gru_get_amo_value_head(cb);
	limit = gru_get_amo_value_limit(cb);

	qlines = mqd->qlines;
	half = (limit != qlines);

	if (half)
		mqh = gru_mesq_head(qlines / 2 + 1, qlines);
	else
		mqh = gru_mesq_head(2, qlines / 2 + 1);

	/* Try to get lock for switching head pointer */
	gru_gamir(cb, EOP_IR_CLR, HSTATUS(mqd->mq_gpa, half), XTYPE_DW, IMA);
	if (gru_wait(cb) != CBS_IDLE)
		goto cberr;
	if (!gru_get_amo_value(cb)) {
		STAT(mesq_qf_locked);
		return MQE_QUEUE_FULL;
	}

	/* Got the lock. Send optional NOP if queue not full, */
	if (head != limit) {
		if (send_noop_message(cb, mqd, mesg)) {
			gru_gamir(cb, EOP_IR_INC, HSTATUS(mqd->mq_gpa, half),
					XTYPE_DW, IMA);
			if (gru_wait(cb) != CBS_IDLE)
				goto cberr;
			STAT(mesq_qf_noop_not_full);
			return MQIE_AGAIN;
		}
		avalue++;
	}

	/* Then flip queuehead to other half of queue. */
	gru_gamer(cb, EOP_ERR_CSWAP, mqd->mq_gpa, XTYPE_DW, mqh.val, avalue,
							IMA);
	if (gru_wait(cb) != CBS_IDLE)
		goto cberr;

	/* If the queue head swap failed, clear the hstatus lock */
	if (gru_get_amo_value(cb) != avalue) {
		STAT(mesq_qf_switch_head_failed);
		gru_gamir(cb, EOP_IR_INC, HSTATUS(mqd->mq_gpa, half), XTYPE_DW,
							IMA);
		if (gru_wait(cb) != CBS_IDLE)
			goto cberr;
	}
	return MQIE_AGAIN;
cberr:
	STAT(mesq_qf_unexpected_error);
	return MQE_UNEXPECTED_CB_ERR;
}

/*
 * Send a cross-partition interrupt to the SSI that contains the target
 * message queue. Normally, the interrupt is automatically delivered by
 * hardware but some error conditions require explicit delivery.
 */
static void send_message_queue_interrupt(struct gru_message_queue_desc *mqd)
{
	if (mqd->interrupt_vector)
		uv_hub_send_ipi(mqd->interrupt_pnode, mqd->interrupt_apicid,
				mqd->interrupt_vector);
}

/*
 * Handle a PUT failure. Note: if message was a 2-line message, one of the
 * lines might have been written successfully. Before sending the
 * message, "present" must be cleared in BOTH lines to prevent the receiver
 * from prematurely seeing the full message.
 */
static int send_message_put_nacked(void *cb, struct gru_message_queue_desc *mqd,
				void *mesg, int lines)
{
	unsigned long m;

	m = mqd->mq_gpa + (gru_get_amo_value_head(cb) << 6);
	if (lines == 2) {
		gru_vset(cb, m, 0, XTYPE_CL, lines, 1, IMA);
		if (gru_wait(cb) != CBS_IDLE)
			return MQE_UNEXPECTED_CB_ERR;
	}
	gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, lines, 1, IMA);
	if (gru_wait(cb) != CBS_IDLE)
		return MQE_UNEXPECTED_CB_ERR;
	send_message_queue_interrupt(mqd);
	return MQE_OK;
}

/*
 * Handle a gru_mesq failure. Some of these failures are software recoverable
 * or retryable.
 */
static int send_message_failure(void *cb, struct gru_message_queue_desc *mqd,
				void *mesg, int lines)
{
	int substatus, ret = 0;

	substatus = gru_get_cb_message_queue_substatus(cb);
	switch (substatus) {
	case CBSS_NO_ERROR:
		STAT(mesq_send_unexpected_error);
		ret = MQE_UNEXPECTED_CB_ERR;
		break;
	case CBSS_LB_OVERFLOWED:
		STAT(mesq_send_lb_overflow);
		ret = MQE_CONGESTION;
		break;
	case CBSS_QLIMIT_REACHED:
		STAT(mesq_send_qlimit_reached);
		ret = send_message_queue_full(cb, mqd, mesg, lines);
		break;
	case CBSS_AMO_NACKED:
		STAT(mesq_send_amo_nacked);
		ret = MQE_CONGESTION;
		break;
	case CBSS_PUT_NACKED:
		STAT(mesq_send_put_nacked);
		ret = send_message_put_nacked(cb, mqd, mesg, lines);
		break;
	default:
		BUG();
	}
	return ret;
}

/*
 * Send a message to a message queue
 *	mqd	message queue descriptor
 *	mesg	message. must be vaddr within a GSEG
 *	bytes	message size (<= 2 CL)
 */
int gru_send_message_gpa(struct gru_message_queue_desc *mqd, void *mesg,
				unsigned int bytes)
{
	struct message_header *mhdr;
	void *cb;
	void *dsr;
	int istatus, clines, ret;

	STAT(mesq_send);
	BUG_ON(bytes < sizeof(int) || bytes > 2 * GRU_CACHE_LINE_BYTES);

	clines = DIV_ROUND_UP(bytes, GRU_CACHE_LINE_BYTES);
	if (gru_get_cpu_resources(bytes, &cb, &dsr))
		return MQE_BUG_NO_RESOURCES;
	memcpy(dsr, mesg, bytes);
	mhdr = dsr;
	mhdr->present = MQS_FULL;
	mhdr->lines = clines;
	if (clines == 2) {
		mhdr->present2 = get_present2(mhdr);
		restore_present2(mhdr, MQS_FULL);
	}

	do {
		ret = MQE_OK;
		gru_mesq(cb, mqd->mq_gpa, gru_get_tri(mhdr), clines, IMA);
		istatus = gru_wait(cb);
		if (istatus != CBS_IDLE)
			ret = send_message_failure(cb, mqd, dsr, clines);
	} while (ret == MQIE_AGAIN);
	gru_free_cpu_resources(cb, dsr);

	if (ret)
		STAT(mesq_send_failed);
	return ret;
}
EXPORT_SYMBOL_GPL(gru_send_message_gpa);
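
/*
 * Illustrative sketch (not compiled, not part of the driver): sending a
 * small message. "struct my_msg" is hypothetical; the first word of the
 * message is overwritten by the mesq interface (see struct
 * message_header), so real payloads should leave room for it.
 */
#if 0
	struct my_msg {
		struct message_header hdr;	/* filled in by send */
		int payload;
	} m = { .payload = 42 };

	int ret = gru_send_message_gpa(mqd, &m, sizeof(m));
	if (ret)
		;	/* MQE_xxx failure, e.g. MQE_QUEUE_FULL */
#endif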

/*
 * Advance the receive pointer for the queue to the next message.
 */
void gru_free_message(struct gru_message_queue_desc *mqd, void *mesg)
{
	struct message_queue *mq = mqd->mq;
	struct message_header *mhdr = mq->next;
	void *next, *pnext;
	int half = -1;
	int lines = mhdr->lines;

	if (lines == 2)
		restore_present2(mhdr, MQS_EMPTY);
	mhdr->present = MQS_EMPTY;

	pnext = mq->next;
	next = pnext + GRU_CACHE_LINE_BYTES * lines;
	if (next == mq->limit) {
		next = mq->start;
		half = 1;
	} else if (pnext < mq->start2 && next >= mq->start2) {
		half = 0;
	}

	if (half >= 0)
		mq->hstatus[half] = 1;
	mq->next = next;
}
EXPORT_SYMBOL_GPL(gru_free_message);

/*
 * Get next message from message queue. Return NULL if no message
 * present. User must call gru_free_message() to move to next message.
 */
void *gru_get_next_message(struct gru_message_queue_desc *mqd)
{
	struct message_queue *mq = mqd->mq;
	struct message_header *mhdr = mq->next;
	int present = mhdr->present;

	/* skip NOOP messages */
	STAT(mesq_receive);
	while (present == MQS_NOOP) {
		gru_free_message(mqd, mhdr);
		mhdr = mq->next;
		present = mhdr->present;
	}

	/* Wait for both halves of 2 line messages */
	if (present == MQS_FULL && mhdr->lines == 2 &&
				get_present2(mhdr) == MQS_EMPTY)
		present = MQS_EMPTY;

	if (!present) {
		STAT(mesq_receive_none);
		return NULL;
	}

	if (mhdr->lines == 2)
		restore_present2(mhdr, mhdr->present2);

	return mhdr;
}
EXPORT_SYMBOL_GPL(gru_get_next_message);
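
/*
 * Illustrative sketch (not compiled, not part of the driver): a receive
 * loop. Only the message at the queue's next pointer is ever returned,
 * so each message must be released with gru_free_message() before the
 * following gru_get_next_message() call.
 */
#if 0
	void *msg;

	while ((msg = gru_get_next_message(mqd)) != NULL) {
		/* ... process the message payload ... */
		gru_free_message(mqd, msg);
	}
#endif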

/* ---------------------- GRU DATA COPY FUNCTIONS ---------------------------*/

/*
 * Copy a block of data using the GRU resources
 */
int gru_copy_gpa(unsigned long dest_gpa, unsigned long src_gpa,
				unsigned int bytes)
{
	void *cb;
	void *dsr;
	int ret;

	STAT(copy_gpa);
	if (gru_get_cpu_resources(GRU_NUM_KERNEL_DSR_BYTES, &cb, &dsr))
		return MQE_BUG_NO_RESOURCES;
	gru_bcopy(cb, src_gpa, dest_gpa, gru_get_tri(dsr),
		  XTYPE_B, bytes, GRU_NUM_KERNEL_DSR_CL, IMA);
	ret = gru_wait(cb);
	gru_free_cpu_resources(cb, dsr);
	return ret;
}
EXPORT_SYMBOL_GPL(gru_copy_gpa);
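
/*
 * Illustrative sketch (not compiled, not part of the driver): copying a
 * buffer between two global physical addresses, e.g. across partitions.
 * "src", "dst" and "bytes" are hypothetical; uv_gpa() converts a kernel
 * vaddr to a global physical address.
 */
#if 0
	if (gru_copy_gpa(uv_gpa(dst), uv_gpa(src), bytes))
		;	/* MQE_BUG_NO_RESOURCES or a failed wait status */
#endif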

/* ------------------- KERNEL QUICKTESTS RUN AT STARTUP ----------------*/
/* Temp - will delete after we gain confidence in the GRU */
static int quicktest(void)
{
	unsigned long word0 = MAGIC, word1 = 0;
	void *cb, *dsr;
	unsigned long *p;

	if (gru_get_cpu_resources(GRU_CACHE_LINE_BYTES, &cb, &dsr))
		return MQE_BUG_NO_RESOURCES;
	p = dsr;
	gru_vload(cb, uv_gpa(&word0), gru_get_tri(dsr), XTYPE_DW, 1, 1, IMA);
	if (gru_wait(cb) != CBS_IDLE)
		BUG();
	if (*p != MAGIC)
		BUG();
	gru_vstore(cb, uv_gpa(&word1), gru_get_tri(dsr), XTYPE_DW, 1, 1, IMA);
	if (gru_wait(cb) != CBS_IDLE)
		BUG();
	gru_free_cpu_resources(cb, dsr);

	if (word0 != word1 || word1 != MAGIC) {
		printk(KERN_DEBUG
			"GRU quicktest err: found 0x%lx, expected 0x%lx\n",
			word1, MAGIC);
		BUG();		/* ZZZ should not be fatal */
	}
	return 0;
}

int gru_kservices_init(struct gru_state *gru)
{
	struct gru_blade_state *bs;

	bs = gru->gs_blade;
	if (gru != &bs->bs_grus[0])
		return 0;

	init_rwsem(&bs->bs_kgts_sema);

	if (gru_options & GRU_QUICKLOOK)
		quicktest();
	return 0;
}

void gru_kservices_exit(struct gru_state *gru)
{
	struct gru_blade_state *bs;
	struct gru_thread_state *kgts;

	bs = gru->gs_blade;
	if (gru != &bs->bs_grus[0])
		return;

	kgts = bs->bs_kgts;
	if (kgts && kgts->ts_gru)
		gru_unload_context(kgts, 0);
	kfree(kgts);
	bs->bs_kgts = NULL;
}