/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2008-2009 Silicon Graphics, Inc.  All Rights Reserved.
 */

/*
 * Cross Partition Communication (XPC) uv-based functions.
 *
 *	Architecture specific implementation of common functions.
 *
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <asm/uv/uv_hub.h>
#if defined CONFIG_X86_64
#include <asm/uv/bios.h>
#include <asm/uv/uv_irq.h>
#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
#include <asm/sn/intr.h>
#include <asm/sn/sn_sal.h>
#endif
#include "../sgi-gru/gru.h"
#include "../sgi-gru/grukservices.h"
#include "xpc.h"
#if defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
struct uv_IO_APIC_route_entry {
	__u64	vector		:  8,
		delivery_mode	:  3,
		dest_mode	:  1,
		delivery_status	:  1,
		polarity	:  1,
		__reserved_1	:  1,
		trigger		:  1,
		mask		:  1,
		__reserved_2	: 15,
		dest		: 32;
};
#endif

static struct xpc_heartbeat_uv *xpc_heartbeat_uv;
#define XPC_ACTIVATE_MSG_SIZE_UV	(1 * GRU_CACHE_LINE_BYTES)
#define XPC_ACTIVATE_MQ_SIZE_UV		(4 * XP_MAX_NPARTITIONS_UV * \
					 XPC_ACTIVATE_MSG_SIZE_UV)
#define XPC_ACTIVATE_IRQ_NAME		"xpc_activate"

#define XPC_NOTIFY_MSG_SIZE_UV		(2 * GRU_CACHE_LINE_BYTES)
#define XPC_NOTIFY_MQ_SIZE_UV		(4 * XP_MAX_NPARTITIONS_UV * \
					 XPC_NOTIFY_MSG_SIZE_UV)
#define XPC_NOTIFY_IRQ_NAME		"xpc_notify"

static struct xpc_gru_mq_uv *xpc_activate_mq_uv;
static struct xpc_gru_mq_uv *xpc_notify_mq_uv;
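
/*
 * Initialize the uv-specific per-partition state: the mutex guarding each
 * partition's cached activate-mq descriptor, the spinlock guarding its
 * flags, and its view of the remote activation state, which starts out
 * inactive.
 */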
static int
xpc_setup_partitions_uv(void)
{
	short partid;
	struct xpc_partition_uv *part_uv;

	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part_uv = &xpc_partitions[partid].sn.uv;

		mutex_init(&part_uv->cached_activate_gru_mq_desc_mutex);
		spin_lock_init(&part_uv->flags_lock);
		part_uv->remote_act_state = XPC_P_AS_INACTIVE;
	}
	return 0;
}
static void
xpc_teardown_partitions_uv(void)
{
	short partid;
	struct xpc_partition_uv *part_uv;
	unsigned long irq_flags;

	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part_uv = &xpc_partitions[partid].sn.uv;

		if (part_uv->cached_activate_gru_mq_desc != NULL) {
			mutex_lock(&part_uv->cached_activate_gru_mq_desc_mutex);
			spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
			part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
			spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
			kfree(part_uv->cached_activate_gru_mq_desc);
			part_uv->cached_activate_gru_mq_desc = NULL;
			mutex_unlock(&part_uv->
				     cached_activate_gru_mq_desc_mutex);
		}
	}
}
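
/*
 * Bind an irq to a GRU message queue.  On x86_64 the irq and vector are
 * handed out by uv_setup_irq(); on ia64 the SGI_XPC_ACTIVATE/SGI_XPC_NOTIFY
 * irqs are fixed and the MMR that steers the interrupt has to be written
 * by hand with the target cpu and vector.
 */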
static int
xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name)
{
	int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);

#if defined CONFIG_X86_64
	mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset,
			       UV_AFFINITY_CPU);
	if (mq->irq < 0) {
		dev_err(xpc_part, "uv_setup_irq() returned error=%d\n",
			-mq->irq);
		return mq->irq;
	}

	mq->mmr_value = uv_read_global_mmr64(mmr_pnode, mq->mmr_offset);

#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	if (strcmp(irq_name, XPC_ACTIVATE_IRQ_NAME) == 0)
		mq->irq = SGI_XPC_ACTIVATE;
	else if (strcmp(irq_name, XPC_NOTIFY_IRQ_NAME) == 0)
		mq->irq = SGI_XPC_NOTIFY;
	else
		return -EINVAL;

	mq->mmr_value = (unsigned long)cpu_physical_id(cpu) << 32 | mq->irq;
	uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mq->mmr_value);
#else
	#error not a supported configuration
#endif

	return 0;
}
static void
xpc_release_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq)
{
#if defined CONFIG_X86_64
	uv_teardown_irq(mq->irq);

#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	int mmr_pnode;
	unsigned long mmr_value;

	mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
	mmr_value = 1UL << 16;	/* set the irq MMR's mask bit */

	uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mmr_value);
#else
	#error not a supported configuration
#endif
}
static int
xpc_gru_mq_watchlist_alloc_uv(struct xpc_gru_mq_uv *mq)
{
	int ret;

#if defined CONFIG_X86_64
	ret = uv_bios_mq_watchlist_alloc(mq->mmr_blade, uv_gpa(mq->address),
					 mq->order, &mq->mmr_offset);
	if (ret < 0) {
		dev_err(xpc_part, "uv_bios_mq_watchlist_alloc() failed, "
			"ret=%d\n", ret);
		return ret;
	}
#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	ret = sn_mq_watchlist_alloc(mq->mmr_blade, (void *)uv_gpa(mq->address),
				    mq->order, &mq->mmr_offset);
	if (ret < 0) {
		dev_err(xpc_part, "sn_mq_watchlist_alloc() failed, ret=%d\n",
			ret);
		return -EBUSY;
	}
#else
	#error not a supported configuration
#endif

	mq->watchlist_num = ret;
	return 0;
}
static void
xpc_gru_mq_watchlist_free_uv(struct xpc_gru_mq_uv *mq)
{
	int ret;

#if defined CONFIG_X86_64
	ret = uv_bios_mq_watchlist_free(mq->mmr_blade, mq->watchlist_num);
	BUG_ON(ret != BIOS_STATUS_SUCCESS);
#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	ret = sn_mq_watchlist_free(mq->mmr_blade, mq->watchlist_num);
	BUG_ON(ret != SALRET_OK);
#else
	#error not a supported configuration
#endif
}
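
/*
 * Create a GRU message queue: allocate the bookkeeping structures and
 * node-local pages for the queue itself, register the queue with the
 * BIOS/SAL watchlist so that writes to it raise an irq, wire up the irq
 * handler, and finally open the memory protections so other partitions may
 * write to it.  Every step that can fail unwinds through the out_* labels
 * below.
 */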
static struct xpc_gru_mq_uv *
xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
		     irq_handler_t irq_handler)
{
	enum xp_retval xp_ret;
	int ret;
	int nid;
	int pg_order;
	struct page *page;
	struct xpc_gru_mq_uv *mq;
	struct uv_IO_APIC_route_entry *mmr_value;

	mq = kmalloc(sizeof(struct xpc_gru_mq_uv), GFP_KERNEL);
	if (mq == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
			"a xpc_gru_mq_uv structure\n");
		ret = -ENOMEM;
		goto out_0;
	}

	mq->gru_mq_desc = kzalloc(sizeof(struct gru_message_queue_desc),
				  GFP_KERNEL);
	if (mq->gru_mq_desc == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
			"a gru_message_queue_desc structure\n");
		ret = -ENOMEM;
		goto out_1;
	}

	pg_order = get_order(mq_size);
	mq->order = pg_order + PAGE_SHIFT;
	mq_size = 1UL << mq->order;

	mq->mmr_blade = uv_cpu_to_blade_id(cpu);

	nid = cpu_to_node(cpu);
	page = alloc_pages_exact_node(nid,
				      GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
				      pg_order);
	if (page == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
			"bytes of memory on nid=%d for GRU mq\n", mq_size, nid);
		ret = -ENOMEM;
		goto out_2;
	}
	mq->address = page_address(page);

	/* enable generation of irq when GRU mq operation occurs to this mq */
	ret = xpc_gru_mq_watchlist_alloc_uv(mq);
	if (ret != 0)
		goto out_3;

	ret = xpc_get_gru_mq_irq_uv(mq, cpu, irq_name);
	if (ret != 0)
		goto out_4;

	ret = request_irq(mq->irq, irq_handler, 0, irq_name, NULL);
	if (ret != 0) {
		dev_err(xpc_part, "request_irq(irq=%d) returned error=%d\n",
			mq->irq, -ret);
		goto out_5;
	}

	mmr_value = (struct uv_IO_APIC_route_entry *)&mq->mmr_value;
	ret = gru_create_message_queue(mq->gru_mq_desc, mq->address, mq_size,
				       nid, mmr_value->vector, mmr_value->dest);
	if (ret != 0) {
		dev_err(xpc_part, "gru_create_message_queue() returned "
			"error=%d\n", ret);
		ret = -EINVAL;
		goto out_6;
	}

	/* allow other partitions to access this GRU mq */
	xp_ret = xp_expand_memprotect(xp_pa(mq->address), mq_size);
	if (xp_ret != xpSuccess) {
		ret = -EACCES;
		goto out_6;
	}

	return mq;

	/* something went wrong */
out_6:
	free_irq(mq->irq, NULL);
out_5:
	xpc_release_gru_mq_irq_uv(mq);
out_4:
	xpc_gru_mq_watchlist_free_uv(mq);
out_3:
	free_pages((unsigned long)mq->address, pg_order);
out_2:
	kfree(mq->gru_mq_desc);
out_1:
	kfree(mq);
out_0:
	return ERR_PTR(ret);
}
static void
xpc_destroy_gru_mq_uv(struct xpc_gru_mq_uv *mq)
{
	unsigned int mq_size;
	int pg_order;
	int ret;

	/* disallow other partitions to access GRU mq */
	mq_size = 1UL << mq->order;
	ret = xp_restrict_memprotect(xp_pa(mq->address), mq_size);
	BUG_ON(ret != xpSuccess);

	/* unregister irq handler and release mq irq/vector mapping */
	free_irq(mq->irq, NULL);
	xpc_release_gru_mq_irq_uv(mq);

	/* disable generation of irq when GRU mq op occurs to this mq */
	xpc_gru_mq_watchlist_free_uv(mq);

	pg_order = mq->order - PAGE_SHIFT;
	free_pages((unsigned long)mq->address, pg_order);

	kfree(mq);
}
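
/*
 * Send a message to a GRU message queue, spinning until it is accepted.
 * MQE_QUEUE_FULL and MQE_CONGESTION are treated as transient and retried,
 * the queue-full case after a short sleep; any other error is reported to
 * the caller as xpGruSendMqError.
 */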
static enum xp_retval
xpc_send_gru_msg(struct gru_message_queue_desc *gru_mq_desc, void *msg,
		 size_t msg_size)
{
	enum xp_retval xp_ret;
	int ret;

	while (1) {
		ret = gru_send_message_gpa(gru_mq_desc, msg, msg_size);
		if (ret == MQE_OK) {
			xp_ret = xpSuccess;
			break;
		}

		if (ret == MQE_QUEUE_FULL) {
			dev_dbg(xpc_chan, "gru_send_message_gpa() returned "
				"error=MQE_QUEUE_FULL\n");
			/* !!! handle QLimit reached; delay & try again */
			/* ??? Do we add a limit to the number of retries? */
			(void)msleep_interruptible(10);
		} else if (ret == MQE_CONGESTION) {
			dev_dbg(xpc_chan, "gru_send_message_gpa() returned "
				"error=MQE_CONGESTION\n");
			/* !!! handle LB Overflow; simply try again */
			/* ??? Do we add a limit to the number of retries? */
		} else {
			/* !!! Currently this is MQE_UNEXPECTED_CB_ERR */
			dev_err(xpc_chan, "gru_send_message_gpa() returned "
				"error=%d\n", ret);
			xp_ret = xpGruSendMqError;
			break;
		}
	}
	return xp_ret;
}
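
/*
 * Run by the heartbeat-checker thread whenever xpc_activate_IRQ_rcvd is
 * nonzero: pick up each partition's pending act_state_req (dropping the
 * lock while acting on it, since activating or deactivating a partition
 * can sleep) and drive that partition's activation state machine.
 */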
static void
xpc_process_activate_IRQ_rcvd_uv(void)
{
	unsigned long irq_flags;
	short partid;
	struct xpc_partition *part;
	u8 act_state_req;

	DBUG_ON(xpc_activate_IRQ_rcvd == 0);

	spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part = &xpc_partitions[partid];

		if (part->sn.uv.act_state_req == 0)
			continue;

		xpc_activate_IRQ_rcvd--;
		BUG_ON(xpc_activate_IRQ_rcvd < 0);

		act_state_req = part->sn.uv.act_state_req;
		part->sn.uv.act_state_req = 0;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		if (act_state_req == XPC_P_ASR_ACTIVATE_UV) {
			if (part->act_state == XPC_P_AS_INACTIVE)
				xpc_activate_partition(part);
			else if (part->act_state == XPC_P_AS_DEACTIVATING)
				XPC_DEACTIVATE_PARTITION(part, xpReactivating);

		} else if (act_state_req == XPC_P_ASR_REACTIVATE_UV) {
			if (part->act_state == XPC_P_AS_INACTIVE)
				xpc_activate_partition(part);
			else
				XPC_DEACTIVATE_PARTITION(part, xpReactivating);

		} else if (act_state_req == XPC_P_ASR_DEACTIVATE_UV) {
			XPC_DEACTIVATE_PARTITION(part, part->sn.uv.reason);

		} else {
			BUG();
		}

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (xpc_activate_IRQ_rcvd == 0)
			break;
	}
	spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
}
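
/*
 * Decode one message received on the activate message queue.  Most cases
 * only record the request, by setting act_state_req and bumping
 * xpc_activate_IRQ_rcvd for the heartbeat checker, or by setting chctl
 * flags for the channel manager, and then wake the appropriate thread;
 * the real work is deferred out of irq context.
 */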
static void
xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
			      struct xpc_activate_mq_msghdr_uv *msg_hdr,
			      int *wakeup_hb_checker)
{
	unsigned long irq_flags;
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	struct xpc_openclose_args *args;

	part_uv->remote_act_state = msg_hdr->act_state;

	switch (msg_hdr->type) {
	case XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV:
		/* syncing of remote_act_state was just done above */
		break;

	case XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV: {
		struct xpc_activate_mq_msg_activate_req_uv *msg;

		/*
		 * ??? Do we deal here with ts_jiffies being different
		 * ??? if act_state != XPC_P_AS_INACTIVE instead of
		 * ??? letting the heartbeat checker deal with it?
		 */
		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_activate_req_uv, hdr);

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_ACTIVATE_UV;
		part->remote_rp_pa = msg->rp_gpa; /* !!! _pa is _gpa */
		part->remote_rp_ts_jiffies = msg_hdr->rp_ts_jiffies;
		part_uv->heartbeat_gpa = msg->heartbeat_gpa;

		if (msg->activate_gru_mq_desc_gpa !=
		    part_uv->activate_gru_mq_desc_gpa) {
			spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
			part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
			spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
			part_uv->activate_gru_mq_desc_gpa =
			    msg->activate_gru_mq_desc_gpa;
		}
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV: {
		struct xpc_activate_mq_msg_deactivate_req_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_deactivate_req_uv, hdr);

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
		part_uv->reason = msg->reason;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		return;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV: {
		struct xpc_activate_mq_msg_chctl_closerequest_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_closerequest_uv,
				   hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->reason = msg->reason;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREQUEST;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV: {
		struct xpc_activate_mq_msg_chctl_closereply_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_closereply_uv,
				   hdr);

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREPLY;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV: {
		struct xpc_activate_mq_msg_chctl_openrequest_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_openrequest_uv,
				   hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->entry_size = msg->entry_size;
		args->local_nentries = msg->local_nentries;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREQUEST;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV: {
		struct xpc_activate_mq_msg_chctl_openreply_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_openreply_uv, hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->remote_nentries = msg->remote_nentries;
		args->local_nentries = msg->local_nentries;
		args->local_msgqueue_pa = msg->notify_gru_mq_desc_gpa;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREPLY;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV: {
		struct xpc_activate_mq_msg_chctl_opencomplete_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_opencomplete_uv,
				   hdr);
		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENCOMPLETE;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV:
		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags |= XPC_P_ENGAGED_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
		break;

	case XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV:
		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags &= ~XPC_P_ENGAGED_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
		break;

	default:
		dev_err(xpc_part, "received unknown activate_mq msg type=%d "
			"from partition=%d\n", msg_hdr->type, XPC_PARTID(part));

		/* get hb checker to deactivate from the remote partition */
		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
		part_uv->reason = xpBadMsgType;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		return;
	}

	if (msg_hdr->rp_ts_jiffies != part->remote_rp_ts_jiffies &&
	    part->remote_rp_ts_jiffies != 0) {
		/*
		 * ??? Does what we do here need to be sensitive to
		 * ??? act_state or remote_act_state?
		 */
		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_REACTIVATE_UV;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
	}
}
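
/*
 * Interrupt handler for the activate message queue.  Drains every message
 * currently queued, dispatching each to xpc_handle_activate_mq_msg_uv()
 * and returning it to the GRU, then wakes the heartbeat checker once at
 * the end if any message asked for that.
 */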
static irqreturn_t
xpc_handle_activate_IRQ_uv(int irq, void *dev_id)
{
	struct xpc_activate_mq_msghdr_uv *msg_hdr;
	short partid;
	struct xpc_partition *part;
	int wakeup_hb_checker = 0;
	int part_referenced;

	while (1) {
		msg_hdr = gru_get_next_message(xpc_activate_mq_uv->gru_mq_desc);
		if (msg_hdr == NULL)
			break;

		partid = msg_hdr->partid;
		if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
			dev_err(xpc_part, "xpc_handle_activate_IRQ_uv() "
				"received invalid partid=0x%x in message\n",
				partid);
		} else {
			part = &xpc_partitions[partid];

			part_referenced = xpc_part_ref(part);
			xpc_handle_activate_mq_msg_uv(part, msg_hdr,
						      &wakeup_hb_checker);
			if (part_referenced)
				xpc_part_deref(part);
		}

		gru_free_message(xpc_activate_mq_uv->gru_mq_desc, msg_hdr);
	}

	if (wakeup_hb_checker)
		wake_up_interruptible(&xpc_activate_IRQ_wq);

	return IRQ_HANDLED;
}
static enum xp_retval
xpc_cache_remote_gru_mq_desc_uv(struct gru_message_queue_desc *gru_mq_desc,
				unsigned long gru_mq_desc_gpa)
{
	enum xp_retval ret;

	ret = xp_remote_memcpy(uv_gpa(gru_mq_desc), gru_mq_desc_gpa,
			       sizeof(struct gru_message_queue_desc));
	if (ret == xpSuccess)
		gru_mq_desc->mq = NULL;

	return ret;
}
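
/*
 * Send a message to another partition's activate message queue.  The first
 * send (or the first send after the remote side publishes a new descriptor
 * gpa) pulls a copy of the remote queue's descriptor over with
 * xp_remote_memcpy() and caches it under
 * cached_activate_gru_mq_desc_mutex; a send that fails after the cache was
 * invalidated loops back to refresh the descriptor and tries again.
 */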
static enum xp_retval
xpc_send_activate_IRQ_uv(struct xpc_partition *part, void *msg, size_t msg_size,
			 int msg_type)
{
	struct xpc_activate_mq_msghdr_uv *msg_hdr = msg;
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	struct gru_message_queue_desc *gru_mq_desc;
	unsigned long irq_flags;
	enum xp_retval ret;

	DBUG_ON(msg_size > XPC_ACTIVATE_MSG_SIZE_UV);

	msg_hdr->type = msg_type;
	msg_hdr->partid = xp_partition_id;
	msg_hdr->act_state = part->act_state;
	msg_hdr->rp_ts_jiffies = xpc_rsvd_page->ts_jiffies;

	mutex_lock(&part_uv->cached_activate_gru_mq_desc_mutex);
again:
	if (!(part_uv->flags & XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV)) {
		gru_mq_desc = part_uv->cached_activate_gru_mq_desc;
		if (gru_mq_desc == NULL) {
			gru_mq_desc = kmalloc(sizeof(struct
					      gru_message_queue_desc),
					      GFP_KERNEL);
			if (gru_mq_desc == NULL) {
				ret = xpNoMemory;
				goto done;
			}
			part_uv->cached_activate_gru_mq_desc = gru_mq_desc;
		}

		ret = xpc_cache_remote_gru_mq_desc_uv(gru_mq_desc,
						      part_uv->
						      activate_gru_mq_desc_gpa);
		if (ret != xpSuccess)
			goto done;

		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags |= XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
	}

	/* ??? Is holding a spin_lock (ch->lock) during this call a bad idea? */
	ret = xpc_send_gru_msg(part_uv->cached_activate_gru_mq_desc, msg,
			       msg_size);
	if (ret != xpSuccess) {
		smp_rmb();	/* ensure a fresh copy of part_uv->flags */
		if (!(part_uv->flags & XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV))
			goto again;
	}
done:
	mutex_unlock(&part_uv->cached_activate_gru_mq_desc_mutex);
	return ret;
}
static void
xpc_send_activate_IRQ_part_uv(struct xpc_partition *part, void *msg,
			      size_t msg_size, int msg_type)
{
	enum xp_retval ret;

	ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type);
	if (unlikely(ret != xpSuccess))
		XPC_DEACTIVATE_PARTITION(part, ret);
}
static void
xpc_send_activate_IRQ_ch_uv(struct xpc_channel *ch, unsigned long *irq_flags,
			    void *msg, size_t msg_size, int msg_type)
{
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	enum xp_retval ret;

	ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type);
	if (unlikely(ret != xpSuccess)) {
		if (irq_flags != NULL)
			spin_unlock_irqrestore(&ch->lock, *irq_flags);

		XPC_DEACTIVATE_PARTITION(part, ret);

		if (irq_flags != NULL)
			spin_lock_irqsave(&ch->lock, *irq_flags);
	}
}
static void
xpc_send_local_activate_IRQ_uv(struct xpc_partition *part, int act_state_req)
{
	unsigned long irq_flags;
	struct xpc_partition_uv *part_uv = &part->sn.uv;

	/*
	 * !!! Make our side think that the remote partition sent an activate
	 * !!! mq message our way by doing what the activate IRQ handler would
	 * !!! do had one really been sent.
	 */

	spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
	if (part_uv->act_state_req == 0)
		xpc_activate_IRQ_rcvd++;
	part_uv->act_state_req = act_state_req;
	spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

	wake_up_interruptible(&xpc_activate_IRQ_wq);
}
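
/*
 * Look up this partition's reserved page.  The underlying BIOS/SAL call is
 * iterative: it returns BIOS_STATUS_MORE_PASSES/SALRET_MORE_PASSES,
 * advancing *cookie each time, until the lookup completes, and that state
 * is reported upward as xpNeedMoreInfo.
 */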
static enum xp_retval
xpc_get_partition_rsvd_page_pa_uv(void *buf, u64 *cookie, unsigned long *rp_pa,
				  size_t *len)
{
	s64 status;
	enum xp_retval ret;

#if defined CONFIG_X86_64
	status = uv_bios_reserved_page_pa((u64)buf, cookie, (u64 *)rp_pa,
					  (u64 *)len);
	if (status == BIOS_STATUS_SUCCESS)
		ret = xpSuccess;
	else if (status == BIOS_STATUS_MORE_PASSES)
		ret = xpNeedMoreInfo;
	else
		ret = xpBiosError;

#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	status = sn_partition_reserved_page_pa((u64)buf, cookie, rp_pa, len);
	if (status == SALRET_OK)
		ret = xpSuccess;
	else if (status == SALRET_MORE_PASSES)
		ret = xpNeedMoreInfo;
	else
		ret = xpSalError;

#else
	#error not a supported configuration
#endif

	return ret;
}
static int
xpc_setup_rsvd_page_uv(struct xpc_rsvd_page *rp)
{
	xpc_heartbeat_uv =
	    &xpc_partitions[sn_partition_id].sn.uv.cached_heartbeat;
	rp->sn.uv.heartbeat_gpa = uv_gpa(xpc_heartbeat_uv);
	rp->sn.uv.activate_gru_mq_desc_gpa =
	    uv_gpa(xpc_activate_mq_uv->gru_mq_desc);
	return 0;
}
/* allowing/disallowing remote heartbeats is a no-op on uv */
static void
xpc_allow_hb_uv(short partid)
{
}

static void
xpc_disallow_hb_uv(short partid)
{
}

static void
xpc_disallow_all_hbs_uv(void)
{
}

static void
xpc_increment_heartbeat_uv(void)
{
	xpc_heartbeat_uv->value++;
}

static void
xpc_offline_heartbeat_uv(void)
{
	xpc_increment_heartbeat_uv();
	xpc_heartbeat_uv->offline = 1;
}

static void
xpc_online_heartbeat_uv(void)
{
	xpc_increment_heartbeat_uv();
	xpc_heartbeat_uv->offline = 0;
}

static void
xpc_heartbeat_init_uv(void)
{
	xpc_heartbeat_uv->value = 1;
	xpc_heartbeat_uv->offline = 0;
}

static void
xpc_heartbeat_exit_uv(void)
{
	xpc_offline_heartbeat_uv();
}
static enum xp_retval
xpc_get_remote_heartbeat_uv(struct xpc_partition *part)
{
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	enum xp_retval ret;

	ret = xp_remote_memcpy(uv_gpa(&part_uv->cached_heartbeat),
			       part_uv->heartbeat_gpa,
			       sizeof(struct xpc_heartbeat_uv));
	if (ret != xpSuccess)
		return ret;

	if (part_uv->cached_heartbeat.value == part->last_heartbeat &&
	    !part_uv->cached_heartbeat.offline) {
		ret = xpNoHeartbeat;
	} else {
		part->last_heartbeat = part_uv->cached_heartbeat.value;
	}
	return ret;
}
static void
xpc_request_partition_activation_uv(struct xpc_rsvd_page *remote_rp,
				    unsigned long remote_rp_gpa, int nasid)
{
	short partid = remote_rp->SAL_partid;
	struct xpc_partition *part = &xpc_partitions[partid];
	struct xpc_activate_mq_msg_activate_req_uv msg;

	part->remote_rp_pa = remote_rp_gpa; /* !!! _pa here is really _gpa */
	part->remote_rp_ts_jiffies = remote_rp->ts_jiffies;
	part->sn.uv.heartbeat_gpa = remote_rp->sn.uv.heartbeat_gpa;
	part->sn.uv.activate_gru_mq_desc_gpa =
	    remote_rp->sn.uv.activate_gru_mq_desc_gpa;

	/*
	 * ??? Is it a good idea to make this conditional on what is
	 * ??? potentially stale state information?
	 */
	if (part->sn.uv.remote_act_state == XPC_P_AS_INACTIVE) {
		msg.rp_gpa = uv_gpa(xpc_rsvd_page);
		msg.heartbeat_gpa = xpc_rsvd_page->sn.uv.heartbeat_gpa;
		msg.activate_gru_mq_desc_gpa =
		    xpc_rsvd_page->sn.uv.activate_gru_mq_desc_gpa;
		xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
					   XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV);
	}

	if (part->act_state == XPC_P_AS_INACTIVE)
		xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV);
}
static void
xpc_request_partition_reactivation_uv(struct xpc_partition *part)
{
	xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV);
}

static void
xpc_request_partition_deactivation_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_deactivate_req_uv msg;

	/*
	 * ??? Is it a good idea to make this conditional on what is
	 * ??? potentially stale state information?
	 */
	if (part->sn.uv.remote_act_state != XPC_P_AS_DEACTIVATING &&
	    part->sn.uv.remote_act_state != XPC_P_AS_INACTIVE) {

		msg.reason = part->reason;
		xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
					 XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV);
	}
}

static void
xpc_cancel_partition_deactivation_request_uv(struct xpc_partition *part)
{
	/* nothing needs to be done */
}
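
/*
 * A small spinlock-protected singly-linked FIFO.  The links are struct
 * xpc_fifo_entry_uv fields embedded in the message-slot structures, so a
 * hypothetical caller holding a send slot would queue and dequeue it
 * roughly like this:
 *
 *	xpc_put_fifo_entry_uv(&head, &msg_slot->next);
 *	entry = xpc_get_fifo_entry_uv(&head);
 *	if (entry != NULL)
 *		msg_slot = container_of(entry,
 *					struct xpc_send_msg_slot_uv, next);
 */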
static void
xpc_init_fifo_uv(struct xpc_fifo_head_uv *head)
{
	head->first = NULL;
	head->last = NULL;
	spin_lock_init(&head->lock);
	head->n_entries = 0;
}
static struct xpc_fifo_entry_uv *
xpc_get_fifo_entry_uv(struct xpc_fifo_head_uv *head)
{
	unsigned long irq_flags;
	struct xpc_fifo_entry_uv *first;

	spin_lock_irqsave(&head->lock, irq_flags);
	first = head->first;
	if (head->first != NULL) {
		head->first = first->next;
		if (head->first == NULL)
			head->last = NULL;

		head->n_entries--;
		BUG_ON(head->n_entries < 0);

		first->next = NULL;
	}
	spin_unlock_irqrestore(&head->lock, irq_flags);
	return first;
}
static void
xpc_put_fifo_entry_uv(struct xpc_fifo_head_uv *head,
		      struct xpc_fifo_entry_uv *last)
{
	unsigned long irq_flags;

	last->next = NULL;
	spin_lock_irqsave(&head->lock, irq_flags);
	if (head->last != NULL)
		head->last->next = last;
	else
		head->first = last;
	head->last = last;
	head->n_entries++;
	spin_unlock_irqrestore(&head->lock, irq_flags);
}

static int
xpc_n_of_fifo_entries_uv(struct xpc_fifo_head_uv *head)
{
	return head->n_entries;
}
/*
 * Setup the channel structures that are uv specific.
 */
static enum xp_retval
xpc_setup_ch_structures_uv(struct xpc_partition *part)
{
	struct xpc_channel_uv *ch_uv;
	int ch_number;

	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch_uv = &part->channels[ch_number].sn.uv;

		xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
		xpc_init_fifo_uv(&ch_uv->recv_msg_list);
	}

	return xpSuccess;
}

/*
 * Teardown the channel structures that are uv specific.
 */
static void
xpc_teardown_ch_structures_uv(struct xpc_partition *part)
{
	/* nothing needs to be done */
}
static enum xp_retval
xpc_make_first_contact_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_uv msg;

	/*
	 * We send a sync msg to get the remote partition's remote_act_state
	 * updated to our current act_state which at this point should
	 * be XPC_P_AS_ACTIVATING.
	 */
	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
				      XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV);

	while (part->sn.uv.remote_act_state != XPC_P_AS_ACTIVATING) {

		dev_dbg(xpc_part, "waiting to make first contact with "
			"partition %d\n", XPC_PARTID(part));

		/* wait a 1/4 of a second or so */
		(void)msleep_interruptible(250);

		if (part->act_state == XPC_P_AS_DEACTIVATING)
			return part->reason;
	}

	return xpSuccess;
}
static u64
xpc_get_chctl_all_flags_uv(struct xpc_partition *part)
{
	unsigned long irq_flags;
	union xpc_channel_ctl_flags chctl;

	spin_lock_irqsave(&part->chctl_lock, irq_flags);
	chctl = part->chctl;
	if (chctl.all_flags != 0)
		part->chctl.all_flags = 0;

	spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
	return chctl.all_flags;
}
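
/*
 * Both msg-slot allocators below retry with progressively fewer entries
 * until kzalloc() succeeds, then record the (possibly reduced) count back
 * into the channel under ch->lock so both sides agree on the queue depth.
 */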
static enum xp_retval
xpc_allocate_send_msg_slot_uv(struct xpc_channel *ch)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;
	struct xpc_send_msg_slot_uv *msg_slot;
	unsigned long irq_flags;
	int nentries;
	int entry;
	size_t nbytes;

	for (nentries = ch->local_nentries; nentries > 0; nentries--) {
		nbytes = nentries * sizeof(struct xpc_send_msg_slot_uv);
		ch_uv->send_msg_slots = kzalloc(nbytes, GFP_KERNEL);
		if (ch_uv->send_msg_slots == NULL)
			continue;

		for (entry = 0; entry < nentries; entry++) {
			msg_slot = &ch_uv->send_msg_slots[entry];

			msg_slot->msg_slot_number = entry;
			xpc_put_fifo_entry_uv(&ch_uv->msg_slot_free_list,
					      &msg_slot->next);
		}

		spin_lock_irqsave(&ch->lock, irq_flags);
		if (nentries < ch->local_nentries)
			ch->local_nentries = nentries;
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return xpSuccess;
	}

	return xpNoMemory;
}
static enum xp_retval
xpc_allocate_recv_msg_slot_uv(struct xpc_channel *ch)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;
	struct xpc_notify_mq_msg_uv *msg_slot;
	unsigned long irq_flags;
	int nentries;
	int entry;
	size_t nbytes;

	for (nentries = ch->remote_nentries; nentries > 0; nentries--) {
		nbytes = nentries * ch->entry_size;
		ch_uv->recv_msg_slots = kzalloc(nbytes, GFP_KERNEL);
		if (ch_uv->recv_msg_slots == NULL)
			continue;

		for (entry = 0; entry < nentries; entry++) {
			msg_slot = ch_uv->recv_msg_slots +
			    entry * ch->entry_size;

			msg_slot->hdr.msg_slot_number = entry;
		}

		spin_lock_irqsave(&ch->lock, irq_flags);
		if (nentries < ch->remote_nentries)
			ch->remote_nentries = nentries;
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return xpSuccess;
	}

	return xpNoMemory;
}
/*
 * Allocate msg_slots associated with the channel.
 */
static enum xp_retval
xpc_setup_msg_structures_uv(struct xpc_channel *ch)
{
	enum xp_retval ret;
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;

	DBUG_ON(ch->flags & XPC_C_SETUP);

	ch_uv->cached_notify_gru_mq_desc =
	    kmalloc(sizeof(struct gru_message_queue_desc), GFP_KERNEL);
	if (ch_uv->cached_notify_gru_mq_desc == NULL)
		return xpNoMemory;

	ret = xpc_allocate_send_msg_slot_uv(ch);
	if (ret == xpSuccess) {

		ret = xpc_allocate_recv_msg_slot_uv(ch);
		if (ret != xpSuccess) {
			kfree(ch_uv->send_msg_slots);
			xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
		}
	}
	return ret;
}
/*
 * Free up msg_slots and clear other stuff that were set up for the specified
 * channel.
 */
static void
xpc_teardown_msg_structures_uv(struct xpc_channel *ch)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;

	DBUG_ON(!spin_is_locked(&ch->lock));

	kfree(ch_uv->cached_notify_gru_mq_desc);
	ch_uv->cached_notify_gru_mq_desc = NULL;

	if (ch->flags & XPC_C_SETUP) {
		xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
		kfree(ch_uv->send_msg_slots);
		xpc_init_fifo_uv(&ch_uv->recv_msg_list);
		kfree(ch_uv->recv_msg_slots);
	}
}
static void
xpc_send_chctl_closerequest_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_closerequest_uv msg;

	msg.ch_number = ch->number;
	msg.reason = ch->reason;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV);
}

static void
xpc_send_chctl_closereply_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_closereply_uv msg;

	msg.ch_number = ch->number;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV);
}

static void
xpc_send_chctl_openrequest_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_openrequest_uv msg;

	msg.ch_number = ch->number;
	msg.entry_size = ch->entry_size;
	msg.local_nentries = ch->local_nentries;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV);
}

static void
xpc_send_chctl_openreply_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_openreply_uv msg;

	msg.ch_number = ch->number;
	msg.local_nentries = ch->local_nentries;
	msg.remote_nentries = ch->remote_nentries;
	msg.notify_gru_mq_desc_gpa = uv_gpa(xpc_notify_mq_uv->gru_mq_desc);
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV);
}

static void
xpc_send_chctl_opencomplete_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_opencomplete_uv msg;

	msg.ch_number = ch->number;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV);
}
static void
xpc_send_chctl_local_msgrequest_uv(struct xpc_partition *part, int ch_number)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&part->chctl_lock, irq_flags);
	part->chctl.flags[ch_number] |= XPC_CHCTL_MSGREQUEST;
	spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

	xpc_wakeup_channel_mgr(part);
}
static enum xp_retval
xpc_save_remote_msgqueue_pa_uv(struct xpc_channel *ch,
			       unsigned long gru_mq_desc_gpa)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;

	DBUG_ON(ch_uv->cached_notify_gru_mq_desc == NULL);
	return xpc_cache_remote_gru_mq_desc_uv(ch_uv->cached_notify_gru_mq_desc,
					       gru_mq_desc_gpa);
}
static void
xpc_indicate_partition_engaged_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_uv msg;

	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
				      XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV);
}

static void
xpc_indicate_partition_disengaged_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_uv msg;

	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
				      XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV);
}

static void
xpc_assume_partition_disengaged_uv(short partid)
{
	struct xpc_partition_uv *part_uv = &xpc_partitions[partid].sn.uv;
	unsigned long irq_flags;

	spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
	part_uv->flags &= ~XPC_P_ENGAGED_UV;
	spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
}

static int
xpc_partition_engaged_uv(short partid)
{
	return (xpc_partitions[partid].sn.uv.flags & XPC_P_ENGAGED_UV) != 0;
}
static int
xpc_any_partition_engaged_uv(void)
{
	struct xpc_partition_uv *part_uv;
	short partid;

	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part_uv = &xpc_partitions[partid].sn.uv;
		if ((part_uv->flags & XPC_P_ENGAGED_UV) != 0)
			return 1;
	}
	return 0;
}
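
/*
 * Grab a free send-msg slot, optionally blocking until one is freed.  With
 * XPC_NOWAIT the caller gets xpNoWait back instead of sleeping; otherwise
 * xpc_allocate_msg_wait() sleeps and the loop retries for as long as the
 * wait ends in xpInterrupted or xpTimeout.
 */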
static enum xp_retval
xpc_allocate_msg_slot_uv(struct xpc_channel *ch, u32 flags,
			 struct xpc_send_msg_slot_uv **address_of_msg_slot)
{
	enum xp_retval ret;
	struct xpc_send_msg_slot_uv *msg_slot;
	struct xpc_fifo_entry_uv *entry;

	while (1) {
		entry = xpc_get_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list);
		if (entry != NULL)
			break;

		if (flags & XPC_NOWAIT)
			return xpNoWait;

		ret = xpc_allocate_msg_wait(ch);
		if (ret != xpInterrupted && ret != xpTimeout)
			return ret;
	}

	msg_slot = container_of(entry, struct xpc_send_msg_slot_uv, next);
	*address_of_msg_slot = msg_slot;
	return xpSuccess;
}
static void
xpc_free_msg_slot_uv(struct xpc_channel *ch,
		     struct xpc_send_msg_slot_uv *msg_slot)
{
	xpc_put_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list, &msg_slot->next);

	/* wakeup anyone waiting for a free msg slot */
	if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
		wake_up(&ch->msg_allocate_wq);
}
static void
xpc_notify_sender_uv(struct xpc_channel *ch,
		     struct xpc_send_msg_slot_uv *msg_slot,
		     enum xp_retval reason)
{
	xpc_notify_func func = msg_slot->func;

	if (func != NULL && cmpxchg(&msg_slot->func, func, NULL) == func) {

		atomic_dec(&ch->n_to_notify);

		dev_dbg(xpc_chan, "msg_slot->func() called, msg_slot=0x%p "
			"msg_slot_number=%d partid=%d channel=%d\n", msg_slot,
			msg_slot->msg_slot_number, ch->partid, ch->number);

		func(reason, ch->partid, ch->number, msg_slot->key);

		dev_dbg(xpc_chan, "msg_slot->func() returned, msg_slot=0x%p "
			"msg_slot_number=%d partid=%d channel=%d\n", msg_slot,
			msg_slot->msg_slot_number, ch->partid, ch->number);
	}
}
static void
xpc_handle_notify_mq_ack_uv(struct xpc_channel *ch,
			    struct xpc_notify_mq_msg_uv *msg)
{
	struct xpc_send_msg_slot_uv *msg_slot;
	int entry = msg->hdr.msg_slot_number % ch->local_nentries;

	msg_slot = &ch->sn.uv.send_msg_slots[entry];

	BUG_ON(msg_slot->msg_slot_number != msg->hdr.msg_slot_number);
	msg_slot->msg_slot_number += ch->local_nentries;

	if (msg_slot->func != NULL)
		xpc_notify_sender_uv(ch, msg_slot, xpMsgDelivered);

	xpc_free_msg_slot_uv(ch, msg_slot);
}
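
/*
 * Dispatch one message arriving on the notify message queue.  A message
 * whose hdr.size is zero is an ACK for a message this side sent earlier;
 * anything else is a new payload, which is copied into the pre-allocated
 * recv slot it names and queued for delivery by a kthread.  An invalid
 * channel number is grounds for deactivating the sending partition.
 */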
static void
xpc_handle_notify_mq_msg_uv(struct xpc_partition *part,
			    struct xpc_notify_mq_msg_uv *msg)
{
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	struct xpc_channel *ch;
	struct xpc_channel_uv *ch_uv;
	struct xpc_notify_mq_msg_uv *msg_slot;
	unsigned long irq_flags;
	int ch_number = msg->hdr.ch_number;

	if (unlikely(ch_number >= part->nchannels)) {
		dev_err(xpc_part, "xpc_handle_notify_IRQ_uv() received invalid "
			"channel number=0x%x in message from partid=%d\n",
			ch_number, XPC_PARTID(part));

		/* get hb checker to deactivate from the remote partition */
		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
		part_uv->reason = xpBadChannelNumber;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		wake_up_interruptible(&xpc_activate_IRQ_wq);
		return;
	}

	ch = &part->channels[ch_number];
	xpc_msgqueue_ref(ch);

	if (!(ch->flags & XPC_C_CONNECTED)) {
		xpc_msgqueue_deref(ch);
		return;
	}

	/* see if we're really dealing with an ACK for a previously sent msg */
	if (msg->hdr.size == 0) {
		xpc_handle_notify_mq_ack_uv(ch, msg);
		xpc_msgqueue_deref(ch);
		return;
	}

	/* we're dealing with a normal message sent via the notify_mq */
	ch_uv = &ch->sn.uv;

	msg_slot = ch_uv->recv_msg_slots +
	    (msg->hdr.msg_slot_number % ch->remote_nentries) * ch->entry_size;

	BUG_ON(msg->hdr.msg_slot_number != msg_slot->hdr.msg_slot_number);
	BUG_ON(msg_slot->hdr.size != 0);

	memcpy(msg_slot, msg, msg->hdr.size);

	xpc_put_fifo_entry_uv(&ch_uv->recv_msg_list, &msg_slot->hdr.u.next);

	if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) {
		/*
		 * If there is an existing idle kthread get it to deliver
		 * the payload, otherwise we'll have to get the channel mgr
		 * for this partition to create a kthread to do the delivery.
		 */
		if (atomic_read(&ch->kthreads_idle) > 0)
			wake_up_nr(&ch->idle_wq, 1);
		else
			xpc_send_chctl_local_msgrequest_uv(part, ch->number);
	}
	xpc_msgqueue_deref(ch);
}
static irqreturn_t
xpc_handle_notify_IRQ_uv(int irq, void *dev_id)
{
	struct xpc_notify_mq_msg_uv *msg;
	short partid;
	struct xpc_partition *part;

	while ((msg = gru_get_next_message(xpc_notify_mq_uv->gru_mq_desc)) !=
	       NULL) {

		partid = msg->hdr.partid;
		if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
			dev_err(xpc_part, "xpc_handle_notify_IRQ_uv() received "
				"invalid partid=0x%x in message\n", partid);
		} else {
			part = &xpc_partitions[partid];

			if (xpc_part_ref(part)) {
				xpc_handle_notify_mq_msg_uv(part, msg);
				xpc_part_deref(part);
			}
		}

		gru_free_message(xpc_notify_mq_uv->gru_mq_desc, msg);
	}

	return IRQ_HANDLED;
}
static int
xpc_n_of_deliverable_payloads_uv(struct xpc_channel *ch)
{
	return xpc_n_of_fifo_entries_uv(&ch->sn.uv.recv_msg_list);
}

static void
xpc_process_msg_chctl_flags_uv(struct xpc_partition *part, int ch_number)
{
	struct xpc_channel *ch = &part->channels[ch_number];
	int ndeliverable_payloads;

	xpc_msgqueue_ref(ch);

	ndeliverable_payloads = xpc_n_of_deliverable_payloads_uv(ch);

	if (ndeliverable_payloads > 0 &&
	    (ch->flags & XPC_C_CONNECTED) &&
	    (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE)) {

		xpc_activate_kthreads(ch, ndeliverable_payloads);
	}

	xpc_msgqueue_deref(ch);
}
static enum xp_retval
xpc_send_payload_uv(struct xpc_channel *ch, u32 flags, void *payload,
		    u16 payload_size, u8 notify_type, xpc_notify_func func,
		    void *key)
{
	enum xp_retval ret = xpSuccess;
	struct xpc_send_msg_slot_uv *msg_slot = NULL;
	struct xpc_notify_mq_msg_uv *msg;
	u8 msg_buffer[XPC_NOTIFY_MSG_SIZE_UV];
	size_t msg_size;

	DBUG_ON(notify_type != XPC_N_CALL);

	msg_size = sizeof(struct xpc_notify_mq_msghdr_uv) + payload_size;
	if (msg_size > ch->entry_size)
		return xpPayloadTooBig;

	xpc_msgqueue_ref(ch);

	if (ch->flags & XPC_C_DISCONNECTING) {
		ret = ch->reason;
		goto out_1;
	}
	if (!(ch->flags & XPC_C_CONNECTED)) {
		ret = xpNotConnected;
		goto out_1;
	}

	ret = xpc_allocate_msg_slot_uv(ch, flags, &msg_slot);
	if (ret != xpSuccess)
		goto out_1;

	if (func != NULL) {
		atomic_inc(&ch->n_to_notify);

		msg_slot->key = key;
		smp_wmb(); /* a non-NULL func must hit memory after the key */
		msg_slot->func = func;

		if (ch->flags & XPC_C_DISCONNECTING) {
			ret = ch->reason;
			goto out_2;
		}
	}

	msg = (struct xpc_notify_mq_msg_uv *)&msg_buffer;
	msg->hdr.partid = xp_partition_id;
	msg->hdr.ch_number = ch->number;
	msg->hdr.size = msg_size;
	msg->hdr.msg_slot_number = msg_slot->msg_slot_number;
	memcpy(&msg->payload, payload, payload_size);

	ret = xpc_send_gru_msg(ch->sn.uv.cached_notify_gru_mq_desc, msg,
			       msg_size);
	if (ret == xpSuccess)
		goto out_1;

	XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
out_2:
	if (func != NULL) {
		/*
		 * Try to NULL the msg_slot's func field. If we fail, then
		 * xpc_notify_senders_of_disconnect_uv() beat us to it, in which
		 * case we need to pretend we succeeded to send the message
		 * since the user will get a callout for the disconnect error
		 * by xpc_notify_senders_of_disconnect_uv(), and to also get an
		 * error returned here will confuse them. Additionally, since
		 * in this case the channel is being disconnected we don't need
		 * to put the msg_slot back on the free list.
		 */
		if (cmpxchg(&msg_slot->func, func, NULL) != func) {
			ret = xpSuccess;
			goto out_1;
		}

		msg_slot->key = NULL;
		atomic_dec(&ch->n_to_notify);
	}
	xpc_free_msg_slot_uv(ch, msg_slot);
out_1:
	xpc_msgqueue_deref(ch);
	return ret;
}
/*
 * Tell the callers of xpc_send_notify() that the status of their payloads
 * is unknown because the channel is now disconnecting.
 *
 * We don't worry about putting these msg_slots on the free list since the
 * msg_slots themselves are about to be kfree'd.
 */
static void
xpc_notify_senders_of_disconnect_uv(struct xpc_channel *ch)
{
	struct xpc_send_msg_slot_uv *msg_slot;
	int entry;

	DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING));

	for (entry = 0; entry < ch->local_nentries; entry++) {

		if (atomic_read(&ch->n_to_notify) == 0)
			break;

		msg_slot = &ch->sn.uv.send_msg_slots[entry];
		if (msg_slot->func != NULL)
			xpc_notify_sender_uv(ch, msg_slot, ch->reason);
	}
}
/*
 * Get the next deliverable message's payload.
 */
static void *
xpc_get_deliverable_payload_uv(struct xpc_channel *ch)
{
	struct xpc_fifo_entry_uv *entry;
	struct xpc_notify_mq_msg_uv *msg;
	void *payload = NULL;

	if (!(ch->flags & XPC_C_DISCONNECTING)) {
		entry = xpc_get_fifo_entry_uv(&ch->sn.uv.recv_msg_list);
		if (entry != NULL) {
			msg = container_of(entry, struct xpc_notify_mq_msg_uv,
					   hdr.u.next);
			payload = &msg->payload;
		}
	}
	return payload;
}
static void
xpc_received_payload_uv(struct xpc_channel *ch, void *payload)
{
	struct xpc_notify_mq_msg_uv *msg;
	enum xp_retval ret;

	msg = container_of(payload, struct xpc_notify_mq_msg_uv, payload);

	/* return an ACK to the sender of this message */

	msg->hdr.partid = xp_partition_id;
	msg->hdr.size = 0;	/* size of zero indicates this is an ACK */

	ret = xpc_send_gru_msg(ch->sn.uv.cached_notify_gru_mq_desc, msg,
			       sizeof(struct xpc_notify_mq_msghdr_uv));
	if (ret != xpSuccess)
		XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);

	msg->hdr.msg_slot_number += ch->remote_nentries;
}
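
/*
 * The uv implementations are published through this ops table; the generic
 * XPC code calls them only via xpc_arch_ops, which is what lets the same
 * core drive either the sn2 or the uv transport.
 */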
static struct xpc_arch_operations xpc_arch_ops_uv = {
	.setup_partitions = xpc_setup_partitions_uv,
	.teardown_partitions = xpc_teardown_partitions_uv,
	.process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv,
	.get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_uv,
	.setup_rsvd_page = xpc_setup_rsvd_page_uv,

	.allow_hb = xpc_allow_hb_uv,
	.disallow_hb = xpc_disallow_hb_uv,
	.disallow_all_hbs = xpc_disallow_all_hbs_uv,
	.increment_heartbeat = xpc_increment_heartbeat_uv,
	.offline_heartbeat = xpc_offline_heartbeat_uv,
	.online_heartbeat = xpc_online_heartbeat_uv,
	.heartbeat_init = xpc_heartbeat_init_uv,
	.heartbeat_exit = xpc_heartbeat_exit_uv,
	.get_remote_heartbeat = xpc_get_remote_heartbeat_uv,

	.request_partition_activation =
		xpc_request_partition_activation_uv,
	.request_partition_reactivation =
		xpc_request_partition_reactivation_uv,
	.request_partition_deactivation =
		xpc_request_partition_deactivation_uv,
	.cancel_partition_deactivation_request =
		xpc_cancel_partition_deactivation_request_uv,

	.setup_ch_structures = xpc_setup_ch_structures_uv,
	.teardown_ch_structures = xpc_teardown_ch_structures_uv,

	.make_first_contact = xpc_make_first_contact_uv,

	.get_chctl_all_flags = xpc_get_chctl_all_flags_uv,
	.send_chctl_closerequest = xpc_send_chctl_closerequest_uv,
	.send_chctl_closereply = xpc_send_chctl_closereply_uv,
	.send_chctl_openrequest = xpc_send_chctl_openrequest_uv,
	.send_chctl_openreply = xpc_send_chctl_openreply_uv,
	.send_chctl_opencomplete = xpc_send_chctl_opencomplete_uv,
	.process_msg_chctl_flags = xpc_process_msg_chctl_flags_uv,

	.save_remote_msgqueue_pa = xpc_save_remote_msgqueue_pa_uv,

	.setup_msg_structures = xpc_setup_msg_structures_uv,
	.teardown_msg_structures = xpc_teardown_msg_structures_uv,

	.indicate_partition_engaged = xpc_indicate_partition_engaged_uv,
	.indicate_partition_disengaged = xpc_indicate_partition_disengaged_uv,
	.assume_partition_disengaged = xpc_assume_partition_disengaged_uv,
	.partition_engaged = xpc_partition_engaged_uv,
	.any_partition_engaged = xpc_any_partition_engaged_uv,

	.n_of_deliverable_payloads = xpc_n_of_deliverable_payloads_uv,
	.send_payload = xpc_send_payload_uv,
	.get_deliverable_payload = xpc_get_deliverable_payload_uv,
	.received_payload = xpc_received_payload_uv,
	.notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_uv,
};
int
xpc_init_uv(void)
{
	xpc_arch_ops = xpc_arch_ops_uv;

	if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
		dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n",
			XPC_MSG_HDR_MAX_SIZE);
		return -E2BIG;
	}

	xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0,
						  XPC_ACTIVATE_IRQ_NAME,
						  xpc_handle_activate_IRQ_uv);
	if (IS_ERR(xpc_activate_mq_uv))
		return PTR_ERR(xpc_activate_mq_uv);

	xpc_notify_mq_uv = xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, 0,
						XPC_NOTIFY_IRQ_NAME,
						xpc_handle_notify_IRQ_uv);
	if (IS_ERR(xpc_notify_mq_uv)) {
		xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
		return PTR_ERR(xpc_notify_mq_uv);
	}

	return 0;
}

void
xpc_exit_uv(void)
{
	xpc_destroy_gru_mq_uv(xpc_notify_mq_uv);
	xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
}