powerpc/powernv: Add queue mechanism for early messages
author Deb McLemore <debmc@linux.vnet.ibm.com>
Mon, 21 May 2018 02:04:38 +0000 (21:04 -0500)
committer Michael Ellerman <mpe@ellerman.id.au>
Fri, 11 Oct 2019 08:42:06 +0000 (19:42 +1100)
When issuing a BMC soft poweroff during IPL, the poweroff request can be
lost, so the machine does not power off.

This is because OPAL messages can be received before the opal-power
code has registered its notifiers.

Fix it by buffering messages. If we receive a message and do not yet
have a handler for that type, store the message and replay it when a
handler for that type is registered.

Signed-off-by: Deb McLemore <debmc@linux.vnet.ibm.com>
[mpe: Single unlock path in opal_message_notifier_register(), tweak
      comments/formatting and change log.]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/1526868278-4204-1-git-send-email-debmc@linux.vnet.ibm.com
arch/powerpc/platforms/powernv/opal.c

index 38e90270280bf62420f6793d2760088ea4cbb999..b1d064842da404bc5c49fc56993fe7cf24d2ff65 100644
 
 #include "powernv.h"
 
+#define OPAL_MSG_QUEUE_MAX 16
+
+struct opal_msg_node {
+       struct list_head        list;
+       struct opal_msg         msg;
+};
+
+static DEFINE_SPINLOCK(msg_list_lock);
+static LIST_HEAD(msg_list);
+
 /* /sys/firmware/opal */
 struct kobject *opal_kobj;
 
@@ -50,6 +60,8 @@ struct mcheck_recoverable_range {
        u64 recover_addr;
 };
 
+static int msg_list_size;
+
 static struct mcheck_recoverable_range *mc_recoverable_range;
 static int mc_recoverable_range_len;
 
@@ -237,6 +249,43 @@ static int __init opal_register_exception_handlers(void)
 }
 machine_early_initcall(powernv, opal_register_exception_handlers);
 
+static void queue_replay_msg(void *msg)
+{
+       struct opal_msg_node *msg_node;
+
+       if (msg_list_size < OPAL_MSG_QUEUE_MAX) {
+               msg_node = kzalloc(sizeof(*msg_node), GFP_ATOMIC);
+               if (msg_node) {
+                       INIT_LIST_HEAD(&msg_node->list);
+                       memcpy(&msg_node->msg, msg, sizeof(struct opal_msg));
+                       list_add_tail(&msg_node->list, &msg_list);
+                       msg_list_size++;
+               } else
+                       pr_warn_once("message queue no memory\n");
+
+               if (msg_list_size >= OPAL_MSG_QUEUE_MAX)
+                       pr_warn_once("message queue full\n");
+       }
+}
+
+static void dequeue_replay_msg(enum opal_msg_type msg_type)
+{
+       struct opal_msg_node *msg_node, *tmp;
+
+       list_for_each_entry_safe(msg_node, tmp, &msg_list, list) {
+               if (be32_to_cpu(msg_node->msg.msg_type) != msg_type)
+                       continue;
+
+               atomic_notifier_call_chain(&opal_msg_notifier_head[msg_type],
+                                       msg_type,
+                                       &msg_node->msg);
+
+               list_del(&msg_node->list);
+               kfree(msg_node);
+               msg_list_size--;
+       }
+}
+
 /*
  * Opal message notifier based on message type. Allow subscribers to get
  * notified for specific message type.
@@ -244,14 +293,30 @@ machine_early_initcall(powernv, opal_register_exception_handlers);
 int opal_message_notifier_register(enum opal_msg_type msg_type,
                                        struct notifier_block *nb)
 {
+       int ret;
+       unsigned long flags;
+
        if (!nb || msg_type >= OPAL_MSG_TYPE_MAX) {
                pr_warn("%s: Invalid arguments, msg_type:%d\n",
                        __func__, msg_type);
                return -EINVAL;
        }
 
-       return atomic_notifier_chain_register(
-                               &opal_msg_notifier_head[msg_type], nb);
+       spin_lock_irqsave(&msg_list_lock, flags);
+       ret = atomic_notifier_chain_register(
+               &opal_msg_notifier_head[msg_type], nb);
+
+       /*
+        * If the registration succeeded, replay any queued messages that came
+        * in prior to the notifier chain registration. msg_list_lock is held
+        * here to ensure they're delivered prior to any subsequent messages.
+        */
+       if (ret == 0)
+               dequeue_replay_msg(msg_type);
+
+       spin_unlock_irqrestore(&msg_list_lock, flags);
+
+       return ret;
 }
 EXPORT_SYMBOL_GPL(opal_message_notifier_register);
 
@@ -265,6 +330,23 @@ EXPORT_SYMBOL_GPL(opal_message_notifier_unregister);
 
 static void opal_message_do_notify(uint32_t msg_type, void *msg)
 {
+       unsigned long flags;
+       bool queued = false;
+
+       spin_lock_irqsave(&msg_list_lock, flags);
+       if (opal_msg_notifier_head[msg_type].head == NULL) {
+               /*
+                * Queue up the msg since no notifiers have registered
+                * yet for this msg_type.
+                */
+               queue_replay_msg(msg);
+               queued = true;
+       }
+       spin_unlock_irqrestore(&msg_list_lock, flags);
+
+       if (queued)
+               return;
+
        /* notify subscribers */
        atomic_notifier_call_chain(&opal_msg_notifier_head[msg_type],
                                        msg_type, msg);
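
For context, here is a minimal sketch of the consumer side, loosely modeled
on how opal-power subscribes to shutdown messages. The handler name and body
below are illustrative and not part of this patch; only
opal_message_notifier_register(), struct opal_msg and OPAL_MSG_SHUTDOWN come
from the kernel sources.

/* Illustrative consumer sketch; assumes <linux/notifier.h> and <asm/opal.h>. */
static int example_shutdown_event(struct notifier_block *nb,
				  unsigned long msg_type, void *msg)
{
	struct opal_msg *m = msg;

	/* params[0] carries the shutdown sub-type (e.g. soft-off vs reboot). */
	pr_info("OPAL shutdown message, sub-type %llu\n",
		be64_to_cpu(m->params[0]));
	return 0;
}

static struct notifier_block example_shutdown_nb = {
	.notifier_call	= example_shutdown_event,
};

static int __init example_shutdown_init(void)
{
	/*
	 * Registration replays any OPAL_MSG_SHUTDOWN messages queued before
	 * this handler existed, then later messages arrive through the
	 * normal notifier path.
	 */
	return opal_message_notifier_register(OPAL_MSG_SHUTDOWN,
					      &example_shutdown_nb);
}
machine_subsys_initcall(powernv, example_shutdown_init);

Because msg_list_lock is held across both the chain registration and the
replay, a buffered soft-poweroff message reaches the handler before any
message that arrives after registration.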