// SPDX-License-Identifier: GPL-2.0+
/*
 * The interface to the IPMI driver for the system interfaces (KCS, SMIC,
 *
 * Author: MontaVista Software, Inc.
 *         Corey Minyard <minyard@mvista.com>
 *
 * Copyright 2002 MontaVista Software Inc.
 * Copyright 2006 IBM Corp., Christian Krafft <krafft@de.ibm.com>
 *
 * This file holds the "policy" for the interface to the SMI state
 * machine.  It does the configuration, handles timers and interrupts,
 * and drives the real SMI state machine.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/rcupdate.h>
#include <linux/ipmi.h>
#include <linux/ipmi_smi.h>
#include <linux/string.h>
#include <linux/ctype.h>

#define PFX "ipmi_si: "

/* Measure times between events in the driver. */

/* Call every 10 ms. */
#define SI_TIMEOUT_TIME_USEC	10000
#define SI_USEC_PER_JIFFY	(1000000/HZ)
#define SI_TIMEOUT_JIFFIES	(SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
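/*
 * Illustrative arithmetic (the actual numbers depend on the kernel's
 * HZ configuration): with HZ=250, SI_USEC_PER_JIFFY is 1000000/250 =
 * 4000, so SI_TIMEOUT_JIFFIES is 10000/4000 = 2 jiffies after integer
 * division, i.e. the timeout timer fires roughly every 8 ms.
 */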
#define SI_SHORT_TIMEOUT_USEC	250 /* .25ms when the SM requests a short timeout */

/* FIXME - add watchdog stuff. */

/* Some BT-specific defines we need here. */
#define IPMI_BT_INTMASK_REG		2
#define IPMI_BT_INTMASK_CLEAR_IRQ_BIT	2
#define IPMI_BT_INTMASK_ENABLE_IRQ_BIT	1
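/*
 * Per the IPMI spec's BT interface, bit 0 of the INTMASK register is
 * the IRQ enable and bit 1 is a write-1-to-clear acknowledge for a
 * pending IRQ; that is what the two bit defines above encode.
 */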
static const char * const si_to_str[] = { "invalid", "kcs", "smic", "bt" };

static int initialized;

/*
 * Indexes into stats[] in smi_info below.
 */
enum si_stat_indexes {
	/*
	 * Number of times the driver requested a timer while an operation
	 * was in progress.
	 */
	SI_STAT_short_timeouts = 0,

	/*
	 * Number of times the driver requested a timer while nothing was in
	 * progress.
	 */
	SI_STAT_long_timeouts,

	/* Number of times the interface was idle while being polled. */
	SI_STAT_idles,

	/* Number of interrupts the driver handled. */
	SI_STAT_interrupts,

	/* Number of times the driver got an ATTN from the hardware. */
	SI_STAT_attentions,

	/* Number of times the driver requested flags from the hardware. */
	SI_STAT_flag_fetches,

	/* Number of times the hardware didn't follow the state machine. */
	SI_STAT_hosed_count,

	/* Number of completed messages. */
	SI_STAT_complete_transactions,

	/* Number of IPMI events received from the hardware. */
	SI_STAT_events,

	/* Number of watchdog pretimeouts. */
	SI_STAT_watchdog_pretimeouts,

	/* Number of asynchronous messages received. */
	SI_STAT_incoming_messages,

	/* This *must* remain last, add new values above this. */
	SI_NUM_STATS
};

struct smi_info {
	int intf_num;
	struct ipmi_smi *intf;
	struct si_sm_data *si_sm;
	const struct si_sm_handlers *handlers;

	struct ipmi_smi_msg *waiting_msg;
	struct ipmi_smi_msg *curr_msg;
	enum si_intf_state si_state;

	/*
	 * Used to handle the various types of I/O that can occur with
	 * IPMI.
	 */
	struct si_sm_io io;

	/*
	 * Per-OEM handler, called from handle_flags().  Returns 1
	 * when handle_flags() needs to be re-run or 0 indicating it
	 * set si_state itself.
	 */
	int (*oem_data_avail_handler)(struct smi_info *smi_info);

	/*
	 * Flags from the last GET_MSG_FLAGS command, used when an ATTN
	 * is set to hold the flags until we are done handling everything
	 * from the flags.
	 */
#define RECEIVE_MSG_AVAIL	0x01
#define EVENT_MSG_BUFFER_FULL	0x02
#define WDT_PRE_TIMEOUT_INT	0x08
#define OEM0_DATA_AVAIL		0x20
#define OEM1_DATA_AVAIL		0x40
#define OEM2_DATA_AVAIL		0x80
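/*
 * These are bit positions in the flags byte returned by the IPMI
 * Get Message Flags command; OEM_DATA_AVAIL below just collects the
 * three OEM bits so they can be tested with a single mask.
 */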
#define OEM_DATA_AVAIL		(OEM0_DATA_AVAIL | \
				 OEM1_DATA_AVAIL | \
				 OEM2_DATA_AVAIL)
	unsigned char msg_flags;

	/* Does the BMC have an event buffer? */
	bool has_event_buffer;

	/*
	 * If set to true, this will request events the next time the
	 * state machine is idle.
	 */
	atomic_t req_events;

	/*
	 * If true, run the state machine to completion on every send
	 * call.  Generally used after a panic to make sure stuff goes
	 * out.
	 */
	bool run_to_completion;

	/* The timer for this si. */
	struct timer_list si_timer;

	/* This flag is set if the timer can be started. */
	bool timer_can_start;

	/* This flag is set if the timer is running (timer_pending() isn't enough). */
	bool timer_running;

	/* The time (in jiffies) the last timeout occurred at. */
	unsigned long last_timeout_jiffies;

	/* Are we waiting for the events, pretimeouts, received msgs? */
	atomic_t need_watch;

	/*
	 * The driver will disable interrupts when it gets into a
	 * situation where it cannot handle messages due to lack of
	 * memory.  Once that situation clears up, it will re-enable
	 * the interrupt.
	 */
	bool interrupt_disabled;

	/*
	 * Does the BMC support events?
	 */
	bool supports_event_msg_buff;

	/*
	 * Can we disable the global enables receive irq bit?  There are
	 * currently two forms of brokenness: some systems cannot disable
	 * the bit (which is technically within the spec but a bad idea)
	 * and some systems have the bit forced to zero even though
	 * interrupts work (which is clearly outside the spec).  The next
	 * bool tells which form of brokenness is present.
	 */
	bool cannot_disable_irq;

	/*
	 * Some systems are broken and cannot set the irq enable
	 * bit, even if they support interrupts.
	 */
	bool irq_enable_broken;

	/*
	 * Did we get an attention that we did not handle?
	 */
	bool got_attn;

	/* From the get device id response... */
	struct ipmi_device_id device_id;

	/* Default driver model device. */
	struct platform_device *pdev;

	/* Have we added the device group to the device? */
	bool dev_group_added;

	/* Have we added the platform device? */
	bool pdev_registered;

	/* Counters and things for the proc filesystem. */
	atomic_t stats[SI_NUM_STATS];

	struct task_struct *thread;

	struct list_head link;
};

#define smi_inc_stat(smi, stat) \
	atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
#define smi_get_stat(smi, stat) \
	((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
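/*
 * For example, smi_inc_stat(smi_info, short_timeouts) expands to
 * atomic_inc(&smi_info->stats[SI_STAT_short_timeouts]).
 */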
#define IPMI_MAX_INTFS 4
static int force_kipmid[IPMI_MAX_INTFS];
static int num_force_kipmid;

static unsigned int kipmid_max_busy_us[IPMI_MAX_INTFS];
static int num_max_busy_us;

static bool unload_when_empty = true;

static int try_smi_init(struct smi_info *smi);
static void shutdown_one_si(struct smi_info *smi_info);
static void cleanup_one_si(struct smi_info *smi_info);
static void cleanup_ipmi_si(void);

void debug_timestamp(char *msg)
	getnstimeofday64(&t);
	pr_debug("**%s: %lld.%9.9ld\n", msg, (long long) t.tv_sec, t.tv_nsec);
#define debug_timestamp(x)

static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
static int register_xaction_notifier(struct notifier_block *nb)
	return atomic_notifier_chain_register(&xaction_notifier_list, nb);

static void deliver_recv_msg(struct smi_info *smi_info,
			     struct ipmi_smi_msg *msg)
	/* Deliver the message to the upper layer. */
	ipmi_smi_msg_received(smi_info->intf, msg);

static void return_hosed_msg(struct smi_info *smi_info, int cCode)
	struct ipmi_smi_msg *msg = smi_info->curr_msg;

	if (cCode < 0 || cCode > IPMI_ERR_UNSPECIFIED)
		cCode = IPMI_ERR_UNSPECIFIED;
	/* else use it as is */

	/* Make it a response */
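	/*
	 * The NetFN lives in the top six bits of byte 0, so OR-ing in 4
	 * (1 << 2) turns the request NetFN into the corresponding
	 * response NetFN (request NetFN + 1).
	 */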
	msg->rsp[0] = msg->data[0] | 4;
	msg->rsp[1] = msg->data[1];

	smi_info->curr_msg = NULL;
	deliver_recv_msg(smi_info, msg);

static enum si_sm_result start_next_msg(struct smi_info *smi_info)
	if (!smi_info->waiting_msg) {
		smi_info->curr_msg = NULL;
		smi_info->curr_msg = smi_info->waiting_msg;
		smi_info->waiting_msg = NULL;
		debug_timestamp("Start2");
		err = atomic_notifier_call_chain(&xaction_notifier_list,
						 0, smi_info);
		if (err & NOTIFY_STOP_MASK) {
			rv = SI_SM_CALL_WITHOUT_DELAY;
		err = smi_info->handlers->start_transaction(
			smi_info->si_sm,
			smi_info->curr_msg->data,
			smi_info->curr_msg->data_size);
			return_hosed_msg(smi_info, err);
		rv = SI_SM_CALL_WITHOUT_DELAY;

static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
	if (!smi_info->timer_can_start)
	smi_info->last_timeout_jiffies = jiffies;
	mod_timer(&smi_info->si_timer, new_val);
	smi_info->timer_running = true;

/*
 * Start a new message and (re)start the timer and thread.
 */
static void start_new_msg(struct smi_info *smi_info, unsigned char *msg,
			  unsigned int size)
	smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);

	if (smi_info->thread)
		wake_up_process(smi_info->thread);

	smi_info->handlers->start_transaction(smi_info->si_sm, msg, size);

static void start_check_enables(struct smi_info *smi_info)
	unsigned char msg[2];

	msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
	msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;

	start_new_msg(smi_info, msg, 2);
	smi_info->si_state = SI_CHECKING_ENABLES;

static void start_clear_flags(struct smi_info *smi_info)
	unsigned char msg[3];

	/* Make sure the watchdog pre-timeout flag is not set at startup. */
	msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
	msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
	msg[2] = WDT_PRE_TIMEOUT_INT;

	start_new_msg(smi_info, msg, 3);
	smi_info->si_state = SI_CLEARING_FLAGS;

static void start_getting_msg_queue(struct smi_info *smi_info)
	smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
	smi_info->curr_msg->data_size = 2;

	start_new_msg(smi_info, smi_info->curr_msg->data,
		      smi_info->curr_msg->data_size);
	smi_info->si_state = SI_GETTING_MESSAGES;

static void start_getting_events(struct smi_info *smi_info)
	smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
	smi_info->curr_msg->data_size = 2;

	start_new_msg(smi_info, smi_info->curr_msg->data,
		      smi_info->curr_msg->data_size);
	smi_info->si_state = SI_GETTING_EVENTS;

/*
 * When we have a situation where we run out of memory and cannot
 * allocate messages, we just leave them in the BMC and run the system
 * polled until we can allocate some memory.  Once we have some
 * memory, we will re-enable the interrupt.
 *
 * Note that we cannot just use disable_irq(), since the interrupt may
 * be shared.
 */
static inline bool disable_si_irq(struct smi_info *smi_info)
	if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) {
		smi_info->interrupt_disabled = true;
		start_check_enables(smi_info);

static inline bool enable_si_irq(struct smi_info *smi_info)
	if ((smi_info->io.irq) && (smi_info->interrupt_disabled)) {
		smi_info->interrupt_disabled = false;
		start_check_enables(smi_info);

/*
 * Allocate a message.  If unable to allocate, start the interrupt
 * disable process and return NULL.  If able to allocate but
 * interrupts are disabled, free the message and return NULL after
 * starting the interrupt enable process.
 */
static struct ipmi_smi_msg *alloc_msg_handle_irq(struct smi_info *smi_info)
	struct ipmi_smi_msg *msg;

	msg = ipmi_alloc_smi_msg();
		if (!disable_si_irq(smi_info))
			smi_info->si_state = SI_NORMAL;
	} else if (enable_si_irq(smi_info)) {
		ipmi_free_smi_msg(msg);

static void handle_flags(struct smi_info *smi_info)
	if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
		/* Watchdog pre-timeout */
		smi_inc_stat(smi_info, watchdog_pretimeouts);

		start_clear_flags(smi_info);
		smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
		ipmi_smi_watchdog_pretimeout(smi_info->intf);
	} else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
		/* Messages available. */
		smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
		if (!smi_info->curr_msg)

		start_getting_msg_queue(smi_info);
	} else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
		/* Events available. */
		smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
		if (!smi_info->curr_msg)

		start_getting_events(smi_info);
	} else if (smi_info->msg_flags & OEM_DATA_AVAIL &&
		   smi_info->oem_data_avail_handler) {
		if (smi_info->oem_data_avail_handler(smi_info))
		smi_info->si_state = SI_NORMAL;

/*
 * Global enables we care about.
 */
#define GLOBAL_ENABLES_MASK (IPMI_BMC_EVT_MSG_BUFF | IPMI_BMC_RCV_MSG_INTR | \
			     IPMI_BMC_EVT_MSG_INTR)
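/*
 * Only the bits in GLOBAL_ENABLES_MASK are managed by this driver;
 * when the enables are rewritten in the SI_CHECKING_ENABLES state
 * below, any other bits read back from the BMC are preserved as-is.
 */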
static u8 current_global_enables(struct smi_info *smi_info, u8 base,
				 bool *irq_on)
	if (smi_info->supports_event_msg_buff)
		enables |= IPMI_BMC_EVT_MSG_BUFF;

	if (((smi_info->io.irq && !smi_info->interrupt_disabled) ||
	     smi_info->cannot_disable_irq) &&
	    !smi_info->irq_enable_broken)
		enables |= IPMI_BMC_RCV_MSG_INTR;

	if (smi_info->supports_event_msg_buff &&
	    smi_info->io.irq && !smi_info->interrupt_disabled &&
	    !smi_info->irq_enable_broken)
		enables |= IPMI_BMC_EVT_MSG_INTR;

	*irq_on = enables & (IPMI_BMC_EVT_MSG_INTR | IPMI_BMC_RCV_MSG_INTR);

static void check_bt_irq(struct smi_info *smi_info, bool irq_on)
	u8 irqstate = smi_info->io.inputb(&smi_info->io, IPMI_BT_INTMASK_REG);

	irqstate &= IPMI_BT_INTMASK_ENABLE_IRQ_BIT;

	if ((bool)irqstate == irq_on)

		smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
				     IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
		smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG, 0);

static void handle_transaction_done(struct smi_info *smi_info)
	struct ipmi_smi_msg *msg;

	debug_timestamp("Done");
	switch (smi_info->si_state) {
		if (!smi_info->curr_msg)

		smi_info->curr_msg->rsp_size
			= smi_info->handlers->get_result(
				smi_info->si_sm,
				smi_info->curr_msg->rsp,
				IPMI_MAX_MSG_LENGTH);

		/*
		 * Do this here because deliver_recv_msg() releases the
		 * lock, and a new message can be put in during the
		 * time the lock is released.
		 */
		msg = smi_info->curr_msg;
		smi_info->curr_msg = NULL;
		deliver_recv_msg(smi_info, msg);

	case SI_GETTING_FLAGS:
		unsigned char msg[4];

		/* We got the flags from the SMI, now handle them. */
		len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
			/* Error fetching flags, just give up for now. */
			smi_info->si_state = SI_NORMAL;
		} else if (len < 4) {
			/*
			 * Hmm, no flags.  That's technically illegal, but
			 * don't use uninitialized data.
			 */
			smi_info->si_state = SI_NORMAL;
			smi_info->msg_flags = msg[3];
			handle_flags(smi_info);

	case SI_CLEARING_FLAGS:
		unsigned char msg[3];

		/* We cleared the flags. */
		smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
			/* Error clearing flags */
			dev_warn(smi_info->io.dev,
				 "Error clearing flags: %2.2x\n", msg[2]);
		smi_info->si_state = SI_NORMAL;

	case SI_GETTING_EVENTS:
		smi_info->curr_msg->rsp_size
			= smi_info->handlers->get_result(
				smi_info->si_sm,
				smi_info->curr_msg->rsp,
				IPMI_MAX_MSG_LENGTH);

		/*
		 * Do this here because deliver_recv_msg() releases the
		 * lock, and a new message can be put in during the
		 * time the lock is released.
		 */
		msg = smi_info->curr_msg;
		smi_info->curr_msg = NULL;
		if (msg->rsp[2] != 0) {
			/* Error getting event, probably done. */

			/* Take off the event flag. */
			smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
			handle_flags(smi_info);
			smi_inc_stat(smi_info, events);

			/*
			 * Do this before we deliver the message
			 * because delivering the message releases the
			 * lock and something else can mess with the
			 * state.
			 */
			handle_flags(smi_info);

			deliver_recv_msg(smi_info, msg);

	case SI_GETTING_MESSAGES:
		smi_info->curr_msg->rsp_size
			= smi_info->handlers->get_result(
				smi_info->si_sm,
				smi_info->curr_msg->rsp,
				IPMI_MAX_MSG_LENGTH);

		/*
		 * Do this here because deliver_recv_msg() releases the
		 * lock, and a new message can be put in during the
		 * time the lock is released.
		 */
		msg = smi_info->curr_msg;
		smi_info->curr_msg = NULL;
		if (msg->rsp[2] != 0) {
			/* Error getting message, probably done. */

			/* Take off the msg flag. */
			smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
			handle_flags(smi_info);
			smi_inc_stat(smi_info, incoming_messages);

			/*
			 * Do this before we deliver the message
			 * because delivering the message releases the
			 * lock and something else can mess with the
			 * state.
			 */
			handle_flags(smi_info);

			deliver_recv_msg(smi_info, msg);

	case SI_CHECKING_ENABLES:
		unsigned char msg[4];

		/* We got the flags from the SMI, now handle them. */
		smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
			dev_warn(smi_info->io.dev,
				 "Couldn't get irq info: %x.\n", msg[2]);
			dev_warn(smi_info->io.dev,
				 "Maybe ok, but ipmi might run very slowly.\n");
			smi_info->si_state = SI_NORMAL;
		enables = current_global_enables(smi_info, 0, &irq_on);
		if (smi_info->io.si_type == SI_BT)
			/* BT has its own interrupt enable bit. */
			check_bt_irq(smi_info, irq_on);
		if (enables != (msg[3] & GLOBAL_ENABLES_MASK)) {
			/* Enables are not correct, fix them. */
			msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
			msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
			msg[2] = enables | (msg[3] & ~GLOBAL_ENABLES_MASK);
			smi_info->handlers->start_transaction(
				smi_info->si_sm, msg, 3);
			smi_info->si_state = SI_SETTING_ENABLES;
		} else if (smi_info->supports_event_msg_buff) {
			smi_info->curr_msg = ipmi_alloc_smi_msg();
			if (!smi_info->curr_msg) {
				smi_info->si_state = SI_NORMAL;
			start_getting_events(smi_info);
			smi_info->si_state = SI_NORMAL;

	case SI_SETTING_ENABLES:
		unsigned char msg[4];

		smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
			dev_warn(smi_info->io.dev,
				 "Could not set the global enables: 0x%x.\n",
				 msg[2]);

		if (smi_info->supports_event_msg_buff) {
			smi_info->curr_msg = ipmi_alloc_smi_msg();
			if (!smi_info->curr_msg) {
				smi_info->si_state = SI_NORMAL;
			start_getting_events(smi_info);
			smi_info->si_state = SI_NORMAL;

/*
 * Called on timeouts and events.  Timeouts should pass the elapsed
 * time, interrupts should pass in zero.  Must be called with
 * si_lock held and interrupts disabled.
 */
static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
					   int time)
	enum si_sm_result si_sm_result;

	/*
	 * There used to be a loop here that waited a little while
	 * (around 25us) before giving up.  That turned out to be
	 * pointless, the minimum delays I was seeing were in the 300us
	 * range, which is far too long to wait in an interrupt.  So
	 * we just run until the state machine tells us something
	 * happened or it needs a delay.
	 */
	si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);

	while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
		si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);

	if (si_sm_result == SI_SM_TRANSACTION_COMPLETE) {
		smi_inc_stat(smi_info, complete_transactions);

		handle_transaction_done(smi_info);

	} else if (si_sm_result == SI_SM_HOSED) {
		smi_inc_stat(smi_info, hosed_count);

		/*
		 * Do this before return_hosed_msg, because that
		 * releases the lock.
		 */
		smi_info->si_state = SI_NORMAL;
		if (smi_info->curr_msg != NULL) {
			/*
			 * If we were handling a user message, format
			 * a response to send to the upper layer to
			 * tell it about the error.
			 */
			return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED);

	/*
	 * We prefer handling attn over new messages.  But don't do
	 * this if there is not yet an upper layer to handle anything.
	 */
	if (si_sm_result == SI_SM_ATTN || smi_info->got_attn) {
		unsigned char msg[2];

		if (smi_info->si_state != SI_NORMAL) {
			/*
			 * We got an ATTN, but we are doing something else.
			 * Handle the ATTN later.
			 */
			smi_info->got_attn = true;
			smi_info->got_attn = false;
			smi_inc_stat(smi_info, attentions);

			/*
			 * Got an attn, send down a get message flags to see
			 * what's causing it.  It would be better to handle
			 * this in the upper layer, but due to the way
			 * interrupts work with the SMI, that's not really
			 * possible.
			 */
			msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
			msg[1] = IPMI_GET_MSG_FLAGS_CMD;

			start_new_msg(smi_info, msg, 2);
			smi_info->si_state = SI_GETTING_FLAGS;

	/* If we are currently idle, try to start the next message. */
	if (si_sm_result == SI_SM_IDLE) {
		smi_inc_stat(smi_info, idles);

		si_sm_result = start_next_msg(smi_info);
		if (si_sm_result != SI_SM_IDLE)

	if ((si_sm_result == SI_SM_IDLE)
	    && (atomic_read(&smi_info->req_events))) {
		/*
		 * We are idle and the upper layer requested that I fetch
		 * events, so do so.
		 */
		atomic_set(&smi_info->req_events, 0);

		/*
		 * Take this opportunity to check the interrupt and
		 * message enable state for the BMC.  The BMC can be
		 * asynchronously reset, and may thus get interrupts
		 * disabled and messages disabled.
		 */
		if (smi_info->supports_event_msg_buff || smi_info->io.irq) {
			start_check_enables(smi_info);
			smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
			if (!smi_info->curr_msg)

			start_getting_events(smi_info);

	if (si_sm_result == SI_SM_IDLE && smi_info->timer_running) {
		/* OK if this fails, the timer will just go off. */
		if (del_timer(&smi_info->si_timer))
			smi_info->timer_running = false;

static void check_start_timer_thread(struct smi_info *smi_info)
	if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) {
		smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);

		if (smi_info->thread)
			wake_up_process(smi_info->thread);

		start_next_msg(smi_info);
		smi_event_handler(smi_info, 0);

static void flush_messages(void *send_info)
	struct smi_info *smi_info = send_info;
	enum si_sm_result result;

	/*
	 * Currently, this function is called only in run-to-completion
	 * mode.  This means we are single-threaded, no need for locks.
	 */
	result = smi_event_handler(smi_info, 0);
	while (result != SI_SM_IDLE) {
		udelay(SI_SHORT_TIMEOUT_USEC);
		result = smi_event_handler(smi_info, SI_SHORT_TIMEOUT_USEC);

static void sender(void *send_info,
		   struct ipmi_smi_msg *msg)
	struct smi_info *smi_info = send_info;

	debug_timestamp("Enqueue");

	if (smi_info->run_to_completion) {
		/*
		 * If we are running to completion, start it.  Upper
		 * layer will call flush_messages to clear it out.
		 */
		smi_info->waiting_msg = msg;

	spin_lock_irqsave(&smi_info->si_lock, flags);
	/*
	 * The following two lines don't need to be under the lock for
	 * the lock's sake, but they do need SMP memory barriers to
	 * avoid getting things out of order.  We are already claiming
	 * the lock, anyway, so just do it under the lock to avoid the
	 * ordering problems.
	 */
	BUG_ON(smi_info->waiting_msg);
	smi_info->waiting_msg = msg;
	check_start_timer_thread(smi_info);
	spin_unlock_irqrestore(&smi_info->si_lock, flags);

static void set_run_to_completion(void *send_info, bool i_run_to_completion)
	struct smi_info *smi_info = send_info;

	smi_info->run_to_completion = i_run_to_completion;
	if (i_run_to_completion)
		flush_messages(smi_info);

/*
 * Use -1 in the nsec value of the busy waiting timespec to tell that
 * we are spinning in kipmid looking for something and not delaying
 * between checks.
 */
static inline void ipmi_si_set_not_busy(struct timespec64 *ts)

static inline int ipmi_si_is_busy(struct timespec64 *ts)
	return ts->tv_nsec != -1;
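/*
 * tv_nsec == -1 is the "not busy-waiting" sentinel described in the
 * comment above; ipmi_si_set_not_busy() stores it and
 * ipmi_si_is_busy() checks for it.
 */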
static inline int ipmi_thread_busy_wait(enum si_sm_result smi_result,
					const struct smi_info *smi_info,
					struct timespec64 *busy_until)
	unsigned int max_busy_us = 0;

	if (smi_info->intf_num < num_max_busy_us)
		max_busy_us = kipmid_max_busy_us[smi_info->intf_num];
	if (max_busy_us == 0 || smi_result != SI_SM_CALL_WITH_DELAY)
		ipmi_si_set_not_busy(busy_until);
	else if (!ipmi_si_is_busy(busy_until)) {
		getnstimeofday64(busy_until);
		timespec64_add_ns(busy_until, max_busy_us*NSEC_PER_USEC);
		struct timespec64 now;

		getnstimeofday64(&now);
		if (unlikely(timespec64_compare(&now, busy_until) > 0)) {
			ipmi_si_set_not_busy(busy_until);

/*
 * A busy-waiting loop for speeding up IPMI operation.
 *
 * Lousy hardware makes this hard.  This is only enabled for systems
 * that are not BT and do not have interrupts.  It starts spinning
 * when an operation is complete or until max_busy tells it to stop
 * (if that is enabled).  See the paragraph on kipmid_max_busy_us in
 * Documentation/IPMI.txt for details.
 */
static int ipmi_thread(void *data)
	struct smi_info *smi_info = data;
	enum si_sm_result smi_result;
	struct timespec64 busy_until;

	ipmi_si_set_not_busy(&busy_until);
	set_user_nice(current, MAX_NICE);
	while (!kthread_should_stop()) {
		spin_lock_irqsave(&(smi_info->si_lock), flags);
		smi_result = smi_event_handler(smi_info, 0);

		/*
		 * If the driver is doing something, there is a possible
		 * race with the timer.  If the timer handler sees idle,
		 * and the thread here sees something else, the timer
		 * handler won't restart the timer even though it is
		 * required.  So start it here if necessary.
		 */
		if (smi_result != SI_SM_IDLE && !smi_info->timer_running)
			smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);

		spin_unlock_irqrestore(&(smi_info->si_lock), flags);
		busy_wait = ipmi_thread_busy_wait(smi_result, smi_info,
						  &busy_until);
		if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
		else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait)
		else if (smi_result == SI_SM_IDLE) {
			if (atomic_read(&smi_info->need_watch)) {
				schedule_timeout_interruptible(100);
				/* Wait to be woken up when we are needed. */
				__set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout_interruptible(1);

static void poll(void *send_info)
	struct smi_info *smi_info = send_info;
	unsigned long flags = 0;
	bool run_to_completion = smi_info->run_to_completion;

	/*
	 * Make sure there is some delay in the poll loop so we can
	 * drive time forward and timeout things.
	 */
	if (!run_to_completion)
		spin_lock_irqsave(&smi_info->si_lock, flags);
	smi_event_handler(smi_info, 10);
	if (!run_to_completion)
		spin_unlock_irqrestore(&smi_info->si_lock, flags);

static void request_events(void *send_info)
	struct smi_info *smi_info = send_info;

	if (!smi_info->has_event_buffer)

	atomic_set(&smi_info->req_events, 1);

static void set_need_watch(void *send_info, bool enable)
	struct smi_info *smi_info = send_info;
	unsigned long flags;

	atomic_set(&smi_info->need_watch, enable);
	spin_lock_irqsave(&smi_info->si_lock, flags);
	check_start_timer_thread(smi_info);
	spin_unlock_irqrestore(&smi_info->si_lock, flags);

static void smi_timeout(struct timer_list *t)
	struct smi_info *smi_info = from_timer(smi_info, t, si_timer);
	enum si_sm_result smi_result;
	unsigned long flags;
	unsigned long jiffies_now;

	spin_lock_irqsave(&(smi_info->si_lock), flags);
	debug_timestamp("Timer");

	jiffies_now = jiffies;
	time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
		     * SI_USEC_PER_JIFFY);
	smi_result = smi_event_handler(smi_info, time_diff);

	if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) {
		/* Running with interrupts, only do long timeouts. */
		timeout = jiffies + SI_TIMEOUT_JIFFIES;
		smi_inc_stat(smi_info, long_timeouts);

	/*
	 * If the state machine asks for a short delay, then shorten
	 * the timer timeout.
	 */
	if (smi_result == SI_SM_CALL_WITH_DELAY) {
		smi_inc_stat(smi_info, short_timeouts);
		timeout = jiffies + 1;
		smi_inc_stat(smi_info, long_timeouts);
		timeout = jiffies + SI_TIMEOUT_JIFFIES;

	if (smi_result != SI_SM_IDLE)
		smi_mod_timer(smi_info, timeout);
		smi_info->timer_running = false;
	spin_unlock_irqrestore(&(smi_info->si_lock), flags);

irqreturn_t ipmi_si_irq_handler(int irq, void *data)
	struct smi_info *smi_info = data;
	unsigned long flags;

	if (smi_info->io.si_type == SI_BT)
		/* We need to clear the IRQ flag for the BT interface. */
		smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
				     IPMI_BT_INTMASK_CLEAR_IRQ_BIT
				     | IPMI_BT_INTMASK_ENABLE_IRQ_BIT);

	spin_lock_irqsave(&(smi_info->si_lock), flags);

	smi_inc_stat(smi_info, interrupts);

	debug_timestamp("Interrupt");

	smi_event_handler(smi_info, 0);
	spin_unlock_irqrestore(&(smi_info->si_lock), flags);

static int smi_start_processing(void *send_info,
				struct ipmi_smi *intf)
	struct smi_info *new_smi = send_info;

	new_smi->intf = intf;

	/* Set up the timer that drives the interface. */
	timer_setup(&new_smi->si_timer, smi_timeout, 0);
	new_smi->timer_can_start = true;
	smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);

	/* Try to claim any interrupts. */
	if (new_smi->io.irq_setup) {
		new_smi->io.irq_handler_data = new_smi;
		new_smi->io.irq_setup(&new_smi->io);

	/*
	 * Check if the user forcefully enabled the daemon.
	 */
	if (new_smi->intf_num < num_force_kipmid)
		enable = force_kipmid[new_smi->intf_num];
	/*
	 * The BT interface is efficient enough to not need a thread,
	 * and there is no need for a thread if we have interrupts.
	 */
	else if ((new_smi->io.si_type != SI_BT) && (!new_smi->io.irq))

		new_smi->thread = kthread_run(ipmi_thread, new_smi,
					      "kipmi%d", new_smi->intf_num);
		if (IS_ERR(new_smi->thread)) {
			dev_notice(new_smi->io.dev,
				   "Could not start kernel thread due to error %ld, only using timers to drive the interface\n",
				   PTR_ERR(new_smi->thread));
			new_smi->thread = NULL;

static int get_smi_info(void *send_info, struct ipmi_smi_info *data)
	struct smi_info *smi = send_info;

	data->addr_src = smi->io.addr_source;
	data->dev = smi->io.dev;
	data->addr_info = smi->io.addr_info;
	get_device(smi->io.dev);

static void set_maintenance_mode(void *send_info, bool enable)
	struct smi_info *smi_info = send_info;

		atomic_set(&smi_info->req_events, 0);

static void shutdown_smi(void *send_info);
static const struct ipmi_smi_handlers handlers = {
	.owner			= THIS_MODULE,
	.start_processing	= smi_start_processing,
	.shutdown		= shutdown_smi,
	.get_smi_info		= get_smi_info,
	.request_events		= request_events,
	.set_need_watch		= set_need_watch,
	.set_maintenance_mode	= set_maintenance_mode,
	.set_run_to_completion	= set_run_to_completion,
	.flush_messages		= flush_messages,

static LIST_HEAD(smi_infos);
static DEFINE_MUTEX(smi_infos_lock);
static int smi_num; /* Used to sequence the SMIs */

static const char * const addr_space_to_str[] = { "i/o", "mem" };

module_param_array(force_kipmid, int, &num_force_kipmid, 0);
MODULE_PARM_DESC(force_kipmid, "Force the kipmi daemon to be enabled (1) or"
		 " disabled (0).  Normally the IPMI driver auto-detects"
		 " this, but the value may be overridden by this parm.");
module_param(unload_when_empty, bool, 0);
MODULE_PARM_DESC(unload_when_empty, "Unload the module if no interfaces are"
		 " specified or found, default is 1.  Setting to 0"
		 " is useful for hot add of devices using hotmod.");
module_param_array(kipmid_max_busy_us, uint, &num_max_busy_us, 0644);
MODULE_PARM_DESC(kipmid_max_busy_us,
		 "Max time (in microseconds) to busy-wait for IPMI data before"
		 " sleeping.  0 (default) means to wait forever.  Set to 100-500"
		 " if kipmid is using up a lot of CPU time.");

void ipmi_irq_finish_setup(struct si_sm_io *io)
	if (io->si_type == SI_BT)
		/* Enable the interrupt in the BT interface. */
		io->outputb(io, IPMI_BT_INTMASK_REG,
			    IPMI_BT_INTMASK_ENABLE_IRQ_BIT);

void ipmi_irq_start_cleanup(struct si_sm_io *io)
	if (io->si_type == SI_BT)
		/* Disable the interrupt in the BT interface. */
		io->outputb(io, IPMI_BT_INTMASK_REG, 0);

static void std_irq_cleanup(struct si_sm_io *io)
	ipmi_irq_start_cleanup(io);
	free_irq(io->irq, io->irq_handler_data);

int ipmi_std_irq_setup(struct si_sm_io *io)
	rv = request_irq(io->irq,
			 ipmi_si_irq_handler,
			 io->irq_handler_data);
		dev_warn(io->dev, "%s unable to claim interrupt %d, running polled\n",
			 DEVICE_NAME, io->irq);
		io->irq_cleanup = std_irq_cleanup;
		ipmi_irq_finish_setup(io);
		dev_info(io->dev, "Using irq %d\n", io->irq);

static int wait_for_msg_done(struct smi_info *smi_info)
	enum si_sm_result smi_result;

	smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
		if (smi_result == SI_SM_CALL_WITH_DELAY ||
		    smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
			schedule_timeout_uninterruptible(1);
			smi_result = smi_info->handlers->event(
				smi_info->si_sm, jiffies_to_usecs(1));
		} else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
			smi_result = smi_info->handlers->event(
				smi_info->si_sm, 0);

	if (smi_result == SI_SM_HOSED)
		/*
		 * We couldn't get the state machine to run, so whatever's at
		 * the port is probably not an IPMI SMI interface.
		 */

static int try_get_dev_id(struct smi_info *smi_info)
	unsigned char msg[2];
	unsigned char *resp;
	unsigned long resp_len;

	resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);

	/*
	 * Do a Get Device ID command, since it comes back with some
	 * useful info.
	 */
	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
	msg[1] = IPMI_GET_DEVICE_ID_CMD;
	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);

	rv = wait_for_msg_done(smi_info);

	resp_len = smi_info->handlers->get_result(smi_info->si_sm,
						  resp, IPMI_MAX_MSG_LENGTH);

	/* Check and record info from the get device id, in case we need it. */
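	/*
	 * Byte 0 of the response is the NetFN/LUN byte (NetFN in the top
	 * six bits, hence the >> 2) and byte 1 is the command.
	 */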
	rv = ipmi_demangle_device_id(resp[0] >> 2, resp[1],
				     resp + 2, resp_len - 2, &smi_info->device_id);

static int get_global_enables(struct smi_info *smi_info, u8 *enables)
	unsigned char msg[3];
	unsigned char *resp;
	unsigned long resp_len;

	resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);

	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
	msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);

	rv = wait_for_msg_done(smi_info);
		dev_warn(smi_info->io.dev,
			 "Error getting response from get global enables command: %d\n",
			 rv);

	resp_len = smi_info->handlers->get_result(smi_info->si_sm,
						  resp, IPMI_MAX_MSG_LENGTH);

	    resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
	    resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD ||
		dev_warn(smi_info->io.dev,
			 "Invalid return from get global enables command: %ld %x %x %x\n",
			 resp_len, resp[0], resp[1], resp[2]);

/*
 * Returns 1 if it gets an error from the command.
 */
static int set_global_enables(struct smi_info *smi_info, u8 enables)
	unsigned char msg[3];
	unsigned char *resp;
	unsigned long resp_len;

	resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);

	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
	msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;

	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);

	rv = wait_for_msg_done(smi_info);
		dev_warn(smi_info->io.dev,
			 "Error getting response from set global enables command: %d\n",
			 rv);

	resp_len = smi_info->handlers->get_result(smi_info->si_sm,
						  resp, IPMI_MAX_MSG_LENGTH);

	    resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
	    resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) {
		dev_warn(smi_info->io.dev,
			 "Invalid return from set global enables command: %ld %x %x\n",
			 resp_len, resp[0], resp[1]);

/*
 * Some BMCs do not support clearing the receive irq bit in the global
 * enables (even if they don't support interrupts on the BMC).  Check
 * for this and handle it properly.
 */
static void check_clr_rcv_irq(struct smi_info *smi_info)
	rv = get_global_enables(smi_info, &enables);
		if ((enables & IPMI_BMC_RCV_MSG_INTR) == 0)
			/* Already clear, should work ok. */

		enables &= ~IPMI_BMC_RCV_MSG_INTR;
		rv = set_global_enables(smi_info, enables);

		dev_err(smi_info->io.dev,
			"Cannot check clearing the rcv irq: %d\n", rv);

		/*
		 * An error from the set global enables command means
		 * clearing the rcv irq bit is not supported.
		 */
		dev_warn(smi_info->io.dev,
			 "The BMC does not support clearing the recv irq bit, compensating, but the BMC needs to be fixed.\n");
		smi_info->cannot_disable_irq = true;

/*
 * Some BMCs do not support setting the interrupt bits in the global
 * enables even if they support interrupts.  Clearly bad, but we can
 * compensate.
 */
static void check_set_rcv_irq(struct smi_info *smi_info)
	if (!smi_info->io.irq)

	rv = get_global_enables(smi_info, &enables);
		enables |= IPMI_BMC_RCV_MSG_INTR;
		rv = set_global_enables(smi_info, enables);

		dev_err(smi_info->io.dev,
			"Cannot check setting the rcv irq: %d\n", rv);

		/*
		 * An error from the set global enables command means
		 * setting the rcv irq bit is not supported.
		 */
		dev_warn(smi_info->io.dev,
			 "The BMC does not support setting the recv irq bit, compensating, but the BMC needs to be fixed.\n");
		smi_info->cannot_disable_irq = true;
		smi_info->irq_enable_broken = true;

static int try_enable_event_buffer(struct smi_info *smi_info)
	unsigned char msg[3];
	unsigned char *resp;
	unsigned long resp_len;

	resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);

	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
	msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);

	rv = wait_for_msg_done(smi_info);
		pr_warn(PFX "Error getting response from get global enables command, the event buffer is not enabled.\n");

	resp_len = smi_info->handlers->get_result(smi_info->si_sm,
						  resp, IPMI_MAX_MSG_LENGTH);

	    resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
	    resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD ||
		pr_warn(PFX "Invalid return from get global enables command, cannot enable the event buffer.\n");

	if (resp[3] & IPMI_BMC_EVT_MSG_BUFF) {
		/* buffer is already enabled, nothing to do. */
		smi_info->supports_event_msg_buff = true;

	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
	msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
	msg[2] = resp[3] | IPMI_BMC_EVT_MSG_BUFF;
	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);

	rv = wait_for_msg_done(smi_info);
		pr_warn(PFX "Error getting response from set global enables command, the event buffer is not enabled.\n");

	resp_len = smi_info->handlers->get_result(smi_info->si_sm,
						  resp, IPMI_MAX_MSG_LENGTH);

	    resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
	    resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) {
		pr_warn(PFX "Invalid return from set global enables command, cannot enable the event buffer.\n");

		/*
		 * An error when setting the event buffer bit means
		 * that the event buffer is not supported.
		 */

		smi_info->supports_event_msg_buff = true;

#ifdef CONFIG_IPMI_PROC_INTERFACE
static int smi_type_proc_show(struct seq_file *m, void *v)
	struct smi_info *smi = m->private;

	seq_printf(m, "%s\n", si_to_str[smi->io.si_type]);

static int smi_type_proc_open(struct inode *inode, struct file *file)
	return single_open(file, smi_type_proc_show, PDE_DATA(inode));

static const struct file_operations smi_type_proc_ops = {
	.open		= smi_type_proc_open,
	.llseek		= seq_lseek,
	.release	= single_release,

static int smi_si_stats_proc_show(struct seq_file *m, void *v)
	struct smi_info *smi = m->private;

	seq_printf(m, "interrupts_enabled:    %d\n",
		   smi->io.irq && !smi->interrupt_disabled);
	seq_printf(m, "short_timeouts:        %u\n",
		   smi_get_stat(smi, short_timeouts));
	seq_printf(m, "long_timeouts:         %u\n",
		   smi_get_stat(smi, long_timeouts));
	seq_printf(m, "idles:                 %u\n",
		   smi_get_stat(smi, idles));
	seq_printf(m, "interrupts:            %u\n",
		   smi_get_stat(smi, interrupts));
	seq_printf(m, "attentions:            %u\n",
		   smi_get_stat(smi, attentions));
	seq_printf(m, "flag_fetches:          %u\n",
		   smi_get_stat(smi, flag_fetches));
	seq_printf(m, "hosed_count:           %u\n",
		   smi_get_stat(smi, hosed_count));
	seq_printf(m, "complete_transactions: %u\n",
		   smi_get_stat(smi, complete_transactions));
	seq_printf(m, "events:                %u\n",
		   smi_get_stat(smi, events));
	seq_printf(m, "watchdog_pretimeouts:  %u\n",
		   smi_get_stat(smi, watchdog_pretimeouts));
	seq_printf(m, "incoming_messages:     %u\n",
		   smi_get_stat(smi, incoming_messages));

static int smi_si_stats_proc_open(struct inode *inode, struct file *file)
	return single_open(file, smi_si_stats_proc_show, PDE_DATA(inode));

static const struct file_operations smi_si_stats_proc_ops = {
	.open		= smi_si_stats_proc_open,
	.llseek		= seq_lseek,
	.release	= single_release,

static int smi_params_proc_show(struct seq_file *m, void *v)
	struct smi_info *smi = m->private;

		   "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
		   si_to_str[smi->io.si_type],
		   addr_space_to_str[smi->io.addr_type],
		   smi->io.slave_addr);

static int smi_params_proc_open(struct inode *inode, struct file *file)
	return single_open(file, smi_params_proc_show, PDE_DATA(inode));

static const struct file_operations smi_params_proc_ops = {
	.open		= smi_params_proc_open,
	.llseek		= seq_lseek,
	.release	= single_release,

#define IPMI_SI_ATTR(name) \
static ssize_t ipmi_##name##_show(struct device *dev,			\
				  struct device_attribute *attr,	\
				  char *buf)				\
	struct smi_info *smi_info = dev_get_drvdata(dev);		\
	return snprintf(buf, 10, "%u\n", smi_get_stat(smi_info, name)); \
static DEVICE_ATTR(name, S_IRUGO, ipmi_##name##_show, NULL)
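/*
 * As an example of the macro above, IPMI_SI_ATTR(short_timeouts)
 * expands to an ipmi_short_timeouts_show() function that prints the
 * short_timeouts stat, plus a matching read-only
 * dev_attr_short_timeouts device attribute.
 */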
static ssize_t ipmi_type_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
	struct smi_info *smi_info = dev_get_drvdata(dev);

	return snprintf(buf, 10, "%s\n", si_to_str[smi_info->io.si_type]);
static DEVICE_ATTR(type, S_IRUGO, ipmi_type_show, NULL);

static ssize_t ipmi_interrupts_enabled_show(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
	struct smi_info *smi_info = dev_get_drvdata(dev);
	int enabled = smi_info->io.irq && !smi_info->interrupt_disabled;

	return snprintf(buf, 10, "%d\n", enabled);
static DEVICE_ATTR(interrupts_enabled, S_IRUGO,
		   ipmi_interrupts_enabled_show, NULL);

IPMI_SI_ATTR(short_timeouts);
IPMI_SI_ATTR(long_timeouts);
IPMI_SI_ATTR(idles);
IPMI_SI_ATTR(interrupts);
IPMI_SI_ATTR(attentions);
IPMI_SI_ATTR(flag_fetches);
IPMI_SI_ATTR(hosed_count);
IPMI_SI_ATTR(complete_transactions);
IPMI_SI_ATTR(events);
IPMI_SI_ATTR(watchdog_pretimeouts);
IPMI_SI_ATTR(incoming_messages);

static ssize_t ipmi_params_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
	struct smi_info *smi_info = dev_get_drvdata(dev);

	return snprintf(buf, 200,
			"%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
			si_to_str[smi_info->io.si_type],
			addr_space_to_str[smi_info->io.addr_type],
			smi_info->io.addr_data,
			smi_info->io.regspacing,
			smi_info->io.regsize,
			smi_info->io.regshift,
			smi_info->io.irq,
			smi_info->io.slave_addr);
static DEVICE_ATTR(params, S_IRUGO, ipmi_params_show, NULL);

static struct attribute *ipmi_si_dev_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_interrupts_enabled.attr,
	&dev_attr_short_timeouts.attr,
	&dev_attr_long_timeouts.attr,
	&dev_attr_idles.attr,
	&dev_attr_interrupts.attr,
	&dev_attr_attentions.attr,
	&dev_attr_flag_fetches.attr,
	&dev_attr_hosed_count.attr,
	&dev_attr_complete_transactions.attr,
	&dev_attr_events.attr,
	&dev_attr_watchdog_pretimeouts.attr,
	&dev_attr_incoming_messages.attr,
	&dev_attr_params.attr,

static const struct attribute_group ipmi_si_dev_attr_group = {
	.attrs		= ipmi_si_dev_attrs,

/*
 * oem_data_avail_to_receive_msg_avail
 * @info - smi_info structure with msg_flags set
 *
 * Converts flags from OEM_DATA_AVAIL to RECEIVE_MSG_AVAIL
 * Returns 1 indicating need to re-run handle_flags().
 */
static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info)
	smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) |
			       RECEIVE_MSG_AVAIL);

/*
 * setup_dell_poweredge_oem_data_handler
 * @info - smi_info.device_id must be populated
 *
 * Systems that match, but have firmware version < 1.40 may assert
 * OEM0_DATA_AVAIL on their own, without being told via Set Flags that
 * it's safe to do so.  Such systems will de-assert OEM1_DATA_AVAIL
 * upon receipt of IPMI_GET_MSG_CMD, so we should treat these flags
 * as RECEIVE_MSG_AVAIL instead.
 *
 * As Dell has no plans to release IPMI 1.5 firmware that *ever*
 * asserts the OEM[012] bits, and if it did, the driver would have to
 * change to handle that properly, we don't actually check for the
 * firmware version.
 * Device ID = 0x20                BMC on PowerEdge 8G servers
 * Device Revision = 0x80
 * Firmware Revision1 = 0x01       BMC version 1.40
 * Firmware Revision2 = 0x40       BCD encoded
 * IPMI Version = 0x51             IPMI 1.5
 * Manufacturer ID = A2 02 00      Dell IANA
 *
 * Additionally, PowerEdge systems with IPMI < 1.5 may also assert
 * OEM0_DATA_AVAIL, which needs to be treated as RECEIVE_MSG_AVAIL.
 */
#define DELL_POWEREDGE_8G_BMC_DEVICE_ID  0x20
#define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80
#define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51
#define DELL_IANA_MFR_ID 0x0002a2
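/* 0x0002a2 is 674 decimal, Dell's IANA enterprise number. */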
static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info)
	struct ipmi_device_id *id = &smi_info->device_id;

	if (id->manufacturer_id == DELL_IANA_MFR_ID) {
		if (id->device_id == DELL_POWEREDGE_8G_BMC_DEVICE_ID &&
		    id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV &&
		    id->ipmi_version == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) {
			smi_info->oem_data_avail_handler =
				oem_data_avail_to_receive_msg_avail;
		} else if (ipmi_version_major(id) < 1 ||
			   (ipmi_version_major(id) == 1 &&
			    ipmi_version_minor(id) < 5)) {
			smi_info->oem_data_avail_handler =
				oem_data_avail_to_receive_msg_avail;

#define CANNOT_RETURN_REQUESTED_LENGTH 0xCA
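/*
 * 0xCA is the IPMI completion code for "cannot return number of
 * requested data bytes".
 */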
static void return_hosed_msg_badsize(struct smi_info *smi_info)
	struct ipmi_smi_msg *msg = smi_info->curr_msg;

	/* Make it a response */
	msg->rsp[0] = msg->data[0] | 4;
	msg->rsp[1] = msg->data[1];
	msg->rsp[2] = CANNOT_RETURN_REQUESTED_LENGTH;

	smi_info->curr_msg = NULL;
	deliver_recv_msg(smi_info, msg);

/*
 * dell_poweredge_bt_xaction_handler
 * @info - smi_info.device_id must be populated
 *
 * Dell PowerEdge servers with the BT interface (x6xx and 1750) will
 * not respond to a Get SDR command if the length of the data
 * requested is exactly 0x3A, which leads to command timeouts and no
 * data returned.  This intercepts such commands, and causes userspace
 * callers to try again with a different-sized buffer, which succeeds.
 */
#define STORAGE_NETFN 0x0A
#define STORAGE_CMD_GET_SDR 0x23
static int dell_poweredge_bt_xaction_handler(struct notifier_block *self,
					     unsigned long unused,
					     void *in)
	struct smi_info *smi_info = in;
	unsigned char *data = smi_info->curr_msg->data;
	unsigned int size = smi_info->curr_msg->data_size;

	    (data[0] >> 2) == STORAGE_NETFN &&
	    data[1] == STORAGE_CMD_GET_SDR &&
		return_hosed_msg_badsize(smi_info);

static struct notifier_block dell_poweredge_bt_xaction_notifier = {
	.notifier_call	= dell_poweredge_bt_xaction_handler,

/*
 * setup_dell_poweredge_bt_xaction_handler
 * @info - smi_info.device_id must be filled in already
 *
 * Registers the BT transaction notifier above for the Dell PowerEdge
 * systems that need it.
 */
setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info)
	struct ipmi_device_id *id = &smi_info->device_id;

	if (id->manufacturer_id == DELL_IANA_MFR_ID &&
	    smi_info->io.si_type == SI_BT)
		register_xaction_notifier(&dell_poweredge_bt_xaction_notifier);

/*
 * setup_oem_data_handler
 * @info - smi_info.device_id must be filled in already
 *
 * Fills in smi_info.oem_data_avail_handler
 * when we know what function to use there.
 */
static void setup_oem_data_handler(struct smi_info *smi_info)
	setup_dell_poweredge_oem_data_handler(smi_info);

static void setup_xaction_handlers(struct smi_info *smi_info)
	setup_dell_poweredge_bt_xaction_handler(smi_info);

static void check_for_broken_irqs(struct smi_info *smi_info)
	check_clr_rcv_irq(smi_info);
	check_set_rcv_irq(smi_info);

static inline void stop_timer_and_thread(struct smi_info *smi_info)
	if (smi_info->thread != NULL) {
		kthread_stop(smi_info->thread);
		smi_info->thread = NULL;

	smi_info->timer_can_start = false;
	if (smi_info->timer_running)
		del_timer_sync(&smi_info->si_timer);

static struct smi_info *find_dup_si(struct smi_info *info)
	list_for_each_entry(e, &smi_infos, link) {
		if (e->io.addr_type != info->io.addr_type)
		if (e->io.addr_data == info->io.addr_data) {
			/*
			 * This is a cheap hack, ACPI doesn't have a defined
			 * slave address but SMBIOS does.  Pick it up from
			 * any source that has it available.
			 */
			if (info->io.slave_addr && !e->io.slave_addr)
				e->io.slave_addr = info->io.slave_addr;

int ipmi_si_add_smi(struct si_sm_io *io)
	struct smi_info *new_smi, *dup;

	if (!io->io_setup) {
		if (io->addr_type == IPMI_IO_ADDR_SPACE) {
			io->io_setup = ipmi_si_port_setup;
		} else if (io->addr_type == IPMI_MEM_ADDR_SPACE) {
			io->io_setup = ipmi_si_mem_setup;

	new_smi = kzalloc(sizeof(*new_smi), GFP_KERNEL);

	spin_lock_init(&new_smi->si_lock);

	mutex_lock(&smi_infos_lock);
	dup = find_dup_si(new_smi);
		if (new_smi->io.addr_source == SI_ACPI &&
		    dup->io.addr_source == SI_SMBIOS) {
			/* We prefer ACPI over SMBIOS. */
			dev_info(dup->io.dev,
				 "Removing SMBIOS-specified %s state machine in favor of ACPI\n",
				 si_to_str[new_smi->io.si_type]);
			cleanup_one_si(dup);
			dev_info(new_smi->io.dev,
				 "%s-specified %s state machine: duplicate\n",
				 ipmi_addr_src_to_str(new_smi->io.addr_source),
				 si_to_str[new_smi->io.si_type]);

	pr_info(PFX "Adding %s-specified %s state machine\n",
		ipmi_addr_src_to_str(new_smi->io.addr_source),
		si_to_str[new_smi->io.si_type]);

	list_add_tail(&new_smi->link, &smi_infos);

		rv = try_smi_init(new_smi);
			cleanup_one_si(new_smi);
			mutex_unlock(&smi_infos_lock);

	mutex_unlock(&smi_infos_lock);

/*
 * Try to start up an interface.  Must be called with smi_infos_lock
 * held, primarily to keep smi_num consistent; we only want to do one
 * of these at a time.
 */
static int try_smi_init(struct smi_info *new_smi)
	char *init_name = NULL;

	pr_info(PFX "Trying %s-specified %s state machine at %s address 0x%lx, slave address 0x%x, irq %d\n",
		ipmi_addr_src_to_str(new_smi->io.addr_source),
		si_to_str[new_smi->io.si_type],
		addr_space_to_str[new_smi->io.addr_type],
		new_smi->io.addr_data,
		new_smi->io.slave_addr, new_smi->io.irq);

	switch (new_smi->io.si_type) {
		new_smi->handlers = &kcs_smi_handlers;
		new_smi->handlers = &smic_smi_handlers;
		new_smi->handlers = &bt_smi_handlers;
		/* No support for anything else yet. */

	new_smi->intf_num = smi_num;

	/* Do this early so it's available for logs. */
	if (!new_smi->io.dev) {
		init_name = kasprintf(GFP_KERNEL, "ipmi_si.%d",
				      new_smi->intf_num);

		/*
		 * If we don't already have a device from something
		 * else (like PCI), then register a new one.
		 */
		new_smi->pdev = platform_device_alloc("ipmi_si",
						      new_smi->intf_num);
		if (!new_smi->pdev) {
			pr_err(PFX "Unable to allocate platform device\n");

		new_smi->io.dev = &new_smi->pdev->dev;
		new_smi->io.dev->driver = &ipmi_platform_driver.driver;
		/* Nulled by device_add() */
		new_smi->io.dev->init_name = init_name;

	/* Allocate the state machine's data and initialize it. */
	new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
	if (!new_smi->si_sm) {

	new_smi->io.io_size = new_smi->handlers->init_data(new_smi->si_sm,
							   &new_smi->io);

	/* Now that we know the I/O size, we can set up the I/O. */
	rv = new_smi->io.io_setup(&new_smi->io);
		dev_err(new_smi->io.dev, "Could not set up I/O space\n");

	/* Do low-level detection first. */
	if (new_smi->handlers->detect(new_smi->si_sm)) {
		if (new_smi->io.addr_source)
			dev_err(new_smi->io.dev,
				"Interface detection failed\n");

	/*
	 * Attempt a get device id command.  If it fails, we probably
	 * don't have a BMC here.
	 */
	rv = try_get_dev_id(new_smi);
		if (new_smi->io.addr_source)
			dev_err(new_smi->io.dev,
				"There appears to be no BMC at this location\n");

	setup_oem_data_handler(new_smi);
	setup_xaction_handlers(new_smi);
	check_for_broken_irqs(new_smi);

	new_smi->waiting_msg = NULL;
	new_smi->curr_msg = NULL;
	atomic_set(&new_smi->req_events, 0);
	new_smi->run_to_completion = false;
	for (i = 0; i < SI_NUM_STATS; i++)
		atomic_set(&new_smi->stats[i], 0);

	new_smi->interrupt_disabled = true;
	atomic_set(&new_smi->need_watch, 0);

	rv = try_enable_event_buffer(new_smi);
		new_smi->has_event_buffer = true;

	/*
	 * Start clearing the flags before we enable interrupts or the
	 * timer to avoid racing with the timer.
	 */
	start_clear_flags(new_smi);

	/*
	 * IRQ is defined to be set when non-zero.  req_events will
	 * cause a global flags check that will enable interrupts.
	 */
	if (new_smi->io.irq) {
		new_smi->interrupt_disabled = false;
		atomic_set(&new_smi->req_events, 1);

	if (new_smi->pdev && !new_smi->pdev_registered) {
		rv = platform_device_add(new_smi->pdev);
			dev_err(new_smi->io.dev,
				"Unable to register system interface device: %d\n",
				rv);

		new_smi->pdev_registered = true;

	dev_set_drvdata(new_smi->io.dev, new_smi);
	rv = device_add_group(new_smi->io.dev, &ipmi_si_dev_attr_group);
		dev_err(new_smi->io.dev,
			"Unable to add device attributes: error %d\n",
			rv);

	new_smi->dev_group_added = true;

	rv = ipmi_register_smi(&handlers,
			       new_smi->io.slave_addr);
		dev_err(new_smi->io.dev,
			"Unable to register device: error %d\n",
			rv);

#ifdef CONFIG_IPMI_PROC_INTERFACE
	rv = ipmi_smi_add_proc_entry(new_smi->intf, "type",
		dev_err(new_smi->io.dev,
			"Unable to create proc entry: %d\n", rv);

	rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats",
				     &smi_si_stats_proc_ops,
		dev_err(new_smi->io.dev,
			"Unable to create proc entry: %d\n", rv);

	rv = ipmi_smi_add_proc_entry(new_smi->intf, "params",
				     &smi_params_proc_ops,
		dev_err(new_smi->io.dev,
			"Unable to create proc entry: %d\n", rv);

	/* Don't increment till we know we have succeeded. */

	dev_info(new_smi->io.dev, "IPMI %s interface initialized\n",
		 si_to_str[new_smi->io.si_type]);

	WARN_ON(new_smi->io.dev->init_name != NULL);

	shutdown_one_si(new_smi);

static int init_ipmi_si(void)
	enum ipmi_addr_src type = SI_INVALID;

	pr_info("IPMI System Interface driver.\n");

	/* If the user gave us a device, they presumably want us to use it */
	if (!ipmi_si_hardcode_find_bmc())

	ipmi_si_platform_init();

	ipmi_si_parisc_init();

	/*
	 * We prefer devices with interrupts, but in the case of a machine
	 * with multiple BMCs we assume that there will be several instances
	 * of a given type so if we succeed in registering a type then also
	 * try to register everything else of the same type.
	 */
	mutex_lock(&smi_infos_lock);
	list_for_each_entry(e, &smi_infos, link) {
		/*
		 * Try to register a device if it has an IRQ and we either
		 * haven't successfully registered a device yet or this
		 * device has the same type as one we successfully registered.
		 */
		if (e->io.irq && (!type || e->io.addr_source == type)) {
			if (!try_smi_init(e)) {
				type = e->io.addr_source;

	/* type will only have been set if we successfully registered an si */
		goto skip_fallback_noirq;

	/* Fall back to the preferred device */

	list_for_each_entry(e, &smi_infos, link) {
		if (!e->io.irq && (!type || e->io.addr_source == type)) {
			if (!try_smi_init(e)) {
				type = e->io.addr_source;

skip_fallback_noirq:
	mutex_unlock(&smi_infos_lock);

	mutex_lock(&smi_infos_lock);
	if (unload_when_empty && list_empty(&smi_infos)) {
		mutex_unlock(&smi_infos_lock);

		pr_warn(PFX "Unable to find any System Interface(s)\n");
		mutex_unlock(&smi_infos_lock);
module_init(init_ipmi_si);

static void shutdown_smi(void *send_info)
	struct smi_info *smi_info = send_info;

	if (smi_info->dev_group_added) {
		device_remove_group(smi_info->io.dev, &ipmi_si_dev_attr_group);
		smi_info->dev_group_added = false;

	if (smi_info->io.dev)
		dev_set_drvdata(smi_info->io.dev, NULL);

	/*
	 * Make sure that interrupts, the timer and the thread are
	 * stopped and will not run again.
	 */
	smi_info->interrupt_disabled = true;
	if (smi_info->io.irq_cleanup) {
		smi_info->io.irq_cleanup(&smi_info->io);
		smi_info->io.irq_cleanup = NULL;

	stop_timer_and_thread(smi_info);

	/*
	 * Wait until we know that we are out of any interrupt
	 * handlers that might have been running before we freed the
	 * interrupt.
	 */
	synchronize_sched();

	/*
	 * Timeouts are stopped, now make sure the interrupts are off
	 * in the BMC.  Note that timers and CPU interrupts are off,
	 * so no need for locks.
	 */
	while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) {
		schedule_timeout_uninterruptible(1);

	if (smi_info->handlers)
		disable_si_irq(smi_info);

	while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) {
		schedule_timeout_uninterruptible(1);

	if (smi_info->handlers)
		smi_info->handlers->cleanup(smi_info->si_sm);

	if (smi_info->io.addr_source_cleanup) {
		smi_info->io.addr_source_cleanup(&smi_info->io);
		smi_info->io.addr_source_cleanup = NULL;

	if (smi_info->io.io_cleanup) {
		smi_info->io.io_cleanup(&smi_info->io);
		smi_info->io.io_cleanup = NULL;

	kfree(smi_info->si_sm);
	smi_info->si_sm = NULL;

static void shutdown_one_si(struct smi_info *smi_info)
	struct ipmi_smi *intf = smi_info->intf;

	smi_info->intf = NULL;
	ipmi_unregister_smi(intf);

static void cleanup_one_si(struct smi_info *smi_info)
	list_del(&smi_info->link);

	shutdown_one_si(smi_info);

	if (smi_info->pdev) {
		if (smi_info->pdev_registered)
			platform_device_unregister(smi_info->pdev);
			platform_device_put(smi_info->pdev);

int ipmi_si_remove_by_dev(struct device *dev)
	mutex_lock(&smi_infos_lock);
	list_for_each_entry(e, &smi_infos, link) {
		if (e->io.dev == dev) {

	mutex_unlock(&smi_infos_lock);

void ipmi_si_remove_by_data(int addr_space, enum si_type si_type,
			    unsigned long addr)
	struct smi_info *e, *tmp_e;

	mutex_lock(&smi_infos_lock);
	list_for_each_entry_safe(e, tmp_e, &smi_infos, link) {
		if (e->io.addr_type != addr_space)
		if (e->io.si_type != si_type)
		if (e->io.addr_data == addr)

	mutex_unlock(&smi_infos_lock);

static void cleanup_ipmi_si(void)
	struct smi_info *e, *tmp_e;

	ipmi_si_pci_shutdown();

	ipmi_si_parisc_shutdown();

	ipmi_si_platform_shutdown();

	mutex_lock(&smi_infos_lock);
	list_for_each_entry_safe(e, tmp_e, &smi_infos, link)
	mutex_unlock(&smi_infos_lock);
module_exit(cleanup_ipmi_si);

MODULE_ALIAS("platform:dmi-ipmi-si");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT"
		   " system interfaces.");